metadata (dict) | text (string, length 60 – 3.49M)
---|---
{
"source": "jheidel/passpoller",
"score": 3
} |
#### File: jheidel/passpoller/irssi_post.py
```python
import logging
import string, os, urllib, urllib2, shlex
import requests
from subprocess import Popen, PIPE
class IrssiNotifier(object):
def __init__(self, api_token, enc_pass):
self.api_token = api_token
self.enc_pass = enc_pass
def encrypt(self, text):
command = 'openssl enc -aes-128-cbc -salt -base64 -A -pass env:OpenSSLEncPW'
opensslenv = os.environ.copy()
opensslenv['OpenSSLEncPW'] = self.enc_pass
output, errors = Popen(
shlex.split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE,
env=opensslenv).communicate(text + ' ')
output = string.replace(output, '/', '_')
output = string.replace(output, '+', '-')
output = string.replace(output, '=', '')
return output
def send(self, message, chan='#local', nick='server'):
data = {
'apiToken': self.api_token,
'nick': self.encrypt(nick),
'channel': self.encrypt(chan),
'message': self.encrypt(message),
'version': 13,
}
url = 'https://irssinotifier.appspot.com/API/Message'
logging.info('Now sending message to irssinotifier API.')
resp = requests.post(url, data=data)
logging.info('Message sent: %s', resp)
```
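A minimal usage sketch for the notifier above (not part of the repository): the token, encryption password, and message are placeholders, and it assumes `openssl` is available on the PATH.
```python
# Hypothetical credentials; IrssiNotifier comes from irssi_post.py above.
from irssi_post import IrssiNotifier

notifier = IrssiNotifier(api_token='YOUR_API_TOKEN', enc_pass='YOUR_ENCRYPTION_PASSWORD')
# nick, channel and message are encrypted via openssl before being POSTed.
notifier.send('pass report changed', chan='#local', nick='server')
```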
#### File: jheidel/passpoller/multi_notifier.py
```python
import threading
class MultiNotifier(object):
"""Multithreaded notifier implementation."""
def __init__(self, notifiers):
self.notifiers = notifiers
def send(self, *args, **kwargs):
threads = [threading.Thread(target=n.send, args=args, kwargs=kwargs)
for n in self.notifiers]
for t in threads:
t.start()
for t in threads:
t.join()
```
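A short sketch (credentials are placeholders) showing how MultiNotifier fans a single send() call out over the IrssiNotifier class defined above:
```python
from irssi_post import IrssiNotifier
from multi_notifier import MultiNotifier

notifier = MultiNotifier(notifiers=[
    IrssiNotifier('TOKEN_A', 'ENC_PASSWORD_A'),
    IrssiNotifier('TOKEN_B', 'ENC_PASSWORD_B'),
])
# Each underlying send() runs on its own thread; this call returns once all have joined.
notifier.send('pass report changed', chan='#local', nick='server')
```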
#### File: jheidel/passpoller/passpoller.py
```python
from BeautifulSoup import BeautifulSoup
import collections
import feedparser
import logging
import sys
from time import sleep
import yaml
import irssi_post
import multi_notifier
def parse_pass_rss(txt):
"""Parses a WSDOT pass summary block into a python ordered dict."""
soup = BeautifulSoup(txt)
lines = soup.contents
  # Indices of heading lines
strongs = [i for i, t in list(enumerate(lines))
if getattr(t, 'name', None) == 'strong']
# Chunkify lines based on heading lines
chunk_bounds = zip(strongs, strongs[1:] + [len(lines)])
chunks = [lines[start:end] for start,end in chunk_bounds]
  # Convert a chunk into a key value pair using fairly arbitrary semicolon
# parsing (silly wsdot...)
def chunk_to_kv(c):
head, tail = '', ''
node_text = lambda n: str(getattr(n, 'text', n))
if len(c) >= 1:
s = node_text(c[0]).split(':')
head += s[0]
if len(s) > 1:
tail += ' '.join(s[1:])
if len(c) >= 2:
tail += ' '.join(node_text(l) for l in c[1:])
return head.upper().strip(), tail.strip()
return collections.OrderedDict(chunk_to_kv(c) for c in chunks)
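# Illustrative (hypothetical) output for one WSDOT summary block -- the
# EASTBOUND/WESTBOUND keys are the ones PassDiffer below compares:
#   OrderedDict([('EASTBOUND', 'Traction tires advised.'),
#                ('WESTBOUND', 'No restrictions.')])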
class PassParser(object):
"""Manages a wsdot URL and handles fetching and parsing.
Duplicates are filtered out by keeping history.
"""
def __init__(self, url):
self.url = url
self.hist = []
def get(self):
feed = feedparser.parse(self.url)['entries'][0]
key = feed.id
data = parse_pass_rss(feed.summary)
if key in self.hist:
return None
else:
logging.debug('Appending key %s to history', key)
self.hist.append(key)
return data
class PassDiffer(object):
"""Checks for diffs between a new and old pass report."""
KEYS = ['EASTBOUND', 'WESTBOUND']
def __init__(self, initial_diff=False):
"""Initializer.
Args:
      initial_diff: Whether to treat the first check as a new update.
"""
self.last = None
self.skip = not initial_diff
def map(self, new_data):
return [new_data.get(k, None) for k in self.KEYS]
def check(self, new_data):
m = self.map(new_data)
has_diff = (m != self.last) and not self.skip
self.skip = False
self.last = m
if has_diff:
logging.debug('Differ returning true; value is %s', m)
return has_diff
def format(d):
"""Pretty prints a wsdot ordered dict."""
return '\n' + '\n'.join('%s: %s' % (k,v) for k, v in d.iteritems())
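# The config.yaml read below needs roughly this shape (all values are placeholders):
#   wsdot_url: <WSDOT pass report RSS feed URL>
#   passname: snoqualmie
#   poll_interval_sec: 60
#   notifiers:
#     - api_token: <irssinotifier API token>
#       password: <encryption password>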
def Poll(config_file):
"""Main passpoller loop."""
logging.basicConfig(
filename='passpoller.log', level=logging.DEBUG,
format=('%(asctime)s.%(msecs)d %(levelname)s %(module)s - %(funcName)s: '
'%(message)s'),
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Loading poller config from %s', config_file)
with open(config_file, 'r') as f:
config = yaml.load(f)
logging.info('Successfully loaded config.')
logging.debug('Config loaded: %s', config)
parser = PassParser(config['wsdot_url'])
differ = PassDiffer(initial_diff=False)
notifier = multi_notifier.MultiNotifier(
notifiers=[
irssi_post.IrssiNotifier(n['api_token'], n['password'])
for n in config['notifiers']
],
)
logging.info('Starting pass polling for %s on URL %s',
config['passname'], config['wsdot_url'])
while True:
try:
data = parser.get()
if data is not None:
logging.debug('New Data, checking for diff.')
if differ.check(data):
txt = format(data)
logging.info('New %s pass update: %s', config['passname'], txt)
notifier.send(txt, chan='#%s' % config['passname'], nick='wsdot')
logging.info('Notification complete.')
except Exception as e:
logging.error('Exception: ' + str(e))
sleep(config['poll_interval_sec'])
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: python passpoller.py [path to config.yaml]'
sys.exit(1)
Poll(sys.argv[1])
``` |
{
"source": "J-Heinemann/faasm",
"score": 2
} |
#### File: faasmcli/tasks/codegen.py
```python
from multiprocessing.pool import Pool
from subprocess import check_output, run
from invoke import task
from faasmcli.util.codegen import find_codegen_func, find_codegen_shared_lib
from faasmcli.util.env import PY_RUNTIME_ROOT
@task(default=True)
def codegen(ctx, user, function, wamr=False):
"""
Generates machine code for the given function
"""
env = {"WASM_VM": "wamr" if wamr else "wavm"}
binary = find_codegen_func()
run(
"{} {} {}".format(binary, user, function),
shell=True,
env=env,
check=True,
)
@task
def user(ctx, user, wamr=False):
"""
    Generates machine code for all the functions belonging to the given user
"""
_do_codegen_user(user, wamr=wamr)
def _do_codegen_user(user, wamr=False):
print("Running codegen for user {}".format(user))
binary = find_codegen_func()
env = {"WASM_VM": "wamr" if wamr else "wavm"}
run("{} {}".format(binary, user), shell=True, env=env, check=True)
@task
def local(ctx, wamr=False):
"""
Runs codegen on functions used in tests
"""
_do_codegen_user("demo", wamr=wamr)
_do_codegen_user("errors", wamr=wamr)
_do_codegen_user("omp", wamr=wamr)
_do_codegen_user("mpi", wamr=wamr)
_do_codegen_user("rust", wamr=wamr)
# Always run the codegen required by the tests
codegen(ctx, "demo", "echo", wamr=True)
# Run these in parallel
p = Pool(2)
users = [
("python", wamr),
("sgd", wamr),
]
p.starmap(_do_codegen_user, users)
print("Running codegen on python shared objects")
binary = find_codegen_shared_lib()
check_output("{} {}".format(binary, PY_RUNTIME_ROOT), shell=True)
```
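For reference, a self-contained sketch of the `Pool.starmap` pattern used above; `fake_codegen` is a hypothetical stand-in for `_do_codegen_user`, not part of faasm:
```python
from multiprocessing.pool import Pool

def fake_codegen(user, wamr=False):
    # starmap unpacks each tuple into positional arguments, which is why
    # wamr is passed positionally in the tuples above.
    print(user, wamr)

if __name__ == "__main__":
    with Pool(2) as p:
        p.starmap(fake_codegen, [("python", False), ("sgd", False)])
```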
#### File: faasmcli/tasks/libs.py
```python
from os import makedirs
from os.path import exists
from os.path import join
from subprocess import run
from invoke import task
from faasmcli.util.codegen import find_codegen_shared_lib
from faasmtools.build import CMAKE_TOOLCHAIN_FILE, WASM_SYSROOT
from faasmcli.util.env import (
PROJ_ROOT,
FAASM_INSTALL_DIR,
FAASM_RUNTIME_ROOT,
)
from faasmcli.util.files import clean_dir
@task
def toolchain(ctx, clean=False):
"""
Compile and install all libs crucial to the toolchain
"""
faasm(ctx, clean=clean)
faasmp(ctx, clean=clean)
faasmpi(ctx, clean=clean)
fake(ctx, clean=clean)
pyinit(ctx, clean=clean)
rust(ctx, clean=clean)
@task
def native(ctx, clean=False):
"""
Compile and install Faasm native tools
"""
if not exists(FAASM_INSTALL_DIR):
makedirs(FAASM_INSTALL_DIR)
build_dir = join(PROJ_ROOT, "build", "native_tools")
clean_dir(build_dir, clean)
build_cmd = [
"cmake",
"-GNinja",
"-DCMAKE_CXX_COMPILER=/usr/bin/clang++-10",
"-DCMAKE_C_COMPILER=/usr/bin/clang-10",
"-DFAASM_BUILD_TYPE=native-tools",
"-DFAASM_STATIC_LIBS=OFF",
"-DFAABRIC_BUILD_TESTS=OFF",
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_INSTALL_PREFIX={}".format(FAASM_INSTALL_DIR),
PROJ_ROOT,
]
build_cmd_str = " ".join(build_cmd)
print(build_cmd_str)
run(build_cmd_str, shell=True, cwd=build_dir, check=True)
run("ninja", shell=True, cwd=build_dir, check=True)
run("sudo ninja install", shell=True, cwd=build_dir, check=True)
def _build_faasm_lib(dir_name, clean, verbose, target=None):
work_dir = join(PROJ_ROOT, dir_name)
build_dir = join(PROJ_ROOT, "build", dir_name)
clean_dir(build_dir, clean)
build_cmd = [
"cmake",
"-GNinja",
"-DFAASM_BUILD_TYPE=wasm",
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_TOOLCHAIN_FILE={}".format(CMAKE_TOOLCHAIN_FILE),
work_dir,
]
build_cmd_str = " ".join(build_cmd)
print(build_cmd_str)
run(build_cmd_str, shell=True, cwd=build_dir, check=True)
build_cmd = ["ninja", target if target else ""]
run(" ".join(build_cmd), shell=True, cwd=build_dir, check=True)
run("ninja install", shell=True, cwd=build_dir, check=True)
@task
def faasm(ctx, clean=False, verbose=False):
"""
Compile and install the Faasm library
"""
_build_faasm_lib("libs/cpp", clean, verbose)
@task
def faasmp(ctx, clean=False, verbose=False):
"""
Compile and install the Faasm OpenMP library
"""
_build_faasm_lib("libs/faasmp", clean, verbose)
@task
def faasmpi(ctx, clean=False, verbose=False):
"""
Compile and install the Faasm MPI library
"""
_build_faasm_lib("libs/faasmpi", clean, verbose)
@task
def rust(ctx, clean=False, verbose=False):
"""
Install Rust library
"""
_build_faasm_lib("libs/rust", clean, verbose)
@task
def pyinit(ctx, clean=False, verbose=False):
"""
Install pyinit library
"""
_build_faasm_lib("libs/pyinit", clean, verbose)
@task
def fake(ctx, clean=False):
"""
Compile and install the fake library used for testing
"""
work_dir = join(PROJ_ROOT, "func", "dynlink")
build_dir = join(PROJ_ROOT, "build", "libfake")
clean_dir(build_dir, clean)
build_cmd = [
"cmake",
"-GNinja",
"-DFAASM_BUILD_SHARED=ON",
"-DFAASM_BUILD_TYPE=wasm",
"-DCMAKE_TOOLCHAIN_FILE={}".format(CMAKE_TOOLCHAIN_FILE),
"-DCMAKE_BUILD_TYPE=Release",
"-DCMAKE_INSTALL_PREFIX={}".format(WASM_SYSROOT),
work_dir,
]
run(" ".join(build_cmd), shell=True, cwd=build_dir, check=True)
run("ninja", shell=True, cwd=build_dir, check=True)
run("ninja install", shell=True, cwd=build_dir, check=True)
# Copy shared object into place
sysroot_files = join(WASM_SYSROOT, "lib", "wasm32-wasi", "libfake*.so")
runtime_lib_dir = join(FAASM_RUNTIME_ROOT, "lib")
if not exists(runtime_lib_dir):
makedirs(runtime_lib_dir)
run(
"cp {} {}".format(sysroot_files, runtime_lib_dir),
shell=True,
check=True,
)
# Run codegen
shared_objs = [
join(FAASM_RUNTIME_ROOT, "lib", "libfakeLibA.so"),
join(FAASM_RUNTIME_ROOT, "lib", "libfakeLibB.so"),
]
binary = find_codegen_shared_lib()
for so in shared_objs:
print("Running codegen for {}".format(so))
run("{} {}".format(binary, so), shell=True, check=True)
```
#### File: faasmcli/tasks/release.py
```python
from github import Github
from invoke import task
from subprocess import run, PIPE, STDOUT
from os.path import join
from faasmcli.util.env import PROJ_ROOT
from faasmcli.util.config import get_faasm_config
from faasmcli.util.version import get_faasm_version
REPO_NAME = "faasm/faasm"
VERSIONED_FILES = [
".env",
"VERSION",
]
VERSIONED_DIRS = [
join(PROJ_ROOT, ".github"),
join(PROJ_ROOT, "deploy"),
]
def _tag_name(version):
return "v{}".format(version)
def _get_tag():
faasm_ver = get_faasm_version()
tag_name = _tag_name(faasm_ver)
return tag_name
def _get_current_branch_name():
branch_out = run(
"git rev-parse --abbrev-ref HEAD",
shell=True,
stdout=PIPE,
stderr=STDOUT,
)
branch_name = branch_out.stdout.decode()
branch_name = branch_name.strip()
return branch_name
def _get_release():
r = _get_repo()
rels = r.get_releases()
tag_name = _get_tag()
rel = rels[0]
if rel.tag_name != tag_name:
print(
"Expected latest release to have tag {} but had {}".format(
tag_name, rel.tag_name
)
)
exit(1)
return rel
def _get_github_instance():
conf = get_faasm_config()
if not conf.has_section("Github") or not conf.has_option(
"Github", "access_token"
):
print("Must set up Github config with access token")
token = conf["Github"]["access_token"]
g = Github(token)
return g
def _get_repo():
g = _get_github_instance()
return g.get_repo(REPO_NAME)
def _create_tag(tag_name, force=False):
# Create the tag
run(
"git tag {} {}".format("--force" if force else "", tag_name),
shell=True,
check=True,
cwd=PROJ_ROOT,
)
# Push tag
run(
"git push {} origin {}".format("--force" if force else "", tag_name),
shell=True,
check=True,
cwd=PROJ_ROOT,
)
@task
def bump(ctx, ver=None):
"""
Increase the version (defaults to bumping a single minor version)
"""
old_ver = get_faasm_version()
if ver:
new_ver = ver
else:
# Just bump the last minor version part
new_ver_parts = old_ver.split(".")
new_ver_minor = int(new_ver_parts[-1]) + 1
new_ver_parts[-1] = str(new_ver_minor)
new_ver = ".".join(new_ver_parts)
# Replace version in all files
for f in VERSIONED_FILES:
sed_cmd = "sed -i 's/{}/{}/g' {}".format(old_ver, new_ver, f)
run(sed_cmd, shell=True, check=True)
# Replace version in dirs
for d in VERSIONED_DIRS:
sed_cmd = [
"find {}".format(d),
"-type f",
"-exec sed -i -e 's/{}/{}/g'".format(old_ver, new_ver),
"{} \;",
]
sed_cmd = " ".join(sed_cmd)
print(sed_cmd)
run(sed_cmd, shell=True, check=True)
@task
def tag(ctx, force=False):
"""
Tags the latest commit on the current branch
"""
# Work out the tag name
tag_name = _get_tag()
branch_name = _get_current_branch_name()
# Create a tag from the current branch
print(
"Creating tag {} from current branch {}".format(tag_name, branch_name)
)
_create_tag(tag_name, force=force)
@task
def create(ctx):
"""
Create a draft release on Github
"""
# Work out the tag
faasm_ver = get_faasm_version()
tag_name = _tag_name(faasm_ver)
# Create a release in github from this tag
r = _get_repo()
r.create_git_release(
tag_name,
"Faasm {}".format(faasm_ver),
"Release {}\n".format(faasm_ver),
draft=True,
)
@task
def publish(ctx):
"""
Publish the draft release
"""
rel = _get_release()
rel.update_release(rel.title, rel.raw_data["body"], draft=False)
```
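The bump logic above only touches the final version component; restated as a tiny standalone sketch with a hypothetical version string:
```python
old_ver = "0.8.2"  # hypothetical current version
parts = old_ver.split(".")
parts[-1] = str(int(parts[-1]) + 1)
new_ver = ".".join(parts)
assert new_ver == "0.8.3"
```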
#### File: faasmcli/util/codegen.py
```python
from faasmcli.util.shell import find_command
def find_codegen_shared_lib():
return find_command("codegen_shared_obj")
def find_codegen_func(wamr=False):
if wamr:
return find_command("wamrc")
else:
return find_command("codegen_func")
``` |
{
"source": "jheinemeyer/JukeBot",
"score": 2
} |
#### File: jheinemeyer/JukeBot/bot.py
```python
from discord.ext import commands
import discord
from discord.player import FFmpegPCMAudio
import datetime, re
import asyncio
import math
import logging
import aiohttp
from yarl import URL
import sys
import traceback
import config
from pprint import pprint
from tempfile import TemporaryDirectory
from pathlib import Path
from datetime import timedelta
import functools
from pandora.clientbuilder import SettingsDictBuilder
from station import Station, Song
from utils import predicates
log: logging.Logger = logging.getLogger(__name__)
class JukeBot(commands.Bot):
def __init__(self, **options):
super().__init__(**options)
# for staging the files from Pandora
self.directory = TemporaryDirectory()
self.default_channel = None
self.client = None
self.info = None
self.add_command(self.shutdown)
self.add_command(self.login_pandora)
self.add_command(self.pandora_login_as_user)
self.add_command(self.default)
self.add_command(self.list)
self.add_command(self.stations)
self.add_command(self.play)
self.add_command(self.pause)
self.add_command(self.resume)
self.add_command(self.skip)
def run(self):
super().run(config.token, reconnect=True)
async def on_ready(self):
print(f'Logged in: {self.user.name} (ID: {self.user.id})')
print('------')
# Cache this crap.
self.default_channel = self.get_channel(id = config.default_channel)
self.info = await self.application_info()
await self.default_channel.send("```REACTOR ONLINE.\nSENSORS ONLINE.\nWEAPONS ONLINE.\nALL SYSTEMS NOMINAL.```")
async def on_command_error(self, ctx, error):
if isinstance(error, commands.NoPrivateMessage):
await ctx.author.send('This command cannot be used in private messages.')
elif isinstance(error, commands.DisabledCommand):
await ctx.author.send('Sorry. This command is disabled and cannot be used.')
elif isinstance(error, commands.CommandInvokeError):
print(f'In {ctx.command.qualified_name}:', file=sys.stderr)
traceback.print_tb(error.original.__traceback__)
print(f'{error.original.__class__.__name__}: {error.original}', file=sys.stderr)
@commands.command()
@commands.is_owner()
async def shutdown(self, ctx):
await self.close()
@commands.group(name="login", invoke_without_command=True)
async def login_pandora(self, ctx):
if ctx.invoked_subcommand == None:
await ctx.send("Login commands should be done in private messages.")
return
''' if not isinstance(ctx.message.channel, discord.PrivateChannel):
await ctx.send("Re-issue the command in this DM with the syntax `!login as [user] [password...]`")
return '''
def login_as_user(self, user_name: str = "", passwd: str = ""):
client = SettingsDictBuilder({
"DECRYPTION_KEY": "R=U!LH$O2B#",
"ENCRYPTION_KEY": "<KEY>",
"PARTNER_USER": "android",
"PARTNER_PASSWORD": "<PASSWORD>",
"DEVICE": "android-generic",
"AUDIO_QUALITY": "highQuality",
}).build()
try:
client.login(user_name, passwd)
except:
raise
self.client = client
@login_pandora.command(name="as")
@predicates.is_private()
async def pandora_login_as_user(self, ctx, user_name: str = "", *, passwd: str = ""):
try:
self.login_as_user(user_name, passwd)
except Exception as e:
return await ctx.send("Could not log in: {0}".format(e))
await ctx.send("Pandora seems to have authenticated.")
await self.change_presence(game = discord.Game(type = 0, name = "{0.message.author.name}'s music".format(ctx)))
await self.default_channel.send("{0.message.author.name} has logged me into Pandora!".format(ctx))
@login_pandora.command()
@commands.is_owner()
async def default(self, ctx):
self.login_as_user(config.default_user, config.default_pass)
await self.change_presence(game = discord.Game(type = 0, name = "{0.message.author.name}'s music".format(ctx)))
await self.default_channel.send("{0.message.author.name} has logged me into Pandora!".format(ctx))
@commands.group()
async def list(self, ctx):
if ctx.invoked_subcommand == None:
await ctx.send("List what?")
@list.command()
async def stations(self, ctx: commands.Context):
try:
stations = self.client.get_station_list()
except:
await ctx.send("I could not fetch any availible stations")
return
# I'm assuming the order of the stations returned is durable.
# It's a prototype, alright? It _totally_ won't see production.
embed = discord.Embed(title = "Available Stations")
i: int = 0
station_list: [str] = []
for station in stations:
i += 1
name = station.name[:-6] if station.name.endswith(" Radio") else station.name
station_list.append('#{:>3}:{:>30}'.format(str(i), name))
embed.description = "```" + "\n".join(station_list) + "```"
await ctx.send(embed=embed)
@commands.command()
@predicates.not_private()
async def play(self, ctx: commands.Context, index: int = 1):
if self.client is None:
await ctx.send("I haven't been logged into Pandora, yet!")
return
# Get that fucker's current voice channel.
voice_state = ctx.author.voice
if voice_state is None or voice_state.channel is None:
await ctx.send("You aren't in any voice channel... you tit!")
return
stations = self.client.get_station_list()
index -= 1
if index < 0 or index > (len(stations) - 1):
await ctx.send("There are no stations with that index.")
return
# Create the station handler
station: Station = Station(dir = self.directory, loop = self.loop, station = stations[index])
await station._fill_buffer()
if ctx.voice_client is None:
voice = await voice_state.channel.connect()
else:
voice = ctx.voice_client
await voice.move_to(voice_state.channel)
# Kick off the playlist
await self.play_station(ctx, station=station, voice=voice)
@commands.command()
async def pause(self, ctx):
if ctx.voice_client:
ctx.voice_client.pause()
return
ctx.send("You don't seem to be in a voice channel, {0.author.name}...".format(ctx))
@commands.command()
async def resume(self, ctx):
if ctx.voice_client:
ctx.voice_client.resume()
return
ctx.send("You don't seem to be in a voice channel, {0.author.name}...".format(ctx))
@commands.command()
async def skip(self, ctx):
if ctx.voice_client:
ctx.voice_client.stop()
return
ctx.send("You don't seem to be in a voice channel, {0.author.name}...".format(ctx))
async def play_station(self, ctx, station, voice):
song: Song = await station.dequeue()
def play_next(error):
if self._closed.is_set():
return
print("Reached callback")
coro = self.play_station(ctx=ctx, station=station, voice=voice)
fut = asyncio.run_coroutine_threadsafe(coro, self.loop)
try:
fut.result()
except:
pass
voice.play(song, after = play_next)
minutes = int(song.length / 60)
seconds = song.length % 60
await ctx.send("Now playing: `{0.name}` (by {0.artist}, {1}:{2:0>2})".format(song, minutes, seconds))
if __name__ == "__main__":
bot = JukeBot(command_prefix="!")
bot.run()
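# The config module imported above is expected to provide at least these
# attributes (placeholder values):
#   token = "<discord bot token>"
#   default_channel = 123456789012345678   # id of the status/announcement channel
#   default_user = "<pandora account>"      # used by the `!login default` command
#   default_pass = "<pandora password>"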
``` |
{
"source": "jheino/hashcache",
"score": 3
} |
#### File: jheino/hashcache/hashcache.py
```python
import argparse
import binascii
import hashlib
import logging
import os
import sqlite3
import sys
import traceback
class HashCache:
def __init__(self, filename):
self.connection = sqlite3.connect(filename)
self.cursor = self.connection.cursor()
self.cursor.execute('PRAGMA synchronous = 0')
self.migrate_database()
self.connection.commit()
def migrate_database(self):
self.cursor.execute('PRAGMA user_version')
user_version = self.cursor.fetchone()[0]
if user_version < 1:
self.cursor.execute('CREATE TABLE digest (dev INTEGER, ino INTEGER, size INTEGER, time INTEGER, md5 BLOB, sha256 BLOB, PRIMARY KEY (dev, ino))')
self.cursor.execute('PRAGMA user_version = 1')
def get(self, filename):
statinfo = os.stat(filename)
self.cursor.execute(
'SELECT md5, sha256 FROM digest WHERE dev = ? AND ino = ? AND size = ? AND time = ?',
(statinfo.st_dev, statinfo.st_ino, statinfo.st_size, statinfo.st_mtime_ns)
)
row = self.cursor.fetchone()
if row is None:
logging.info('Hashing: %s', filename)
md5, sha256 = get_digests(filename)
self.cursor.execute(
'INSERT OR REPLACE INTO digest VALUES (?, ?, ?, ?, ?, ?)',
(statinfo.st_dev, statinfo.st_ino, statinfo.st_size, statinfo.st_mtime_ns, md5, sha256)
)
self.connection.commit()
else:
md5, sha256 = row[0], row[1]
return md5, sha256
def close(self):
self.cursor.close()
self.connection.close()
def get_digests(filename):
md5 = hashlib.md5()
sha256 = hashlib.sha256()
with open(filename, 'rb') as file:
for block in iter(lambda: file.read(262144), b''):
md5.update(block)
sha256.update(block)
return md5.digest(), sha256.digest()
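# Usage sketch (hypothetical paths): hash a file in 256 KiB blocks without
# loading it into memory, or go through the sqlite cache keyed on (dev, ino):
#   md5_digest, sha256_digest = get_digests('/tmp/example.bin')
#   cache = HashCache(os.path.expanduser('~/.hashcache'))
#   md5_digest, sha256_digest = cache.get('/tmp/example.bin')
#   cache.close()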
def walk(top):
for dirpath, dirnames, filenames in os.walk(top):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
yield filepath
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] - %(message)s'
)
dbpath = os.path.join(
os.getenv('HOME') or os.getenv('USERPROFILE'),
'.hashcache'
)
parser = argparse.ArgumentParser()
parser.add_argument('--print-md5', action='store_const', dest='print', const='md5')
parser.add_argument('--print-sha256', action='store_const', dest='print', const='sha256')
parser.add_argument('--database', default=dbpath)
parser.add_argument('files', metavar='FILE', nargs='+')
args = parser.parse_args(argv)
hashcache = HashCache(args.database)
for arg in args.files:
filenames = walk(arg) if os.path.isdir(arg) else [arg]
for filename in filenames:
original_filename = filename
if sys.platform == 'win32':
# http://stackoverflow.com/questions/1857335/is-there-any-length-limits-of-file-path-in-ntfs
filename = '\\\\?\\' + os.path.abspath(filename)
if os.path.islink(filename) or not os.path.isfile(filename):
continue
try:
md5, sha256 = hashcache.get(filename)
except PermissionError:
traceback.print_exc()
continue
if args.print:
if args.print == 'md5':
digest = md5
elif args.print == 'sha256':
digest = sha256
sys.stdout.buffer.write('{} {}\n'.format(
binascii.hexlify(digest).decode('ascii'),
original_filename
).encode('utf8'))
hashcache.close()
return 0
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "JHeinzde/ansible-playbook-generator",
"score": 2
} |
#### File: playbook-minimizer/src/__main__.py
```python
import argparse
import os
import yaml
import detect_changed_roles
import diff_calculator
import playbook_minimizer
parser = argparse.ArgumentParser(description='Playbook minimizer')
parser.add_argument('playbook_dir', type=str, help='The directory where the playbook is located in the repository')
parser.add_argument('playbook_name', type=str, help='The name of the playbook file that should be minimized')
parser.add_argument('playbook_out_path', type=str,
                    help='The path to which the minimized playbook should be written')
parser.add_argument('--force_roles_config_path', help='These roles will be force included in the minimized playbook',
type=str, required=False)
def main():
args = parser.parse_args()
repo_path = os.environ['CI_PROJECT_DIR']
before_sha = os.environ['CI_COMMIT_BEFORE_SHA']
after_sha = os.environ['CI_COMMIT_SHA']
environment_name = os.environ['CI_ENVIRONMENT_NAME']
if args.force_roles_config_path is not None:
with open(args.force_roles_config_path) as f:
force_roles = yaml.safe_load(f.read())
else:
force_roles = []
diff = diff_calculator \
.GitLabCIDiffer(repo_path, before_sha, after_sha, args.playbook_dir) \
.get_changed_files()
changed_roles = detect_changed_roles.get_changed_roles("/".join([repo_path, args.playbook_dir]), diff, environment_name)
minimizer = playbook_minimizer.PlaybookMinimizer(changed_roles, "/".join(
[repo_path, args.playbook_dir, args.playbook_name]),
args.playbook_out_path)
minimizer.minify_playbook(force_roles)
if __name__ == '__main__':
main()
```
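The entry point above is driven entirely by GitLab CI variables; a sketch of what has to be present in the environment when running it outside CI (values are placeholders):
```python
import os

# In GitLab CI these variables are provided automatically.
os.environ["CI_PROJECT_DIR"] = "/path/to/checkout"
os.environ["CI_COMMIT_BEFORE_SHA"] = "<previous commit sha>"
os.environ["CI_COMMIT_SHA"] = "<current commit sha>"
os.environ["CI_ENVIRONMENT_NAME"] = "production"
```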
#### File: playbook-minimizer/test/diff_calculator_test.py
```python
import unittest
import git
import mockito
from diff_calculator import GitLabCIDiffer
class DiffCalculatorTest(unittest.TestCase):
def test_ci_diff(self):
mock_repo = mockito.mock(git.Repo)
mock_commit_a = mockito.mock(git.Commit)
mock_commit_b = mockito.mock(git.Commit)
mock_diff_one = mockito.mock(git.Diff)
mock_diff_two = mockito.mock(git.Diff)
mockito.when(mock_repo).commit('this_is_a_test_commit').thenReturn(mock_commit_a)
mockito.when(mock_repo).commit('this_is_a_second_test_commit').thenReturn(mock_commit_b)
mock_diff_one.renamed_file = False
mock_diff_one.a_path = 'testerino'
mock_diff_two.renamed_file = True
mock_diff_two.renamed_to = 'test'
mockito.when(mock_commit_a).diff(mock_commit_b, 'test_playbook').thenReturn([mock_diff_one, mock_diff_two])
mockito.when(git).Repo('test_repo').thenReturn(mock_repo)
differ_under_test = GitLabCIDiffer('test_repo', 'this_is_a_test_commit', 'this_is_a_second_test_commit',
'test_playbook')
actual_changed_files = differ_under_test.get_changed_files()
self.assertEqual(['testerino', 'test'], actual_changed_files)
``` |
{
"source": "jheise/dns_analyze",
"score": 3
} |
#### File: dns_geoip/src/dns_geoip.py
```python
import argparse
import datastream.analyzer
import geoip2.database
class DomainAnalyzer(datastream.analyzer.Analyzer):
def __init__(self, zmq_host, zmq_topic, port, database):
super(DomainAnalyzer, self).__init__(zmq_host, zmq_topic, port)
self.database = geoip2.database.Reader(database)
def _analyze(self, data):
# check if this is a query for a new domain
try:
if "Answer" in data:
if data["Type"] == "A" or data["Type"] == "AAAA":
response = self.database.city(data["Answer"])
geolocation = "{0},{1}".format(response.location.latitude, response.location.longitude)
data["query_geolocation"] = geolocation
data["query_country"] = response.country.name
data["query_city"] = response.city.name
except Exception as e:
print e
#pass
return data
def main(zmq_host, zmq_topic, port, database):
domainanalyzer = DomainAnalyzer(zmq_host, zmq_topic, port, database)
domainanalyzer.activate()
running = True
try:
while running:
domainanalyzer.process()
except KeyboardInterrupt:
running = False
if __name__ == "__main__":
parser = argparse.ArgumentParser("Script to analyze incoming DNS traffic")
parser.add_argument("-zh", "--zmq-host", default="tcp://localhost:7777", help="host running zmq dns stream, default tcp://localhost:7777")
parser.add_argument("-zt", "--zmq-topic", default="dns", help="zmq topic to listen for")
parser.add_argument("-p", "--port", default=9999, type=int,
help="port to bind, default 9999")
parser.add_argument("-d", "--database", default="GeoLite2-Country.mmdb", help="path to geoip database")
args = parser.parse_args()
main(args.zmq_host, args.zmq_topic, args.port, args.database)
```
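A bare pyzmq subscriber sketch (not part of the project) for eyeballing the `dns <json>` messages that the probe below publishes and this analyzer consumes; host, port, and topic are taken from the argparse defaults above:
```python
import json
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://localhost:7777")
sub.setsockopt(zmq.SUBSCRIBE, b"dns")
# Blocks until the probe publishes a message.
message = sub.recv()
topic, _, payload = message.partition(b" ")
print(topic, json.loads(payload))
```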
#### File: dns_probe/src/dns_probe.py
```python
import argparse
import zmq
import time
import json
from scapy.all import *
def expand(p):
yield p.name
while p.payload:
p = p.payload
yield p.name
class DnsProbe(object):
def __init__(self, socket):
super(DnsProbe, self).__init__()
self.socket = socket
self.running = True
def process(self):
while self.running:
sniff(filter="udp and port 53", prn=self.check_packet, count=1, store=0)
def check_packet(self, pkt):
try:
src_ip = None
dst_ip = None
if IP in pkt:
src_ip = pkt[IP].src
dst_ip = pkt[IP].dst
else:
src_ip = pkt[IPv6].src
dst_ip = pkt[IPv6].dst
query = pkt[DNS].qd.qname
captured = time.time()
data = {"SrcIP":src_ip,
"DstIP":dst_ip,
"Query":query,
"Timestamp":captured}
output = json.dumps(data)
print output
self.socket.send("dns " + output)
except Exception as e:
print e
print list(expand(pkt))
def main(interface, port):
print "Starting capture on {0}".format(interface)
# set the given interface
conf.iface = interface
# setup the zmq pub socket
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:{0}".format(port))
dnsprobe = DnsProbe(socket)
try:
print "scanning for dns requests..."
#sniff(filter="udp and port 53", prn=check_packet, count=10, store=0)
dnsprobe.process()
except KeyboardInterrupt:
print "Stopping..."
dnsprobe.running = False
if __name__ == "__main__":
parser = argparse.ArgumentParser("Script to analyze incoming DNS traffic")
parser.add_argument("-i", "--interface", default="eth0",
help="Interface to use, default eth0")
parser.add_argument("-p", "--port", default=7777, type=int,
help="port to bind, default 7777")
args = parser.parse_args()
main(args.interface, args.port)
``` |
{
"source": "jheiselman/spacewar-type-r",
"score": 3
} |
#### File: jheiselman/spacewar-type-r/actors.py
```python
import os
import math
import pygame
from lava.primitives import *
from lava.media import MediaManager
mediaman = MediaManager()
class Star(GLSprite):
def __init__(self, world, startx, starty):
GLSprite.__init__(self)
self.gravityEffect = world.g_collision
self.can_die = False
self.to_hit = 32000
self.load_texture(mediaman.load_image('sun.png', 'images'))
self.width = self.height = 50
self.radius = 19
self.x = startx
self.y = starty
self.vx = 0.0
self.vy = 0.0
self.facing = 0.0
def update(self, interval):
# Divide by 4 to fake a larger mass
self.x += (self.vx / 4) * interval
self.y += (self.vy / 4) * interval
def gravitate(self, interval):
for sprite in self.gravityEffect.iterate_sprites():
rad = self.radius * self.scale
# Get vector: from self to sprite
vector = (sprite.x - self.x, sprite.y - self.y)
# Find radian angle of vector, reverse direction
angle = math.atan2(vector[1], vector[0]) - math.pi
# Get distance, adjust for self's radius
distance = math.sqrt(vector[0] ** 2 + vector[1] ** 2) - rad
if distance > 0:
r = distance / 200 + 1
# Gravity calculation
force = 0.15 / 1000 / (r ** 2) * interval
else:
force = 0.0
(gx, gy) = sincos(angle, force)
sprite.vx += gx
sprite.vy += gy
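# Worked example of the force law above (illustrative numbers): a sprite whose
# edge-to-edge distance from the star is 200 px gives r = 200/200 + 1 = 2, so
# force = 0.15 / 1000 / 4 * interval -- roughly inverse-square falloff in the
# scaled distance, applied along the angle pointing back toward the star.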
class Ship(GLSprite):
def __init__(self, world, name = "Unknown", image = 'ship.png'):
GLSprite.__init__(self)
self.can_die = True
self.world = world
self.name = name
self.load_texture(mediaman.load_image(image, 'images'))
self.width = self.height = 40
self.radius = 18
# Starting position and orientation
self.facing = 0.0
self.x = 0.0
self.y = 0.0
self.vx = 0.0
self.vy = 0.0
# Movement attributes
self.accel = 0.15 / 1000.0
self.maxspeed = 0.3
self.turn_rate = 0.2
self.turn = 0
# Ship attributes
self.life = 3
self.gun_ready = True
self.fire_rate = 400
self.last_fired = 0
self.score = 0
def update(self, interval):
self.x += self.vx * interval
self.y += self.vy * interval
self.facing += self.turn * interval
self.facing %= 360
if not self.gun_ready:
ticks = pygame.time.get_ticks()
if ticks - self.last_fired > self.fire_rate:
self.gun_ready = True
def fire(self):
if not self.gun_ready:
return None
if not self.alive():
return None
self.last_fired = pygame.time.get_ticks()
self.gun_ready = False
Torpedo(self)
def thrust(self, interval):
radian = math.radians(self.facing + 90)
(vx, vy) = sincos(radian, self.accel)
self.vx += vx * interval
self.vy += vy * interval
current_speed = math.sqrt(self.vx ** 2 + self.vy ** 2)
if current_speed > self.maxspeed:
multiple = self.maxspeed / current_speed
self.vx *= multiple
self.vy *= multiple
def reverse_thrust(self, interval):
radian = math.radians(self.facing + 90)
(vx, vy) = sincos(radian, (self.accel / 2))
self.vx -= vx * interval
self.vy -= vy * interval
current_speed = math.sqrt(self.vx ** 2 + self.vy ** 2)
if current_speed > self.maxspeed:
multiple = self.maxspeed / current_speed
self.vx *= multiple
self.vy *= multiple
def rotate(self, multiplier):
self.turn = self.turn_rate * multiplier
def take_damage(self, attacker):
self.life -= attacker.to_hit
ParticleEmitter(self, 3)
if self.life <= 0:
self.kill()
def kill(self):
if self.alive():
ParticleEmitter(self, 10)
GLSprite.kill(self)
class Torpedo(GLSprite):
def __init__(self, parent):
GLSprite.__init__(self)
self.parent = parent
self.name = "torpedo"
self.can_die = True
self.to_hit = 1
self.load_texture(mediaman.load_image('torpedo.png', 'images'))
self.width = self.height = 6
self.radius = 4
# Starting position
self.facing = parent.facing
angle = math.radians(self.facing + 90)
distance = parent.radius * parent.scale + self.radius * self.scale
(sx, sy) = sincos(angle, distance + 1)
self.x = parent.x + sx
self.y = parent.y + sy
# Velocity and TTL
speed = 0.3
self.life = 1500
(tx, ty) = sincos(angle, speed)
self.vx = parent.vx + tx
self.vy = parent.vy + ty
# Add to world
parent.world.g_render.add(self)
parent.world.g_collision.add(self)
def update(self, interval):
self.x += self.vx * interval
self.y += self.vy * interval
self.life -= interval
if self.life <= 0:
self.kill()
``` |
{
"source": "jheister/dvc",
"score": 3
} |
#### File: dvc/utils/pager.py
```python
import logging
import os
import pydoc
import sys
from rich.pager import Pager
from dvc.env import DVC_PAGER
from dvc.utils import format_link
logger = logging.getLogger(__name__)
DEFAULT_PAGER = "less"
DEFAULT_PAGER_FORMATTED = (
f"{DEFAULT_PAGER} --chop-long-lines --clear-screen --RAW-CONTROL-CHARS"
)
def make_pager(cmd):
def _pager(text):
return pydoc.tempfilepager(pydoc.plain(text), cmd)
return _pager
def find_pager():
if not sys.stdout.isatty():
return pydoc.plainpager
env_pager = os.getenv(DVC_PAGER)
if env_pager:
return make_pager(env_pager)
if os.system(f"({DEFAULT_PAGER}) 2>{os.devnull}") == 0:
return make_pager(DEFAULT_PAGER_FORMATTED)
logger.warning(
"Unable to find `less` in the PATH. Check out "
"{} for more info.".format(
format_link("https://man.dvc.org/pipeline/show")
)
)
return pydoc.plainpager
def pager(text: str) -> None:
find_pager()(text)
class DvcPager(Pager):
def show(self, content: str) -> None:
pager(content)
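# Usage sketch: both entry points route through find_pager()'s choice of pager,
# e.g. pager("very long output ...") or DvcPager().show("very long output ...").
# When stdout is not a tty (tests, pipes) this falls back to pydoc.plainpager.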
``` |
{
"source": "jhejna/research-lightning",
"score": 3
} |
#### File: research-lightning/scripts/plot.py
```python
import argparse
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
LOG_FILE_NAME = "log.csv"
def moving_avg(x, y, window_size=1):
if window_size == 1:
return x, y
moving_avg_y = np.convolve(y, np.ones(window_size) / window_size, 'valid')
return x[-len(moving_avg_y):], moving_avg_y
def plot_run(paths, name, ax=None, x_key="steps", y_keys=["eval/loss"], window_size=1, max_x_value=None):
for path in paths:
assert LOG_FILE_NAME in os.listdir(path), "Did not find log file, found " + " ".join(os.listdir(path))
for y_key in y_keys:
xs, ys = [], []
for path in paths:
df = pd.read_csv(os.path.join(path, LOG_FILE_NAME))
if y_key not in df:
print("[research] WARNING: y_key was not in run, skipping plot", path)
x, y = moving_avg(df[x_key], df[y_key], window_size=window_size)
assert len(x) == len(y)
if max_x_value is not None:
y = y[x <= max_x_value] # need to set y value first
x = x[x <= max_x_value]
xs.append(x)
ys.append(y)
xs = np.concatenate(xs, axis=0)
ys = np.concatenate(ys, axis=0)
plot_df = pd.DataFrame({x_key: xs, y_key: ys})
label = name + " " + y_key if len(y_keys) > 1 else name
ci = "sd" if len(paths) > 0 else None
sns.lineplot(ax=ax, x=x_key, y=y_key, data=plot_df, sort=True, ci=ci, label=label)
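# Programmatic usage sketch (hypothetical run directories and keys):
#   plot_run(["runs/exp1/seed0", "runs/exp1/seed1"], "exp1",
#            x_key="step", y_keys=["eval/loss"], window_size=5)
#   plt.savefig("exp1.png")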
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output", "-o", type=str, default="plot.png", help="Path of output plot")
parser.add_argument("--path", "-p", nargs='+', type=str, required=True, help="Paths of runs to plot")
parser.add_argument("--legend", "-l", nargs='+', type=str, required=False, help="Names of each run to display in the legend")
parser.add_argument("--title", "-t", type=str, required=False, help="Plot title")
parser.add_argument("--window", "-w", type=int, default=1, help="Moving window averaging parameter.")
parser.add_argument("--x", "-x", type=str, default="step", help="X value to plot")
parser.add_argument("--max-x", "-m", type=int, default=None, help="Max x value to plot")
parser.add_argument("--x-label", "-xl", type=str, default=None, help="X label to display on the plot")
parser.add_argument("--y", "-y", type=str, nargs='+', default=["eval/loss"], help="Y value(s) to plot")
parser.add_argument("--y-label", "-yl", type=str, default=None, help="Y label to display on the plot")
parser.add_argument("--fig-size", "-f", nargs=2, type=int, default=(6, 4))
args = parser.parse_args()
paths = args.path
# Check to see if we should auto-expand the path.
# Do this only if the number of paths specified is one and each sub-path is a directory
if len(paths) == 1 and all([os.path.isdir(os.path.join(paths[0], d)) for d in os.listdir(paths[0])]):
paths = sorted([os.path.join(paths[0], d) for d in os.listdir(paths[0])])
# Now create the labels
labels = args.legend
if labels is None:
labels = [os.path.basename(path[:-1] if path.endswith('/') else path) for path in paths]
# Sort the paths alphabetically by the labels
paths, labels = zip(*sorted(zip(paths, labels), key=lambda x: x[0])) # Alphabetically sort by filename
for path, label in zip(paths, labels):
if LOG_FILE_NAME not in os.listdir(path):
path = [os.path.join(path, run) for run in os.listdir(path)]
else:
path = [path]
sns.set_context(context="paper", font_scale=1.2)
sns.set_style("darkgrid", {'font.family': 'serif'})
plot_run(path, label, x_key=args.x, y_keys=args.y, window_size=args.window, max_x_value=args.max_x)
# Set relevant labels
if args.title:
plt.title(args.title)
# Label X
if args.x_label is not None:
        plt.xlabel(args.x_label)
elif args.x is not None:
plt.xlabel(args.x)
# Label Y
if args.y_label is not None:
plt.ylabel(args.y_label)
elif args.y is not None and len(args.y) == 1:
plt.ylabel(args.y[0])
# Save the plot
print("[research] Saving plot to", args.output)
plt.gcf().set_size_inches(*args.fig_size)
plt.tight_layout(pad=0)
plt.savefig(args.output, dpi=200) # Increase DPI for higher res.
``` |
{
"source": "jheld/dyicrate",
"score": 2
} |
#### File: dyicrate/diycrate/iter_utils.py
```python
import logging
import time
from typing import Union
from boxsdk import BoxAPIException
from boxsdk.object.file import File
from boxsdk.object.folder import Folder
from .log_utils import setup_logger
setup_logger()
crate_logger = logging.getLogger(__name__)
class SafeIter:
def __init__(self, iterable, path=None, tries=5):
self.iterable = iterable
self.path = path
self.tries = tries or 5
def __iter__(self):
return self
def __next__(self) -> Union[File, Folder, None]:
tries = self.tries
buffered_exc = None
for _ in range(tries):
try:
value = next(self.iterable)
except BoxAPIException as e:
buffered_exc = e
crate_logger.warning(
"Box API exception in folder: {path}".format(path=self.path),
exc_info=True,
)
time.sleep(5)
else:
break
else:
crate_logger.warning(
"Could not recover from Box API issues after {tries} "
"tries on folder: {path}".format(tries=tries, path=self.path),
exc_info=buffered_exc,
)
value = None
return value
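# Usage sketch (hypothetical Box folder): transient BoxAPIExceptions are retried
# up to `tries` times, and None is yielded once they are exhausted, so callers
# should skip None entries:
#   for entry in SafeIter(iter(folder.get_items()), path="/some/folder"):
#       if entry is None:
#           continue
#       process(entry)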
``` |
{
"source": "jheld/kombu",
"score": 2
} |
#### File: funtests/tests/test_mongodb.py
```python
from nose import SkipTest
from kombu import Consumer, Producer, Exchange, Queue
from kombu.five import range
from kombu.utils import nested
from funtests import transport
class test_mongodb(transport.TransportCase):
transport = 'mongodb'
prefix = 'mongodb'
event_loop_max = 100
def before_connect(self):
try:
import pymongo # noqa
except ImportError:
raise SkipTest('pymongo not installed')
def after_connect(self, connection):
connection.channel().client # evaluate connection.
self.c = self.connection # shortcut
def test_fanout(self, name='test_mongodb_fanout'):
if not self.verify_alive():
return
c = self.connection
self.e = Exchange(name, type='fanout')
self.q = Queue(name, exchange=self.e, routing_key=name)
self.q2 = Queue(name + '2', exchange=self.e, routing_key=name + '2')
channel = c.default_channel
producer = Producer(channel, self.e)
consumer1 = Consumer(channel, self.q)
consumer2 = Consumer(channel, self.q2)
self.q2(channel).declare()
for i in range(10):
producer.publish({'foo': i}, routing_key=name)
for i in range(10):
producer.publish({'foo': i}, routing_key=name + '2')
_received1 = []
_received2 = []
def callback1(message_data, message):
_received1.append(message)
message.ack()
def callback2(message_data, message):
_received2.append(message)
message.ack()
consumer1.register_callback(callback1)
consumer2.register_callback(callback2)
with nested(consumer1, consumer2):
while 1:
if len(_received1) + len(_received2) == 20:
break
c.drain_events(timeout=60)
self.assertEqual(len(_received1) + len(_received2), 20)
# queue.delete
for i in range(10):
producer.publish({'foo': i}, routing_key=name)
self.assertTrue(self.q(channel).get())
self.q(channel).delete()
self.q(channel).declare()
self.assertIsNone(self.q(channel).get())
# queue.purge
for i in range(10):
producer.publish({'foo': i}, routing_key=name + '2')
self.assertTrue(self.q2(channel).get())
self.q2(channel).purge()
self.assertIsNone(self.q2(channel).get())
```
#### File: kombu/tests/test_common.py
```python
from __future__ import absolute_import
import socket
from amqp import RecoverableConnectionError
from kombu import common
from kombu.common import (
Broadcast, maybe_declare,
send_reply, collect_replies,
declaration_cached, ignore_errors,
QoS, PREFETCH_COUNT_MAX,
)
from .case import Case, ContextMock, Mock, MockPool, patch
class test_ignore_errors(Case):
def test_ignored(self):
connection = Mock()
connection.channel_errors = (KeyError, )
connection.connection_errors = (KeyError, )
with ignore_errors(connection):
raise KeyError()
def raising():
raise KeyError()
ignore_errors(connection, raising)
connection.channel_errors = connection.connection_errors = \
()
with self.assertRaises(KeyError):
with ignore_errors(connection):
raise KeyError()
class test_declaration_cached(Case):
def test_when_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['foo']
self.assertTrue(declaration_cached('foo', chan))
def test_when_not_cached(self):
chan = Mock()
chan.connection.client.declared_entities = ['bar']
self.assertFalse(declaration_cached('foo', chan))
class test_Broadcast(Case):
def test_arguments(self):
q = Broadcast(name='test_Broadcast')
self.assertTrue(q.name.startswith('bcast.'))
self.assertEqual(q.alias, 'test_Broadcast')
self.assertTrue(q.auto_delete)
self.assertEqual(q.exchange.name, 'test_Broadcast')
self.assertEqual(q.exchange.type, 'fanout')
q = Broadcast('test_Broadcast', 'explicit_queue_name')
self.assertEqual(q.name, 'explicit_queue_name')
self.assertEqual(q.exchange.name, 'test_Broadcast')
class test_maybe_declare(Case):
def test_cacheable(self):
channel = Mock()
client = channel.connection.client = Mock()
client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.auto_delete = False
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel)
self.assertEqual(entity.declare.call_count, 1)
self.assertIn(
hash(entity), channel.connection.client.declared_entities,
)
maybe_declare(entity, channel)
self.assertEqual(entity.declare.call_count, 1)
entity.channel.connection = None
with self.assertRaises(RecoverableConnectionError):
maybe_declare(entity)
def test_binds_entities(self):
channel = Mock()
channel.connection.client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = False
entity.bind.return_value = entity
entity.bind.return_value.channel = channel
maybe_declare(entity, channel)
entity.bind.assert_called_with(channel)
def test_with_retry(self):
channel = Mock()
client = channel.connection.client = Mock()
client.declared_entities = set()
entity = Mock()
entity.can_cache_declaration = True
entity.is_bound = True
entity.channel = channel
maybe_declare(entity, channel, retry=True)
self.assertTrue(channel.connection.client.ensure.call_count)
class test_replies(Case):
def test_send_reply(self):
req = Mock()
req.content_type = 'application/json'
req.content_encoding = 'binary'
req.properties = {'reply_to': 'hello',
'correlation_id': 'world'}
channel = Mock()
exchange = Mock()
exchange.is_bound = True
exchange.channel = channel
producer = Mock()
producer.channel = channel
producer.channel.connection.client.declared_entities = set()
send_reply(exchange, req, {'hello': 'world'}, producer)
self.assertTrue(producer.publish.call_count)
args = producer.publish.call_args
self.assertDictEqual(args[0][0], {'hello': 'world'})
self.assertDictEqual(args[1], {'exchange': exchange,
'routing_key': 'hello',
'correlation_id': 'world',
'serializer': 'json',
'retry': False,
'retry_policy': None,
'content_encoding': 'binary'})
@patch('kombu.common.itermessages')
def test_collect_replies_with_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue, no_ack=False)
m = next(it)
self.assertIs(m, body)
itermessages.assert_called_with(conn, channel, queue, no_ack=False)
message.ack.assert_called_with()
with self.assertRaises(StopIteration):
next(it)
channel.after_reply_message_received.assert_called_with(queue.name)
@patch('kombu.common.itermessages')
def test_collect_replies_no_ack(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
body, message = Mock(), Mock()
itermessages.return_value = [(body, message)]
it = collect_replies(conn, channel, queue)
m = next(it)
self.assertIs(m, body)
itermessages.assert_called_with(conn, channel, queue, no_ack=True)
self.assertFalse(message.ack.called)
@patch('kombu.common.itermessages')
def test_collect_replies_no_replies(self, itermessages):
conn, channel, queue = Mock(), Mock(), Mock()
itermessages.return_value = []
it = collect_replies(conn, channel, queue)
with self.assertRaises(StopIteration):
next(it)
self.assertFalse(channel.after_reply_message_received.called)
class test_insured(Case):
@patch('kombu.common.logger')
def test_ensure_errback(self, logger):
common._ensure_errback('foo', 30)
self.assertTrue(logger.error.called)
def test_revive_connection(self):
on_revive = Mock()
channel = Mock()
common.revive_connection(Mock(), channel, on_revive)
on_revive.assert_called_with(channel)
common.revive_connection(Mock(), channel, None)
def get_insured_mocks(self, insured_returns=('works', 'ignored')):
conn = ContextMock()
pool = MockPool(conn)
fun = Mock()
insured = conn.autoretry.return_value = Mock()
insured.return_value = insured_returns
return conn, pool, fun, insured
def test_insured(self):
conn, pool, fun, insured = self.get_insured_mocks()
ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'})
self.assertEqual(ret, 'works')
conn.ensure_connection.assert_called_with(
errback=common._ensure_errback,
)
self.assertTrue(insured.called)
i_args, i_kwargs = insured.call_args
self.assertTupleEqual(i_args, (2, 2))
self.assertDictEqual(i_kwargs, {'foo': 'bar',
'connection': conn})
self.assertTrue(conn.autoretry.called)
ar_args, ar_kwargs = conn.autoretry.call_args
self.assertTupleEqual(ar_args, (fun, conn.default_channel))
self.assertTrue(ar_kwargs.get('on_revive'))
self.assertTrue(ar_kwargs.get('errback'))
def test_insured_custom_errback(self):
conn, pool, fun, insured = self.get_insured_mocks()
custom_errback = Mock()
common.insured(pool, fun, (2, 2), {'foo': 'bar'},
errback=custom_errback)
conn.ensure_connection.assert_called_with(errback=custom_errback)
class MockConsumer(object):
consumers = set()
def __init__(self, channel, queues=None, callbacks=None, **kwargs):
self.channel = channel
self.queues = queues
self.callbacks = callbacks
def __enter__(self):
self.consumers.add(self)
return self
def __exit__(self, *exc_info):
self.consumers.discard(self)
class test_itermessages(Case):
class MockConnection(object):
should_raise_timeout = False
def drain_events(self, **kwargs):
if self.should_raise_timeout:
raise socket.timeout()
for consumer in MockConsumer.consumers:
for callback in consumer.callbacks:
callback('body', 'message')
def test_default(self):
conn = self.MockConnection()
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
ret = next(it)
self.assertTupleEqual(ret, ('body', 'message'))
with self.assertRaises(StopIteration):
next(it)
def test_when_raises_socket_timeout(self):
conn = self.MockConnection()
conn.should_raise_timeout = True
channel = Mock()
channel.connection.client = conn
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with self.assertRaises(StopIteration):
next(it)
@patch('kombu.common.deque')
def test_when_raises_IndexError(self, deque):
deque_instance = deque.return_value = Mock()
deque_instance.popleft.side_effect = IndexError()
conn = self.MockConnection()
channel = Mock()
conn.Consumer = MockConsumer
it = common.itermessages(conn, channel, 'q', limit=1)
with self.assertRaises(StopIteration):
next(it)
class test_QoS(Case):
class _QoS(QoS):
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value)
def set(self, value):
return value
def test_qos_exceeds_16bit(self):
with patch('kombu.common.logger') as logger:
callback = Mock()
qos = QoS(callback, 10)
qos.prev = 100
# cannot use 2 ** 32 because of a bug on OSX Py2.5:
# https://jira.mongodb.org/browse/PYTHON-389
qos.set(4294967296)
self.assertTrue(logger.warn.called)
callback.assert_called_with(prefetch_count=0)
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment_eventually(), 11)
self.assertEqual(qos.increment_eventually(3), 14)
self.assertEqual(qos.increment_eventually(-30), 14)
self.assertEqual(qos.decrement_eventually(7), 7)
self.assertEqual(qos.decrement_eventually(), 6)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment_eventually(), 0)
self.assertEqual(qos.increment_eventually(3), 0)
self.assertEqual(qos.increment_eventually(-30), 0)
self.assertEqual(qos.decrement_eventually(7), 0)
self.assertEqual(qos.decrement_eventually(), 0)
self.assertEqual(qos.decrement_eventually(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in range(1000):
qos.increment_eventually()
def sub():
for i in range(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1)
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement_eventually()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.update()
self.assertEqual(qos.value, 10)
mconsumer.qos.assert_called_with(prefetch_count=10)
qos.decrement_eventually()
qos.update()
self.assertEqual(qos.value, 9)
mconsumer.qos.assert_called_with(prefetch_count=9)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
mconsumer.qos.assert_called_with(prefetch_count=9)
self.assertIn({'prefetch_count': 9}, mconsumer.qos.call_args)
# Does not decrement 0 value
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
qos.increment_eventually()
self.assertEqual(qos.value, 0)
def test_consumer_decrement_eventually(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.decrement_eventually()
self.assertEqual(qos.value, 9)
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
def test_set(self):
mconsumer = Mock()
qos = QoS(mconsumer.qos, 10)
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
``` |
{
"source": "jheld/lifx-django",
"score": 2
} |
#### File: lifx-django/lifx_management/forms.py
```python
from django import forms
import sys
from os.path import pardir, join, abspath, dirname
bulbLabels = []
fullpathToLIFXPython = join(join(pardir,dirname(dirname(dirname(abspath(__file__))))),'lifx-python/')
if not fullpathToLIFXPython in sys.path:
sys.path.append(fullpathToLIFXPython)
import lifx
import imp
imp.load_source('schedule_cycle','{0}{1}'.format(fullpathToLIFXPython,'schedule-cycle.py'))
from schedule_cycle import ScheduleCycle
class ScheduleCycleForm(forms.Form):
for light in lifx.get_lights():
bulbLabels.append((light.bulb_label,light.bulb_label))
bulbs = forms.MultipleChoiceField(choices=bulbLabels)
time = forms.FloatField()
speed = forms.FloatField()
def startCycle(self,bulbs,time,speed):
lights = lifx.get_lights()
lightsCycle = []
for light in lights:
for bulb in bulbs:
if bulb in light.bulb_label:
lightsCycle.append(light)
scheduleCycle = ScheduleCycle()
scheduleCycle.driver(lightsCycle,time,speed)
```
#### File: lifx-django/lifx_management/views.py
```python
from django.shortcuts import render
from lifx_management.forms import ScheduleCycleForm
from django.views.generic.edit import FormView
import threading
# Create your views here.
class ScheduleCycleView(FormView):
template_name = 'schedule_cycle.html'
form_class = ScheduleCycleForm
success_url = '/'
def form_valid(self, form):
timer = threading.Timer(5, form.startCycle, args=[form.cleaned_data['bulbs'], form.cleaned_data['time'], form.cleaned_data['speed']])
timer.start()
return super(ScheduleCycleView, self).form_valid(form)
``` |
{
"source": "jhelgert/IliasDownloaderUniMA",
"score": 2
} |
#### File: IliasDownloaderUniMA/IliasDownloaderUniMA/IliasDL.py
```python
from requests import session, get, ConnectionError
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from pathlib import Path as plPath
from dateparser import parse as parsedate
from datetime import datetime
from multiprocessing.pool import ThreadPool
import math
import os
import shutil
import re
class IliasDownloaderUniMA():
"""
Base class
"""
base_url = "https://ilias.uni-mannheim.de/"
desktop_url = "https://ilias.uni-mannheim.de/ilias.php?baseClass=ilPersonalDesktopGUI"
def __init__(self):
"""
Constructs a new instance.
"""
self.current_semester_pattern = self.getCurrentSemester()
self.courses = []
self.to_scan = []
self.files = []
self.params = {
'num_scan_threads' : 5,
'num_download_threads': 5,
'download_path': os.getcwd(),
'tutor_mode': False,
'verbose' : False
}
self.session = None
self.login_soup = None
self.background_task_files = []
self.background_tasks_to_clean = []
self.external_scrapers = []
def getCurrentSemester(self):
d = datetime.now()
if d.month in range(2, 8):
return rf"\((FSS|ST) {d.year}\)"
else:
return rf"\((HWS|WT) {d.year}\)"
def setParam(self, param, value):
"""
Sets the parameter.
        :param param: The parameter we want to alter
        :type param: string
:param value: The new parameter value
:type value: str or int
"""
if param in ['num_scan_threads', 'num_download_threads']:
if type(value) is int:
self.params[param] = value
if param == 'download_path':
if os.path.isdir(value):
self.params[param] = value
if param == 'verbose':
if type(value) is bool:
self.params[param] = value
if param == 'tutor_mode':
if type(value) is bool:
self.params[param] = value
def createIliasUrl(self, iliasid):
"""
Creates an ilias url from the ilias ref id.
:param iliasid: The ilias ref id
:type iliasid: int
:returns: feasible url
:rtype: str
"""
return self.base_url + "ilias.php?ref_id=" + str(iliasid) \
+ "&cmd=frameset" + "&cmdClass=ilrepositorygui" \
+ "&cmdNode=vi" + "&baseClass=ilrepositorygui"
def login(self, login_id, login_pw):
"""
create requests session and log into ilias.uni-mannheim.de
        :param login_id: the uni-id used for the login
        :type login_id: str
        :param login_pw: the password used for the login
        :type login_pw: str
:raises TypeError:
"""
if type(login_id) is not str or type(login_pw) is not str:
raise TypeError("...")
# User data and user-agent
        data = {'username': login_id, 'password': login_pw}
head = {
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) "\
+ "AppleWebKit/537.36 (KHTML, like Gecko) " \
+ "Chrome/56.0.2924.87 Safari/537.36",
'Connection': 'keep-alive'
}
self.session = session()
self.login_soup = BeautifulSoup(self.session.get("https://cas.uni-mannheim.de/cas/login").content, "lxml")
form_data = self.login_soup.select('form[action^="/cas/login"] input')
data.update({inp["name"]: inp["value"] for inp in form_data if inp["name"] not in data})
self.session.post('https://cas.uni-mannheim.de/cas/login', data=data, headers=head)
self.login_soup = BeautifulSoup(self.session.get(self.base_url).content, "lxml")
# Login successful? FIY
if not self.login_soup.find("a", {'id' : 'mm_desktop'}):
raise ConnectionError("Couldn't log into ILIAS. Make sure your provided uni-id and the password are correct.")
def addCourse(self, iliasid, course_name=None):
"""
Adds a course to the courses list.
:param iliasid: the ilias ref id of the course
:type iliasid: int
"""
url = self.createIliasUrl(iliasid)
if not course_name:
soup = BeautifulSoup(self.session.get(url).content, "lxml")
course_name = soup.select_one("#mainscrolldiv > ol > li:nth-child(3) > a").text
if (course_name := re.sub(r"\[.*\] ", "", course_name)):
self.courses += [{'name' : course_name, 'url': url}]
def addCourses(self, *iliasids):
"""
Adds multiple courses to the courses list
:param iliasids: the ilias ref ids of the courses
:type iliasids: list
"""
for iliasid in iliasids:
self.addCourse(iliasid)
def addAllSemesterCourses(self, semester_pattern=None, exclude_ids=[]):
"""
        Extracts the user's subscribed courses of the specified semester
and adds them to the course list.
:param semester_pattern: semester or regex for semester
:type semester_pattern: string
:param exclude_ids: optional ilias ids to ignore
:type exclude_ids: list
"""
if semester_pattern is None:
semester_pattern = self.current_semester_pattern
# Performance gain in case of many courses
semester_compiled = re.compile(semester_pattern)
extract_compiled = re.compile(r"ref_id=(\d+)")
for course in self.login_soup.find_all("a", "il_ContainerItemTitle"):
course_name = course.text
if semester_compiled.search(course_name):
url = course["href"]
if (match := extract_compiled.search(url)):
iliasid = int(match.group(1))
if iliasid not in exclude_ids:
self.addCourse(iliasid, course_name)
def _determineItemType(self, url):
if "target=file" in url:
return "file"
elif "calldirectlink" in url:
return "link"
elif "showThreads" in url:
return "forum"
elif "showOverview" in url:
return "task"
elif "ilHTLMPresentationGUI" in url:
return "lernmaterialien"
else:
return "folder"
def _parseFileProperties(self, bs_item):
"""
Tries to parse the file's size, modification date and the file ending.
Note: there are some cases where Ilias doesn't provide a modification
date and/or a file size.
:param bs_item: The beautifulsoup item
:type bs_item: { type_description }
:returns: file ending, file size, file modification date
:rtype: tuple
"""
props = bs_item.find_all('span', 'il_ItemProperty')
p = [i for i in props if len(i.text.split()) > 0 and "Version" not in i.text]
# Parse the file ending
if len(p[0].text.split()) > 1:
file_ending = ""
else:
file_ending = "." + p[0].text.split()[0]
# Parse the file size
if len(p) > 1:
size_tmp = p[1].text.lower().replace(".","").replace(",", ".").split()
size = float(size_tmp[0])
if size_tmp[1] == "kb":
size *= 1e-3
elif size_tmp[1] == "bytes":
size *= 1e-6
else:
size = math.nan
# Parse the modification date
if len(p) > 2:
mod_date = parsedate(p[2].text)
else:
mod_date = datetime.fromisoformat('2000-01-01')
return file_ending, size, mod_date
def parseVideos(self, mc_soup):
# Checks if there's a video inside the mediacontainer:
if (vsoup := mc_soup.find('video', {"class": "ilPageVideo"})):
if (v_src := vsoup.find('source')['src']):
v_url = urljoin(self.base_url, v_src)
v_name = re.search(r"mobs/mm_\d+/(.*)\?il_wac_token.*", v_src).group(1)
try:
v_size = float(self.session.head(v_url).headers['Content-Length']) * 1e-6
except:
v_size = math.nan
# The HEAD requests misses the 'last-modified' key, so it's not
# possible to get the mod date from there :(
v_mod_date = datetime.fromisoformat('2000-01-01')
return v_name, v_size, v_mod_date, v_url
else:
return None
def scanMediaContainer(self, course_name, file_path, soup):
"""
        Scans videos at the top of the course page inside the MediaContainer and
        adds them to the files list.
:param soup: The soup
:type soup: { type_description }
"""
if self.params['verbose']:
print(f"Scanning Videos...")
for mc in soup.find_all("figure", {"class": "ilc_media_cont_MediaContainer"}):
if (video := self.parseVideos(mc)):
v_name, v_size, v_mod_date, v_url = video
self.files += [{
'course': course_name,
'type': 'file',
'name': v_name,
'size': v_size,
'mod-date': v_mod_date,
'url': v_url,
'path': file_path
}]
def scanContainerList(self, course_name, file_path, soup):
"""
        Scans the soup object for links inside the ContainerList; files are added
        to the files list, while folders, tasks and lernmaterialien are added to
        the list 'to_scan'. See _determineItemType() for the possible types of links.
:param soup:
:type soup: bs4.BeautifulSoup
"""
for i in (items := soup.find_all("div", "il_ContainerListItem")):
if (subitem := i.find('a', href=True)):
el_url = urljoin(self.base_url, subitem['href'])
el_name = subitem.text
el_type = self._determineItemType(el_url)
if el_type == "file":
ending, size, mod_date = self._parseFileProperties(i)
self.files += [{
'course': course_name,
'type': el_type,
'name': el_name + ending,
'size': size,
'mod-date': mod_date,
'url': el_url,
'path': file_path
}]
elif el_type in ["folder", "task", "lernmaterialien"]:
self.to_scan += [{
'type': el_type,
'name': el_name,
'url': el_url
}]
def scanFolder(self, course_name, url_to_scan):
"""
Scans a folder.
:param course_name: The name of the course the folder belongs to
:type course_name: str
:param url_to_scan: The url to scan
:type url_to_scan: str
"""
url = urljoin(self.base_url, url_to_scan)
soup = BeautifulSoup(self.session.get(url).content, "lxml")
file_path = course_name + "/" + "/".join(soup.find("body").find("ol").text.split("\n")[4:-1]) + "/"
file_path = file_path.replace(":", " - ")
if self.params['verbose']:
print(f"Scanning Folder...\n{file_path}\n{url}")
print("-------------------------------------------------")
self.scanMediaContainer(course_name, file_path, soup)
self.scanContainerList(course_name, file_path, soup)
def scanTaskUnit(self, course_name, url_to_scan):
"""
Scans a task unit.
:param course_name: The name of the course the Task belongs to
:type course_name: str
:param url_to_scan: The url to scan
:type url_to_scan: str
"""
url = urljoin(self.base_url, url_to_scan)
soup = BeautifulSoup(self.session.get(url).content, "lxml")
task_unit_name = soup.find("a", {"class" : "ilAccAnchor"}).text
file_path = course_name + "/" + "Aufgaben/" + task_unit_name + "/"
file_path = file_path.replace(":", " - ")
task_items = soup.find("div", {"id":"infoscreen_section_1"}).find_all("div", "form-group")
if self.params['verbose']:
print(f"Scanning TaskUnit...\n{file_path}\n{url}")
print("-------------------------------------------------")
for i in task_items:
el_url = urljoin(self.base_url, i.find('a')['href'])
el_name = i.find("div", 'il_InfoScreenProperty').text
el_type = 'file'
file_mod_date = datetime.fromisoformat('2000-01-01')
file_size = math.nan
self.files += [{
'course': course_name,
'type': el_type,
'name': el_name,
'size': file_size,
'mod-date': file_mod_date,
'url': el_url,
'path': file_path
}]
# Now scan the submissions
if self.params['tutor_mode']:
self.scanTaskUnitSubmissions(course_name, file_path, soup)
def scanTaskUnitSubmissions(self, course_name, file_path, soup):
form_data = {
'user_login': '',
'cmd[downloadSubmissions]': 'Alle Abgaben herunterladen'
}
# Deadline finished?
deadline = soup.select_one('#infoscreen_section_2 > div:nth-child(2) > div.il_InfoScreenPropertyValue.col-xs-9').text
if (deadline_time := parsedate(deadline)) < datetime.now():
# Access to the submissions?
if (tab_grades := soup.select_one('#tab_grades > a')):
tab_grades_url = urljoin(self.base_url, tab_grades['href'])
submissions_soup = BeautifulSoup(self.session.get(tab_grades_url).content, "lxml")
form_action_url = urljoin(self.base_url, submissions_soup.find('form', {'id': 'ilToolbar'})['action'])
# Post form data
r = self.session.post(form_action_url, data=form_data)
el_name = submissions_soup.select_one('#il_mhead_t_focus').text.replace("\n", "") + ".zip"
# Add backgroundtask file to list, we parse the download links
# later from the background tasks tab from the page header
self.background_task_files += [{
'course': course_name,
'type': 'file',
'name': el_name,
'size': math.nan,
'mod-date': deadline_time,
#'url': dl_url,
'path': file_path
}]
def searchBackgroundTaskFile(self, el_name):
#
# TO DO: Cleanup!!!
#
for idx, f in enumerate(self.background_task_files):
f["name"] = f["name"].encode()
f["name"] = f["name"].replace('ü'.encode(), b'ue')
f["name"] = f["name"].replace('Ü'.encode(), b'Ue')
f["name"] = f["name"].replace('ä'.encode(), b'ae')
f["name"] = f["name"].replace('Ä'.encode(), b'Ae')
f["name"] = f["name"].replace('ö'.encode(), b'oe')
f["name"] = f["name"].replace('Ö'.encode(), b'Oe')
f["name"] = f["name"].replace('ß'.encode(), b'ss')
f["name"] = f["name"].decode('utf-8')
if f["name"] == el_name:
return self.background_task_files.pop(idx)
def parseBackgroundTasks(self):
# time.sleep(5) # Not really needed?
# Reload ilias main page to parse the background tasks bar on the top
desktop_soup = BeautifulSoup(self.session.get(self.desktop_url).content, "lxml")
tasks_tab_url = urljoin(self.base_url, desktop_soup.select_one('#mm_tb_background_tasks')['refresh-uri'])
tasks_tab_soup = BeautifulSoup(self.session.get(tasks_tab_url).content, "lxml")
# Extract the items
for i in tasks_tab_soup.find_all('div', {'class': 'il-item-task'}):
# Extract the download url and the remove url
dl, rm = i.find_all('button', {'class': 'btn btn-default'})
dl_url = urljoin(self.base_url, dl['data-action'])
rm_url = urljoin(self.base_url, rm['data-action'])
self.background_tasks_to_clean.append(rm_url)
# Add file to downloads
el_name = i.find('div', {'class' : 'il-item-task-title'}).text.replace("\n", "") + ".zip"
if (bt := self.searchBackgroundTaskFile(el_name)):
self.files += [{
'course': bt['course'],
'type': 'file',
'name': el_name,
'size': bt['size'],
'mod-date': bt['mod-date'],
'url': dl_url,
'path': bt['path']
}]
def scanLernmaterial(self, course_name, url_to_scan):
pass
# ... to do ...
def scanHelper(self, course_name, el):
if len(self.to_scan) > 0:
self.to_scan.pop()
if el['type'] == "folder":
self.scanFolder(course_name, el['url'])
if el['type'] == "task":
self.scanTaskUnit(course_name, el['url'])
elif el['type'] == 'lernmaterialien':
self.scanLernmaterial(course_name, el['url'])
def searchForFiles(self, course_name):
"""
Scans an ilias url and all nested subfolders for files
        :param course_name: the name of the course to scan
        :type course_name: str
"""
while len(self.to_scan) > 0:
results = ThreadPool(self.params['num_scan_threads']).imap_unordered(lambda x: self.scanHelper(course_name, x), self.to_scan)
for r in results:
pass
def addExternalScraper(self, scraper, *args):
self.external_scrapers.append({'fun' : scraper, 'args': args})
def scanCourses(self):
"""
Scans all courses inside the instance's courses list.
"""
for course in self.courses:
self.to_scan += [{
'type' : 'folder',
'name': course['name'],
'url': course['url']
}]
print(f"Scanning {course['name']} with {self.params['num_scan_threads']} Threads....")
self.searchForFiles(course['name'])
# External Scrapers
for d in self.external_scrapers:
print(f"Scanning {d['args'][0]} with the external Scraper....")
self.files += d['fun'](*d['args'])
def downloadFile(self, file):
"""
Downloads a file.
:param file: The file we want do download
:type file: dict
:returns: { description_of_the_return_value }
:rtype: { return_type_description }
"""
file_dl_path = os.path.join(self.params['download_path'],file['path'], file['name'])
file_mod_date = file['mod-date'].timestamp()
size = file['size']
        # Does the file already exist locally, and is it the newest version?
if os.path.exists(file_dl_path) and file_mod_date < os.path.getmtime(file_dl_path):
return
else:
# Download the file
r = self.session.get(file['url'], stream=True)
if r.status_code == 200:
try:
with open(file_dl_path, 'wb') as f:
print(f"Downloading {file['course']}: {file['name']} ({size:.1f} MB)...")
shutil.copyfileobj(r.raw, f)
except Exception as e:
return e
def downloadAllFiles(self):
"""
Downloads all files inside the instance's files list.
"""
# Scan all files
self.scanCourses()
if self.params['tutor_mode']:
# Parse the background tasks, i.e. add them to the download files
self.parseBackgroundTasks()
# Check the file paths
paths = list(set([os.path.join(self.params['download_path'],f['path']) for f in self.files]))
for p in paths:
if not plPath(p).exists():
plPath(p).mkdir(parents=True, exist_ok=True)
# Download all files
for r in ThreadPool(self.params['num_download_threads']).imap_unordered(self.downloadFile, self.files):
pass
# Clean the background tasks tab
if self.params['tutor_mode']:
if self.params['verbose']:
print("Tutor mode. Cleaning the background tasks...")
for r in ThreadPool(self.params['num_download_threads']).imap_unordered(lambda x: self.session.get(x), self.background_tasks_to_clean):
pass
```
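A minimal usage sketch of the downloader class above; the credentials, the download path and the ilias ref id are placeholders, not values from the source:
```python
from IliasDownloaderUniMA import IliasDownloaderUniMA

m = IliasDownloaderUniMA()
m.setParam('download_path', '/path/to/downloads')  # must be an existing directory
m.setParam('num_download_threads', 8)
m.login('your-uni-id', 'your-password')
m.addCourse(123456)  # ilias ref id of the course (placeholder)
m.downloadAllFiles()
```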
#### File: IliasDownloaderUniMA/tests/test_parseVideo.py
```python
from IliasDownloaderUniMA import IliasDownloaderUniMA
from bs4 import BeautifulSoup
import datetime
import math
# Test for parseVideos()
# ------------------------------------------------------------------------------
v_with_caption = """
<html>
<body>
<figure class="ilc_media_cont_MediaContainer" style="display:table; margin-left: 0px;" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<div class="ilc_Mob" style="width:400px;">
<div class="mejs-container svg ilPageVideo mejs-video" id="mep_0" style="width: 400px; height: 300px;">
<div class="mejs-inner">
<div class="mejs-mediaelement">
            <video class="ilPageVideo" height="300" oncontextmenu="return false;" preload="none" src="./data/ILIAS/mobs/mm_1318784/Session_02_DesignofFlowLinesPart1.mp4?il_wac_token=88ff75878db690a37e5fddfddcf055beea79693a&il_wac_ttl=3&il_wac_ts=1603624381" width="400">
              <source src="./data/ILIAS/mobs/mm_1318784/Session_02_DesignofFlowLinesPart1.mp4?il_wac_token=88ff75878db690a37e5fddfddcf055beea79693a&il_wac_ttl=3&il_wac_ts=1603624381" type="video/mp4">
<object data="libs/bower/bower_components/mediaelement/build/flashmediaelement.swf" height="300" type="application/x-shockwave-flash" width="400">
<param name="movie" value="libs/bower/bower_components/mediaelement/build/flashmediaelement.swf"/>
<param name="flashvars" value="controls=true&file=./data/ILIAS/mobs/mm_1318784/Session_02_DesignofFlowLinesPart1.mp4?il_wac_token=88ff75878db690a37e5fddfddcf055beea79693a&il_wac_ttl=3&il_wac_ts=1603624381"/>
</object>
</source>
</video>
</div>
</div>
</div>
</div>
<figcaption style="display: table-caption; caption-side: bottom;">
<div class="ilc_media_caption_MediaCaption">
Recording Session 02 Line Balancing
</div>
</figcaption>
</figure>
</body>
</html>
"""
v_without_caption = """
<html>
<body>
<figure class="ilc_media_cont_MediaContainer" style="display:table; margin-left: 0px;" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<div class="ilc_Mob">
<div class="mejs-container svg ilPageVideo mejs-video" id="mep_0" style="width: 480px; height: 270px;">
<div class="mejs-inner">
<div class="mejs-mediaelement">
<video class="ilPageVideo" oncontextmenu="return false;" preload="none" src="./data/ILIAS/mobs/mm_1299655/HS20_EinfPoWi_Politische_Kultur_und_Sozialisation_A_komprimiert.m4v?il_wac_token=60e9a0575393b725438513fc1d2aadfba054221a&il_wac_ttl=3&il_wac_ts=1603624667">
<source src="./data/ILIAS/mobs/mm_1299655/HS20_EinfPoWi_Politische_Kultur_und_Sozialisation_A_komprimiert.m4v?il_wac_token=60e9a0575393b725438513fc1d2aadfba054221a&il_wac_ttl=3&il_wac_ts=1603624667" type="video/mp4">
<object data="libs/bower/bower_components/mediaelement/build/flashmediaelement.swf" type="application/x-shockwave-flash">
<param name="movie" value="libs/bower/bower_components/mediaelement/build/flashmediaelement.swf"/>
<param name="flashvars" value="controls=true&file=./data/ILIAS/mobs/mm_1299655/HS20_EinfPoWi_Politische_Kultur_und_Sozialisation_A_komprimiert.m4v?il_wac_token=60e9a0575393b725438513fc1d2aadfba054221a&il_wac_ttl=3&il_wac_ts=1603624667"/>
</object>
</source>
</video>
</div>
</div>
</div>
</div>
</figure>
</body>
</html>
"""
v_no_video = """
<html>
<body>
<figure class="ilc_media_cont_MediaContainer" style="display:table; margin-left: 0px;" xmlns:xhtml="http://www.w3.org/1999/xhtml">
<div class="ilc_Mob">
<img border="0" src="./data/ILIAS/mobs/mm_1171726/Quiz_03_Game.png?il_wac_token=4cac4b488b6d370df459c74070c97f07408afd8b&il_wac_ttl=3&il_wac_ts=1603624808" style="width:100%"/>
</div>
</figure>
</body>
</html>
"""
m = IliasDownloaderUniMA()
# We ignore the video size for the test as it's not possible to test
# the HEAD requests without being logged into ilias.
def test_no_video():
soup = BeautifulSoup(v_no_video, "lxml")
assert m.parseVideos(soup) is None
def test_without_caption():
soup = BeautifulSoup(v_without_caption, "lxml")
v_name, v_size, v_mod_date, v_url = m.parseVideos(soup)
assert v_name == 'HS20_EinfPoWi_Politische_Kultur_und_Sozialisation_A_komprimiert.m4v'
assert v_mod_date == datetime.datetime.fromisoformat('2000-01-01')
assert v_url == 'https://ilias.uni-mannheim.de/data/ILIAS/mobs/mm_1299655/HS20_EinfPoWi_Politische_Kultur_und_Sozialisation_A_komprimiert.m4v?il_wac_token=60e9a0575393b725438513fc1d2aadfba054221a&il_wac_ttl=3&il_wac_ts=1603624667'
def test_with_caption():
soup = BeautifulSoup(v_with_caption, "lxml")
v_name, v_size, v_mod_date, v_url = m.parseVideos(soup)
assert v_name == 'Session_02_DesignofFlowLinesPart1.mp4'
assert v_mod_date == datetime.datetime.fromisoformat('2000-01-01')
    assert v_url == "https://ilias.uni-mannheim.de/data/ILIAS/mobs/mm_1318784/Session_02_DesignofFlowLinesPart1.mp4?il_wac_token=88ff75878db690a37e5fddfddcf055beea79693a&il_wac_ttl=3&il_wac_ts=1603624381"
``` |
{
"source": "jhelgert/kickbase-api-python",
"score": 3
} |
#### File: kickbase_api/models/base_model.py
```python
class BaseModel:
_json_mapping = {
}
_json_transform = {
}
def __init__(self, d: dict = {}):
for key in d.keys():
value = d[key]
# Transform if necessary
if key in self._json_transform:
value = self._json_transform[key](value)
if key in self._json_mapping.keys():
setattr(self, self._json_mapping[key], value)
setattr(self, key, value)
```
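The effect of `_json_mapping` and `_json_transform` is easiest to see with a tiny hypothetical subclass (not part of the package):
```python
class Example(BaseModel):
    def __init__(self, d: dict):
        # "p" is converted with int() and additionally exposed as .points
        self._json_transform = {"p": int}
        self._json_mapping = {"p": "points"}
        super().__init__(d)

e = Example({"p": "42"})
print(e.points, e.p)  # -> 42 42 (both the mapped name and the raw key are set)
```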
#### File: kickbase_api/models/feed_item_comment.py
```python
from datetime import datetime
from kickbase_api.models._transforms import parse_date
from kickbase_api.models.base_model import BaseModel
class FeedItemComment(BaseModel):
comment: str = None
date: datetime = None
user_id: str = None
user_name: str = None
user_profile_path: str = None
def __init__(self, d: dict = {}):
self._json_transform = {
"date": parse_date
}
self._json_mapping = {
"userId": "user_id",
"userName": "user_name",
"userProfileUrl": "user_profile_path",
}
super().__init__(d)
```
#### File: kickbase_api/models/feed_item.py
```python
from datetime import datetime
from enum import IntEnum
from kickbase_api.models._transforms import parse_date
from kickbase_api.models.base_model import BaseModel
from kickbase_api.models.feed_meta import FeedMeta
class FeedType(IntEnum):
BUY = 12
FEED_AD_BANNER = 15
FEED_COMMENT = 14
MATCH_DAY_SUMMARY = 8
MATCH_DAY_SUMMARY_V2 = 17
NEWS = 1
NEWS_V2 = 16
NEW_PLAYER_ON_TM = 3
PLAYER_MATCH_DAY_SUMMARY = 10
SALE = 2
STATUS_MESSAGE = 9
TYPE_EMPTY = 20
USER_FOUNDED_LEAGUE = 6
USER_INVITED_OTHER_TO_LEAGUE = 7
USER_JOINED_LEAGUE = 5
USER_LEFT_LEAGUE = 13
USER_MATCH_DAY_SUMMARY = 11
PLAYER_PROFILE_STATUS = 14
PREDICTED_LINEUP = 18
TRANSFER_V2 = 15
TYPE_GOOGLE_ADS = 500
UNKNOWN = 9999999999
def _map_feed_type(v):
try:
return FeedType(v)
except:
return FeedType.UNKNOWN
class FeedItem(BaseModel):
id: str = None
comments: int = None
date: datetime = None
age: int = None
type: FeedType = None
source: int = None
meta: FeedMeta = None
season_id: int = None
def __init__(self, d: dict = {}):
self._json_transform = {
"date": parse_date,
"meta": FeedMeta,
"type": _map_feed_type
}
self._json_mapping = {
"seasonId": "season_id"
}
super().__init__(d)
```
#### File: kickbase_api/models/feed_meta.py
```python
from kickbase_api.models.base_model import BaseModel
class FeedMeta(BaseModel):
average_points: int = None
match_day: int = None
maximum_points: int = None
total_points: int = None
buyer_picture: str = None
buyer_id: str = None
buyer_name: str = None
buy_price: int = None
seller_picture: str = None
seller_id: str = None
seller_name: str = None
sell_price: int = None
player_id: str = None
player_first_name: str = None
player_last_name: str = None
player_known_name: str = None
c: int = None
e: int = None
player_summary_r: int = None
player_summary_y: int = None
player_summary_yr: int = None
assists: int = None
game_time: int = None
goals_shot: int = None
match_type: int = None
team_name: str = None
opponent_team_id: int = None
opponent_team_name: str = None
goals_team_1: int = None
goals_team_2: int = None
founder_picture_path: str = None
found_id: str = None
founder_name: str = None
league_name: str = None
league_id: str = None
inviter_profile_picture_path: str = None
inviter_id: str = None
inviter_name: str = None
invited_name: str = None
goal_keeper_points: int = None
defenders_points: int = None
midfielders_points: int = None
forwarders_points: int = None
total_points: int = None
user_id: str = None
user_name: str = None
user_picture_path: str = None
status_message: str = None
news: str = None
def __init__(self, d: dict = {}):
self._json_transform = {
}
self._json_mapping = {
"a": "average_points",
"day": "match_day",
"m": "maximum_points",
"t": "total_points",
"bi": "buyer_picture",
"bid": "buyer_id",
"bn": "buyer_name",
"p": "buy_price",
# "p": "sell_price",
"sn": "seller_name",
"pid": "player_id",
"pln": "player_last_name",
"pkn": "player_known_name",
"cr": "player_summary_r",
"cy": "player_summary_y",
"cyr": "player_summary_yr",
# "a": "assists",
# "t": "game_time",
"g": "goals_shot",
"h": "match_type",
"tn": "team_name",
"oid": "opponent_team_id",
"otn": "opponent_team_name",
"r1": "goals_team_1",
"r2": "goals_team_2",
"fi": "founder_picture_path",
"fid": "found_id",
"fn": "founder_name",
"ln": "league_name",
"li": "league_id",
"ii": "inviter_profile_picture_path",
"iid": "inviter_id",
"in": "inviter_name",
# "tn": "invited_name",
"pg": "goal_keeper_points",
"pd": "defenders_points",
"pm": "midfielders_points",
"pf": "forwarders_points",
"pfn": "player_first_name",
"pt": "total_points",
"uid": "user_id",
"un": "user_name",
"ui": "user_picture_path",
"s": "status_message",
"sid": "seller_id",
"n": "news"
}
super().__init__(d)
self.sell_price = self.buy_price
self.game_time = self.total_points
self.assists = self.average_points
self.invited_name = self.team_name
```
#### File: kickbase_api/models/league_data.py
```python
from datetime import datetime
from kickbase_api.models._transforms import parse_date
from kickbase_api.models.base_model import BaseModel
class LeagueData(BaseModel):
id: str = None
name: str = None
creator: str = None
creator_id: int = None
creation_date: datetime = None
activity_index: float = None
total_transfers: int = None
active_users: int = None
max_users: int = None
average_points: int = None
pub: bool = None
gm: int = None
player_limit_active: bool = None
player_limit: bool = None
image_path: str = None
def __init__(self, d: dict = {}):
self._json_transform = {
"creation": parse_date
}
self._json_mapping = {
"creatorId": "creator_id",
"creation": "creation_date",
"mpl": "player_limit_active",
"pl": "player_limit",
"ci": "image_path",
"ai": "activity_index",
"t": "total_transfers",
"au": "active_users",
"mu": "max_users",
"ap": "average_points"
}
super().__init__(d)
```
#### File: kickbase_api/models/league_match_day_user_stats.py
```python
from datetime import datetime
from kickbase_api.models._transforms import parse_date, parse_key_value_array_to_dict
from kickbase_api.models.base_model import BaseModel
class LeagueMatchDayUserStats(BaseModel):
season_id: str = None
day: int = None
placement: int = None
points: int = None
def __init__(self, d: dict = {}):
self._json_transform = {
}
self._json_mapping = {
"s": "season_id",
"p": "placement",
"pt": "points"
}
super().__init__(d)
```
#### File: kickbase_api/models/league_user_season_stats.py
```python
from datetime import datetime
from kickbase_api.models._transforms import parse_date, parse_key_value_array_to_dict
from kickbase_api.models.base_model import BaseModel
class LeagueUserSeasonStats(BaseModel):
season_id: str = None
season: str = None
points: int = None
average_points: int = None
max_points: int = None
min_points: int = None
wins: int = None
bought: int = None
sold: int = None
points_goal_keeper: int = None
points_defenders: int = None
points_mid_fielers: int = None
points_forwards: int = None
average_goal_keeper: int = None
average_defenders: int = None
average_mid_fielders: int = None
average_forwards: int = None
def __init__(self, d: dict = {}):
self._json_transform = {
}
self._json_mapping = {
"seasonId": "season_id",
"averagePoints": "average_points",
"maxPoints": "max_points",
"minPoints": "min_points",
"pointsGoalKeeper": "points_goal_keeper",
"pointsDefenders": "points_defenders",
"pointsMidFielders": "points_mid_fielers",
"pointsForwards": "points_forwards",
"averageGoalKeeper": "average_goal_keeper",
"averageDefenders": "average_defenders",
"averageMidFielders": "average_mid_fielders",
"averageForwards": "average_forwards"
}
super().__init__(d)
```
#### File: kickbase_api/models/league_user_stats.py
```python
from datetime import datetime
from kickbase_api.models._transforms import parse_date, parse_key_value_array_to_dict
from kickbase_api.models.base_model import BaseModel
from kickbase_api.models.league_user_season_stats import LeagueUserSeasonStats
class LeagueUserStats(BaseModel):
name: str = None
profile_image_path: str = None
cover_image_path: str = None
flags: int = None
placement: int = None
points: int = None
team_value: float = None
seasons: [LeagueUserSeasonStats] = None
team_values: {datetime: float}
def __init__(self, d: dict = {}):
self._json_transform = {
"teamValues": parse_key_value_array_to_dict(lambda o: parse_date(o["d"]), lambda o: o["v"]),
"seasons": lambda v: [LeagueUserSeasonStats(_d) for _d in v]
}
self._json_mapping = {
"profileUrl": "profile_image_path",
"coverUrl": "cover_image_path",
"teamValue": "team_value",
"teamValues": "team_values"
}
super().__init__(d)
```
#### File: kickbase_api/models/market.py
```python
from kickbase_api.models.base_model import BaseModel
from kickbase_api.models.market_player import MarketPlayer
class Market(BaseModel):
closed: bool = None
players: [MarketPlayer] = None
def __init__(self, d: dict = {}):
self._json_transform = {
"players": lambda v: [MarketPlayer(v_) for v_ in v]
}
self._json_mapping = {
"c": "closed"
}
super().__init__(d)
```
#### File: models/response/league_stats_response.py
```python
from kickbase_api.models.league_match_day_stats_data import LeagueMatchDayStatsData
class LeagueStatsResponse:
current_day: int = None
match_days: {int: [LeagueMatchDayStatsData]} = {}
def __init__(self, d: dict):
        self.current_day = d["currentDay"]
        self.match_days = {}  # instance-level dict so responses do not share state
        for match_day in d["matchDays"]:
self.match_days[match_day["day"]] = [LeagueMatchDayStatsData(_d) for _d in match_day["users"]]
``` |
{
"source": "jhelgert/pyTTP",
"score": 2
} |
#### File: pyTTP/pyTTP/ttp_mip.py
```python
from mip import Model, xsum, MINIMIZE
import itertools
import numpy as np
class TTPMip:
def __init__(self, distance_matrix, max_mip_gap: float = 0.0,
max_home_away_stand: int = 3):
self.distance_matrix = distance_matrix
self.num_teams = distance_matrix.shape[0]
self.num_rounds = 2*self.num_teams - 2
self.teams = list(range(self.num_teams))
self.max_home_away_stand = max_home_away_stand
self._fix_scheduling_constrs = []
self._fix_home_away_pattern_constrs = []
self._create_mip_model()
self.mdl.solver.set_verbose(0)
self.mdl.max_mip_gap = max_mip_gap
def solve(self, start_obj_val: float, start_sol):
#sol = self._transform_start_sol(start_sol)
# Phase II
proceed = True
improved = False
while proceed is True:
proceed = False
# optimize home_away_pattern
self._fix_home_away_pattern(start_sol)
self.mdl.optimize()
if self.mdl.status != self.mdl.status.OPTIMAL:
return (improved,)
if self.mdl.objective_value < start_obj_val:
proceed = True
improved = True
# get the solution of the previous optimization
start_sol = self.get_solution()
start_obj_val = self.mdl.objective_value
print(
f"Phase II found new solution ( HA): {start_obj_val:6.0f}")
# remove the previous constraints
self.mdl.remove(self._fix_home_away_pattern_constrs)
# optimize schedule pattern and use previous solution as start
self._fix_scheduling(start_sol)
self.mdl.optimize()
if self.mdl.status != self.mdl.status.OPTIMAL:
return (improved,)
if self.mdl.objective_value < start_obj_val:
proceed = True
improved = True
# get the solution of the previous optimization
start_sol = self.get_solution()
start_obj_val = self.mdl.objective_value
print(
f"Phase II found new solution (nHA): {start_obj_val:6.0f}")
# else, get solution and continue
start_sol = self.get_solution()
# remove the previous constraints
self.mdl.remove(self._fix_scheduling_constrs)
return improved, start_obj_val, start_sol
def get_solution(self):
S = np.zeros((self.num_teams, self.num_rounds), dtype=np.int32)
for s in range(self.num_rounds):
for i, t1 in enumerate(self.teams):
for j, t2 in enumerate(self.teams):
if self.x[t1, t2, s].x > 0.0:
S[[i, j], s] = [-(j+1), i+1]
if self.x[t2, t1, s].x > 0.0:
S[[i, j], s] = [j+1, -(i+1)]
return S
def _transform_start_sol(self, start_sol):
sol = {
(t1, t2, s): 0 for t1 in self.teams for t2 in self.teams for s in range(self.num_rounds) if t1 != t2}
for i, team in enumerate(self.teams):
for s in range(self.num_rounds):
if (idx := start_sol[i, s]) > 0:
opponent = self.teams[idx-1]
sol[team, opponent, s] = 1
# print(sol)
return sol
def _create_mip_model(self):
self.rounds = list(range(self.num_rounds))
D = {(t1, t2): self.distance_matrix[i, j] for i, t1 in enumerate(self.teams)
for j, t2 in enumerate(self.teams)}
K = self.max_home_away_stand # max length of home stand and road trips
# -- sets --
self.PairTeams = list((i, j) for i in self.teams for j in self.teams)
self.TripleTeams = list((i, j, t)
for (i, j) in self.PairTeams for t in self.teams)
# mip model object
self.mdl = Model("bla", sense=MINIMIZE, solver_name="Gurobi")
# -- variables --
        # x[i,j,s] = 1, if team i plays away at team j on round s
x = {(i, j, s): self.mdl.add_var(var_type="B", name=f"x[{i},{j},{s}]")
for (i, j) in self.PairTeams for s in self.rounds}
self.x = x
# y[t,i,j] = 1, if team t travels from team i to team j
y = {(i, j, t): self.mdl.add_var(var_type="B", name=f"y[{i},{j},{t}]")
for (i, j, t) in self.TripleTeams}
        # z[i,j,s] = 1, if team i is at the venue of team j on round s (z[i,i,s] = 1 means a home game for team i)
z = {(i, j, s): self.mdl.add_var(var_type="B", name=f"z[{i},{j},{s}]")
for (i, j) in self.PairTeams for s in self.rounds}
# objective
expr1 = xsum(D[i, j]*x[i, j, 0] for (i, j) in self.PairTeams)
expr2 = xsum(D[i, j]*y[t, i, j] for (i, j, t) in self.TripleTeams)
expr3 = xsum(D[i, j]*x[i, j, self.num_rounds-1]
for (i, j) in self.PairTeams)
self.mdl.objective = expr1 + expr2 + expr3
for i in self.teams:
for s in self.rounds:
self.mdl.add_constr(x[i, i, s] == 0)
# each team has exact one match per round s
for s in self.rounds:
for i in self.teams:
self.mdl.add_constr(
xsum(x[i, j, s] + x[j, i, s] for j in self.teams) == 1)
# each team plays the other teams exactly once at home and exactly once away
for (i, j) in self.PairTeams:
if i != j:
self.mdl.add_constr(xsum(x[i, j, s] for s in self.rounds) == 1)
# do not exceed the number of allowed home stands or road trips
for s in self.rounds[:-K]:
for i in self.teams:
self.mdl.add_constr(xsum(x[i, j, s+l]
for l in range(K+1) for j in self.teams) <= K)
for j in self.teams:
self.mdl.add_constr(xsum(x[i, j, s+l]
for l in range(K+1) for i in self.teams) <= K)
# no repeaters, i.e.
# x[j,i,s] + x[i,j,s] == 2 --> x[j,i,s+1] + x[i, j,s+1] == 0
for (i, j) in self.PairTeams:
for s in self.rounds[:-1]:
self.mdl.add_constr(x[i, j, s]+x[j, i, s] +
x[i, j, s+1]+x[j, i, s+1] <= 1)
for i in self.teams:
for s in self.rounds:
self.mdl.add_constr(z[i, i, s] == xsum(
x[j, i, s] for j in self.teams))
for (i, j) in self.PairTeams:
if i != j:
for s in self.rounds:
self.mdl.add_constr(z[i, j, s] == x[i, j, s])
for (i, j, t) in self.TripleTeams:
for s in self.rounds[:-1]:
self.mdl.add_constr(
y[t, i, j] >= z[t, i, s] + z[t, j, s+1] - 1)
def _fix_home_away_pattern(self, start_sol):
self._fix_home_away_pattern_constrs = []
sol = self._transform_start_sol(start_sol)
for (i, j) in self.PairTeams:
if i != j:
for s in range(self.num_rounds):
con = self.mdl.add_constr(
self.x[i, j, s] + self.x[j, i, s] == sol[i, j, s] + sol[j, i, s])
# save the constraints such that we can delete them later
self._fix_home_away_pattern_constrs.append(con)
def _fix_scheduling(self, start_sol):
self._fix_scheduling_constrs = []
sol = self._transform_start_sol(start_sol)
for j in self.teams:
for s in self.rounds:
lhs1 = xsum(self.x[i, j, s] for i in self.teams if i != j)
lhs2 = xsum(self.x[j, i, s] for i in self.teams if i != j)
rhs1 = xsum(sol[i, j, s] for i in self.teams if i != j)
rhs2 = xsum(sol[j, i, s] for i in self.teams if i != j)
con1 = self.mdl.add_constr(lhs1 == rhs1)
con2 = self.mdl.add_constr(lhs2 == rhs2)
# save the constraints such that we can delete them later
self._fix_scheduling_constrs.append(con1)
self._fix_scheduling_constrs.append(con2)
``` |
{
"source": "jhelison/challenge-weef-twitter",
"score": 2
} |
#### File: apps/users/models.py
```python
from django.db import models
from django.contrib.auth.models import (
BaseUserManager,
AbstractBaseUser,
PermissionsMixin,
)
class AccountManager(BaseUserManager):
def create_user(self, email, name=None, password=None):
if not email:
raise ValueError("Users must have an email address")
user = self.model(email=self.normalize_email(email), name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, name, email, password):
user = self.create_user(
name=name, email=self.normalize_email(email), password=password
)
user.is_superuser = True
user.is_staff = True
        user.save(using=self._db)
        return user
class UserFollowing(models.Model):
user_id = models.ForeignKey(
"User", related_name="following", on_delete=models.CASCADE
)
following_user_id = models.ForeignKey(
"User", related_name="followers", on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ["-created_at"]
unique_together = (("user_id", "following_user_id"),)
def __str__(self):
return f"{self.user_id.email} | {self.following_user_id.email}"
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name="email", unique=True)
name = models.CharField(max_length=50)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ["name"]
objects = AccountManager()
def __str__(self):
return str(f"{self.email} | {self.name}")
    def count_followers(self):
        return UserFollowing.objects.filter(following_user_id=self).count()
    def count_following(self):
        return UserFollowing.objects.filter(user_id=self).count()
```
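A short sketch (Django shell style, assuming migrations have been applied) of how the follow relation and the counter helpers above fit together; the users are made up for illustration:
```python
alice = User.objects.create_user(email="alice@example.com", name="Alice", password="pw")
bob = User.objects.create_user(email="bob@example.com", name="Bob", password="pw")

# Bob follows Alice: user_id is the follower, following_user_id the followed user.
UserFollowing.objects.create(user_id=bob, following_user_id=alice)

alice.count_followers()   # 1
alice.count_following()   # 0
```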
#### File: apps/users/views.py
```python
from django.contrib.auth.hashers import check_password
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from apps.users.models import User
from apps.users.serializers import (
UserSerializer,
LoginSerializer,
)
@api_view(["POST"])
def signin(request):
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["POST"])
def login(request):
serializer = LoginSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
try:
user = User.objects.get(email=serializer.data["email"])
if not check_password(serializer.data["password"], user.password):
raise IOError()
except:
return Response(
"Email or password invalid.", status=status.HTTP_400_BAD_REQUEST
)
token = Token.objects.get_or_create(user=user)[0]
return Response({"token": token.key}, status=status.HTTP_200_OK)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def logout(request):
request.user.auth_token.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
``` |
{
"source": "jhell96/MCMC-Metropolis-Hastings-Decryption",
"score": 4
} |
#### File: jhell96/MCMC-Metropolis-Hastings-Decryption/mcmc.py
```python
from __future__ import division
from util import Distribution
import string
import math
import random
import sys
import matplotlib.pyplot as plt
def build_letter_transition_dist(file):
"""
Builds a transition matrix (dict of dicts) which measures the probability of transitioning from
one letter to another letter, based on the frequencies from the sample file.
i.e. "Spam and Eggs" measures frequency of s->p, p->a, a->m, m->" ", " "->a, a->n etc...
Inputs
------
file : a string which is the path to a file containing the reference document
Returns
-------
a dictionary of Distribution objects (inherited from dict - see util) where each
key is a letter, and each key of each Distribution object is also a letter; the
value is the probablity of transitioning between letters,
i.e. d[first_letter][second_letter] = Probability of first_letter -> second_letter
"""
charset = string.lowercase+" "
dist = {key:Distribution() for key in charset}
doc = clean_document(open(file).read())
    # Laplace smoothing - setting the prior to a uniform distribution.
    # This avoids probabilities of 0 in the transition distribution.
for a in charset:
for b in charset:
dist[a][b] += 1
for i in range(1, len(doc)):
first_letter = doc[i-1] if doc[i-1].isalpha() else " "
second_letter = doc[i] if doc[i].isalpha() else " "
dist[first_letter][second_letter] +=1
for k in dist:
dist[k].renormalize()
return dist
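# For example, dist['t']['h'] is the smoothed, renormalized probability that the
# letter 'h' immediately follows the letter 't' in the reference document.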
def plot_freq(dist):
"""
Plots the transition distribution created in build_letter_transition_dist -- for utility and visualization
"""
data = [ [dist[i][j] for j in dist[i]] for i in dist]
charset = [i for i in dist]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim((0, len(charset)-1))
ax.set_ylim((0, len(charset)-1))
ax.set_xticks([i for i in range(len(charset))])
ax.set_xticklabels(charset)
ax.set_yticks([i for i in range(len(charset))])
ax.set_yticklabels(charset)
plt.grid(True, color='white')
plt.imshow(data)
plt.colorbar(orientation='vertical')
plt.show()
def clean_document(document):
"""
Removes punctuation from a document, and converts everything to lowercase
"""
return document.translate(None, string.punctuation).lower()
def compute_log_likelihood(document, expected_letter_distribution):
"""
Computes the log-likelihood of a document
Inputs
------
document : a string, of which, we compute the likelihood
expected_letter_distribution : a dictionary of Distribution objects (inherited from dict - see util) where each
key is a letter, and each key of each Distribution object is also a letter; the
value is the probablity of transitioning between letters,
i.e. d[first_letter][second_letter] = Probability of first_letter -> second_letter
Returns
-------
a double which is the log-likelihood of the document
"""
s = 0
for i in range(1, len(document)):
first_letter = document[i-1].lower() if document[i-1].isalpha() else " "
second_letter = document[i].lower() if document[i].isalpha() else " "
s += math.log(expected_letter_distribution[first_letter][second_letter])
return s
def decrypt_document(encrypted_document, cipher):
"""
Decrypts a document from a cipher
Inputs
------
encrypted_document : a string, which we want to transform with a cipher
cipher : a string, in which order matters, that is mapped to from the alphabet in the
encrypted document i.e. abcdefg.. -> udhjenk...
Returns
-------
a string in which each original letter, is replaced with its corresponding letter in the cipher
"""
mapping = create_mapping_from_cipher(cipher)
document = ""
for c in encrypted_document:
if (c.isalpha() and (c.lower() in mapping)):
document += mapping[c.lower()]
else:
document += " "
return document
def create_mapping_from_cipher(cipher):
"""
Creates the mapping between the alphabet string and the cipher string
Inputs
------
cipher : a string, in which order matters, that is mapped to from the alphabet in the
encrypted document i.e. abcdefg.. -> udhjenk...
Returns
-------
a dictionary in which each key is a letter of the alphabet, and each value is
the corresponding letter in the cipher
"""
charset = list(string.lowercase)
return {charset.pop(0):elem for elem in cipher}
def propose_cipher(current_cipher):
"""
Proposes a new cipher by randomly swapping the place of two letters in the current cipher
Inputs
------
current_cipher : a string, in which order matters, that is mapped to from the alphabet in the
encrypted document i.e. abcdefg.. -> udhjenk...
Returns
-------
a string, which is the new proposed cipher
"""
first_letter = random.choice(list(current_cipher))
second_letter = random.choice(list(current_cipher))
while(first_letter == second_letter):
second_letter = random.choice(list(current_cipher))
new_cipher = ""
for c in current_cipher:
if (c == first_letter):
new_cipher += second_letter
elif (c == second_letter):
new_cipher += first_letter
else:
new_cipher+=c
return new_cipher
def generate_random_cipher():
"""
Generates a random cipher string
Returns
-------
a string, containing all the letters of the alphabet, in a randomly permuated order
"""
current_cipher = list(string.lowercase)
random.shuffle(current_cipher)
return "".join(current_cipher)
def acceptance_criteria(log_proposal, log_current):
"""
Accepts the sample according to the Metropolis-Hastings algorithm
"""
return (random.random() < math.exp(log_proposal - log_current))
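# Worked example (illustrative numbers, not taken from any reference document):
# if the proposed cipher decodes the text with log-likelihood -1000 and the
# current cipher with -1005, then exp(-1000 - (-1005)) = exp(5) > 1 and the
# proposal is always accepted. If instead the proposal scored -1007,
# exp(-2) is roughly 0.135, so the worse cipher is still accepted about 13.5%
# of the time -- this occasional acceptance of worse states is what lets the
# chain escape locally optimal but incorrect decryptions.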
def run_metropolis_hastings(encrypted_document, expected_letter_distribution, max_acceptance_iter=4000):
"""
Runs the Metropolis-Hastings algorithm to decode the document. The iteration number represents
the number of accepted samples from the distribution, and depends heavily on the length of the document
to be decoded: A longer document usually implies smaller terminal iteration number.
If it doesn't decode the document the first time, it is often useful to run multiple times to yield the best
cipher.
"""
encrypted_document = clean_document(encrypted_document)
current_cipher = generate_random_cipher()
best_document = ("", float("-inf"))
number_accepted = 0
i = 0
while(number_accepted < max_acceptance_iter):
i+=1
proposal_cipher = propose_cipher(current_cipher)
proposal_document = decrypt_document(encrypted_document, proposal_cipher)
current_document = decrypt_document(encrypted_document, current_cipher)
log_likelihood_proposal = compute_log_likelihood(proposal_document, expected_letter_distribution)
log_likelihood_current = compute_log_likelihood(current_document, expected_letter_distribution)
if (log_likelihood_proposal > best_document[1]):
best_document = (proposal_document, log_likelihood_proposal)
if(acceptance_criteria(log_likelihood_proposal, log_likelihood_current)):
number_accepted += 1
current_cipher = proposal_cipher
print number_accepted, i
print best_document
return best_document
def encrypt_document(document):
"""
Useful method to encrypt a document using a random cipher
"""
cipher = generate_random_cipher()
return decrypt_document(document, cipher)
if __name__ == '__main__':
if len(sys.argv) != 4:
print "Usage: python mcmc.py <path to file to decode> <number of iterations> <path to reference document>"
print "Example: python mcmc.py decode_this.txt 2000 war_and_peace.txt"
sys.exit(1)
file_to_decode = open(sys.argv[1]).read()
expected_letter_distribution = build_letter_transition_dist(sys.argv[3])
iterations = int(sys.argv[2])
print run_metropolis_hastings(file_to_decode, expected_letter_distribution, iterations)
``` |
{
"source": "jhell96/music-perception-mcmc",
"score": 3
} |
#### File: jhell96/music-perception-mcmc/mcmc.py
```python
from keyboard import Keyboard
import numpy as np
from util import *
from tqdm import tqdm
import matplotlib.pyplot as plt
class MCMC_MH():
def __init__(self, max_iterations, proposal_method='uniform', proposal_sensitivity=1000.0, similarity_sensitivity=100.0):
self.max_iterations = max_iterations
self.keyboard = Keyboard()
self.history = []
self.proposal_method = proposal_method
self.proposal_sensitivity = proposal_sensitivity
self.similarity_sensitivity = similarity_sensitivity
def estimate(self, audio):
# ALG:
# initialize
# generate candidate from g(x' | x_t)
# calculate acceptance probability A(x', x_t) = min(1, ( p(x') / p(x) ) * ( g(x_t | x') / g(x' | x_t) ) )
# uniformly generate number [0, 1]
# if u <= A(x', x_t) accept and set x_{t+1} = x'
# otherwise reject the new state and copy the old state forward
# increment t
######################################################
# init
self.keyboard.state = [0]*len(self.keyboard.state)
# self.keyboard.toggle_note(73)
num_accepted = 0
for t in tqdm(range(self.max_iterations)):
states = self.keyboard.possible_next_states()
state_dist = self.proposal_dist(states)
proposal_idx = np.random.choice(len(states), 1, p=state_dist)[0]
proposal_state = states[proposal_idx]
current_score = self.keyboard.score(audio)
proposal_score = self.keyboard.score(audio, state=proposal_state)
# print(current_score, proposal_score)
score_distribution = self.keyboard.softmax([current_score, proposal_score], scale=self.similarity_sensitivity)
# print(score_distribution, self.keyboard.state, proposal_state)
current_prob, proposal_prob = score_distribution
acceptance_probability = min(1, (proposal_prob/(current_prob + 1e-5)))
u = np.random.uniform(0, 1)
if (u <= acceptance_probability):
self.keyboard.state = proposal_state
self.history.append(proposal_state)
num_accepted += 1
else:
self.history.append(self.keyboard.state)
def proposal_dist(self, states):
if self.proposal_method == 'uniform':
# Uniform dist:
return [1/len(states) for i in states]
if self.proposal_method == 'sim':
# Similarity dist
current_state = self.keyboard.state
curr_energy = self.keyboard.get_state_chroma_energy()
sim = []
for s in states:
energy = self.keyboard.get_state_chroma_energy(state=s)
sim.append(np.dot(curr_energy, energy))
dist = np.array(sim)/(sum(sim) + 1e-5)
dist = self.keyboard.softmax(dist, scale=self.proposal_sensitivity)
return dist
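    # Note: the 'sim' proposal weights each candidate next state by the dot
    # product between its chroma energy and the current state's chroma energy
    # (sharpened by proposal_sensitivity via softmax), so proposed moves tend
    # to stay harmonically close to the current hypothesis.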
def plot_history(self, correct_state=None):
if correct_state:
correct_state = list(map(lambda x: x*2, correct_state))
out = self.history[:]
for i in range(int(max(1, 0.05*len(out)))):
out.append(correct_state)
else:
out = self.history
h = np.array(out)
plt.title("prop method: {}, prop_sen: {}, sim_sen: {}".format(self.proposal_method, self.proposal_sensitivity, self.similarity_sensitivity))
plt.imshow(h.T, origin='lower', aspect='auto')
plt.xlabel("Iteration")
plt.ylabel("Note")
plt.show()
def get_dist(self):
burn_in = 100
cut = int(min(burn_in, 0.3 * self.max_iterations))
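        # The first `cut` samples are discarded as burn-in: early samples still
        # depend strongly on the all-zeros initial state, so only the later part
        # of the chain is treated as (approximately) drawn from the target
        # distribution over note states.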
s = np.sum(np.array(self.history[cut:]), axis=0)
probabilities = s/np.sum(s)
return np.array(probabilities)
def run_test(self, test_num):
burn_in = 1000
audio_file = "piano/resources/tests/test{}.wav".format(test_num)
audio = load_wav(audio_file)
with open("piano/resources/tests/correct_notes.txt", 'r') as f:
correct = f.read()
print("Getting correct answer...")
correct_state = [0]*self.keyboard.num_notes
for t in [x.split(":") for x in correct.split('\n')]:
if t[0] == 'test{}'.format(test_num):
for p in t[1].split(','):
correct_state[int(p)-self.keyboard.starting_pitch] = 1
print("Running MCMC...")
self.estimate(audio)
cut = int(min(burn_in, 0.3 * self.max_iterations))
s = np.sum(np.array(self.history[cut:]), axis=0)
pitches = np.arange(self.keyboard.starting_pitch, self.keyboard.starting_pitch+self.keyboard.num_notes)
probabilities = s/np.sum(s)
print("Pitch Probabilities")
print("Pitch Prob")
for pitch, prob in zip(pitches, probabilities):
print("{} {}".format(pitch, round(prob, 3)))
print("")
print("Top Note: " + str(np.argmax(s)+self.keyboard.starting_pitch))
print("Final state: " + str(self.keyboard.state))
print("Correct State: " + str(correct_state))
print("Pitches: " + str(pitches))
print("")
print("Playing original audio...")
print("")
play_wav(audio_file)
print("")
print("Playing estimated audio...")
print("")
self.keyboard.play_current_state()
self.plot_history(correct_state)
if __name__ == '__main__':
# number of iterations to run (normal values: 10 - 10,000)
num_iters = 10000
# proposal distribution method (normal values: 'uniform' or 'sim' for simliarity proposal)
# method = 'uniform'
method = 'sim'
    # sets the sensitivity of how similar we think any state is to a piece of audio
# normal values: 50 - 500
sim_sen = 120
    # sets the sensitivity of how similar we think our proposed state is to the current state
# normal values: 10 - 1,000
# ONLY ACTUALLY USED WHEN USING 'sim' METHOD
prop_sen = 50
# initialize mcmc
mh = MCMC_MH(num_iters, proposal_method=method, proposal_sensitivity=prop_sen, similarity_sensitivity=sim_sen)
    # run test number 4
    # will play the original and estimated audio and generate a plot of the
    # sampling history, using the test files under piano/resources/tests/
mh.run_test(4)
``` |
{
"source": "jhellan/authomatic",
"score": 3
} |
#### File: pyramid/dataporten/main.py
```python
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from authomatic import Authomatic
from authomatic.adapters import WebObAdapter
from authomatic.providers import oauth2
CONFIG = {
'dp': {
'class_': oauth2.Dataporten,
'consumer_key': 'client-id-from-dashboard.dataporten.no',
'consumer_secret': 'client-secret-from-dashboard.dataporten.no'
}
}
secret = 'ergeresf' # Used for signing session cookies and salting CSRF tokens
authomatic = Authomatic(config=CONFIG, secret=secret)
def login(request):
response = Response()
result = authomatic.login(WebObAdapter(request, response), 'dp')
if result:
# If there is a result, the login procedure is over and we can write to response.
response.write('<a href="..">Home</a>')
if result.error:
response.write(u'<h2>Login failed: {0}</h2>'.format(result.error.message))
elif result.user:
# OAuth 2.0 provides only limited user data on login,
# We need to update the user to get more info.
if not (result.user.name and result.user.id):
result.user.update()
response.write(u'<h1>Hi {0}</h1>'.format(result.user.name))
response.write(u'<h2>Your id is: {0}</h2>'.format(result.user.id))
response.write(u'<h2>Your email is: {0}</h2>'.format(result.user.email))
return response
def home(request):
return Response('''
Login with <a href="login">Dataporten</a>.<br />
''')
if __name__ == '__main__':
config = Configurator()
config.add_route('home', '/')
config.add_view(home, route_name='home')
config.add_route('login', '/login')
config.add_view(login, route_name='login')
app = config.make_wsgi_app()
server = make_server('127.0.0.1', 8080, app)
server.serve_forever()
``` |
{
"source": "j-helland/grn",
"score": 3
} |
#### File: examples/mnist/download_mnist.py
```python
import argparse
import tensorflow as tf
import torchvision as tv
def download_tf():
tf.keras.datasets.mnist.load_data()
def download_torch(data_dir='./'):
    tv.datasets.MNIST(root=data_dir, download=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./')
args = parser.parse_args()
download_tf()
    download_torch(args.data_dir)
```
#### File: examples/mnist/tf_mnist_train.py
```python
import argparse
import grn
@grn.job()
def train_job(epochs: int, batch_size: int) -> 'tf.keras.Model':
import tensorflow as tf
# Configure tensorflow to progressively allocate GPU memory rather than fully
# allocating all available memory.
physical_devices = tf.config.list_physical_devices('GPU')
for gpu_instance in physical_devices:
tf.config.experimental.set_memory_growth(gpu_instance, True)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
# Build the model
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
model.build(input_shape=(None, 28, 28))
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, y_test)) # Bad form to use test as val, but this is just a load-balancing test.
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--batch-size', type=int, default=128)
args = parser.parse_args()
trained_model = train_job(
epochs=args.epochs,
batch_size=args.batch_size)
```
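For comparison, a hypothetical PyTorch counterpart to the job above. It is not part of the repository; it only assumes that `@grn.job()` can wrap an arbitrary training function (as the TensorFlow example shows) and that the MNIST data can be fetched via torchvision as in `download_mnist.py`:
```python
import argparse
import grn

@grn.job()
def train_job(epochs: int, batch_size: int) -> 'torch.nn.Module':
    import torch
    import torch.nn as nn
    import torchvision as tv
    from torch.utils.data import DataLoader

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_set = tv.datasets.MNIST(
        root='./', train=True, download=True, transform=tv.transforms.ToTensor())
    loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)

    # Same architecture as the Keras model above.
    model = nn.Sequential(
        nn.Flatten(),
        nn.Linear(28 * 28, 128),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(128, 10),
    ).to(device)

    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = nn.CrossEntropyLoss()
    for _ in range(epochs):
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss_fn(model(x), y).backward()
            optimizer.step()
    return model

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--batch-size', type=int, default=128)
    args = parser.parse_args()
    trained_model = train_job(epochs=args.epochs, batch_size=args.batch_size)
```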
#### File: grn/core/gpu_master.py
```python
import sys
import os
from concurrent import futures
import signal
import logging
import grpc
from grn.utils.utils import find_free_port
from grn.utils.gpu_monitors import GPUMonitor
import grn.grpc_resources.master_pb2 as protos
import grn.grpc_resources.master_pb2_grpc as services
from grn.core.constants import GRN_SERVER_TMP_INFO_PATH, ServiceErrorCode, ResourcePolicy
from grn.core.globals import GPUStates, JobStates
from typing import Optional, Callable
log = logging.getLogger(__file__)
__all__ = ['serve']
def get_next_available_gpu(jobstr: str, resource_policy: ResourcePolicy) -> protos.GPU:
GPUStates.STATES_INIT.wait()
is_new_job_type = (JobStates.JOB_TYPES.get(jobstr) is None)
if len(GPUStates.GPU_QUEUE) == 0:
errorcode = ServiceErrorCode.WAITING_FOR_JOB_PROFILE
else:
with GPUStates.LOCK:
# FIXME: We assume GPU homogeneity -- all GPUs have identical architecture.
# TODO: Implement a segmented heap to group homogenous hardware together.
# Then we can have a resource heap for each bin.
def _find_first_non_profiling_gpu(pop_func: Callable) -> int:
num_gpus = len(GPUStates.GPU_QUEUE)
i = 0
gid = None
while (i < num_gpus) and len(GPUStates.GPU_QUEUE):
gid = pop_func()[0]
if gid not in GPUStates.PROFILING_GROUP:
break
i += 1
else:
return None
return gid
if is_new_job_type:
gid = _find_first_non_profiling_gpu(GPUStates.GPU_QUEUE.poplast)
else:
if resource_policy == ResourcePolicy.SPREAD:
gid = _find_first_non_profiling_gpu(GPUStates.GPU_QUEUE.poplast)
elif resource_policy == ResourcePolicy.PACK:
gid = _find_first_non_profiling_gpu(GPUStates.GPU_QUEUE.popfirst)
else:
raise NotImplementedError(resource_policy)
def _handle_pack_policy(gid: int, mem_request: int) -> Optional[int]:
i = 0
group = [ (gid, GPUStates.STATES[gid]['memoryUsed']) ]
while (i < len(GPUStates.GPU_QUEUE)):
if (GPUStates.STATES[group[-1][0]]['memoryFree'] >= mem_request):
break
group.append( GPUStates.GPU_QUEUE.popfirst() )
else:
GPUStates.GPU_QUEUE.extend(group)
return None
# Reinsert all but last element; last element will be given new resource priority.
GPUStates.GPU_QUEUE.extend(group[:-1])
return group[-1][0]
def _handle_spread_policy(gid: int, mem_request: int) -> Optional[int]:
if GPUStates.STATES[gid]['memoryFree'] < mem_request:
return None
return gid
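            # Editorial note (inferred from the code above, not from project docs):
            # _handle_spread_policy only accepts the single device that was popped
            # and fails fast if it cannot hold the requested memory, whereas
            # _handle_pack_policy keeps pulling further devices from the front of
            # the queue until one with enough free memory is found (reinserting the
            # ones it passed over), so jobs tend to be packed onto fewer devices.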
# If we reached the end of the heap, there must be no devices that aren't locked.
if gid is None:
errorcode = ServiceErrorCode.WAITING_FOR_JOB_PROFILE
else:
mem_free = GPUStates.STATES[gid]['memoryFree']
mem_total = GPUStates.STATES[gid]['memoryTotal']
# NOTE: Single python dict op should be inherently thread-safe.
requested_memory = JobStates.JOB_TYPES.get(jobstr)
log.debug(f'[get_next_available_gpu] (total) {mem_total}, (free) {mem_free}, (request) {requested_memory}')
errorcode = ServiceErrorCode.OK
if requested_memory is not None:
if resource_policy == ResourcePolicy.SPREAD:
gid = _handle_spread_policy(gid, requested_memory)
elif resource_policy == ResourcePolicy.PACK:
gid = _handle_pack_policy(gid, requested_memory)
else:
raise NotImplementedError(resource_policy)
if gid is None:
errorcode = ServiceErrorCode.EXCEEDS_CURRENT_MEMORY
else:
# Reinsert gpu with new resource priority.
# We need to update GPU state right now so that other threads
# will immediately become aware of new resource constraints.
# Also need to update the heap so that other threads don't select
# the GPU we just filled.
GPUStates.STATES[gid]['memoryUsed'] += requested_memory
GPUStates.STATES[gid]['memoryFree'] -= requested_memory
GPUStates.GPU_QUEUE.insert(gid, GPUStates.STATES[gid]['memoryUsed'])
# Check for other instances of this unprofiled job type.
# We shouldn't launch this job until we get a profile.
# NOTE: Single dict op should be inherently thread-safe.
elif JobStates.ACTIVE_JOBS.get(jobstr):
errorcode = ServiceErrorCode.WAITING_FOR_JOB_PROFILE
# Artificially mark this device as fully allocated to avoid running
# other new profile jobs on it.
elif is_new_job_type:
GPUStates.STATES[gid]['memoryFree'] = 0
# Add a device lock that will persist until a profile is sent back.
# This is to prevent this device from being used for any other jobs
# since we don't know how many resources are required yet.
# NOTE: This device lock can only be removed in the CompleteJob service.
GPUStates.PROFILING_GROUP[gid] = jobstr
# If anything went wrong, set the GPU ID to something unreachable.
if errorcode != ServiceErrorCode.OK:
gid = -1
return protos.GPU(
gpu_id=gid,
errorcode=errorcode.value)
# TODO: These need to be methods in grn.core.globals
def update_job_state(jobstr: str, state) -> None:
# Accumulate the max resource consumption.
with JobStates.LOCK:
JobStates.JOB_TYPES[jobstr] = max(
JobStates.JOB_TYPES.get(jobstr, state),
state)
def push_active_job(jobstr: str) -> None:
with JobStates.LOCK:
JobStates.ACTIVE_JOBS[jobstr] = JobStates.ACTIVE_JOBS.get(jobstr, 0) + 1
def pop_active_job(jobstr: str) -> None:
with JobStates.LOCK:
JobStates.ACTIVE_JOBS[jobstr] = JobStates.ACTIVE_JOBS.get(jobstr, 1) - 1
class GPUMasterServicer(services.GPUMasterServicer):
"""
"""
def __init__(self, max_num_jobs: Optional[int] = None):
# NOTE: Don't really need to clear these, but doing so provides an explicit
# in-code reminder that GPUMasterServicer uses these globals.
JobStates.JOB_TYPES.clear()
JobStates.ACTIVE_JOBS.clear()
if (max_num_jobs is not None) and (max_num_jobs <= 0):
raise ValueError('max_num_jobs must be a positive integer')
JobStates.MAX_NUM_JOBS = max_num_jobs
def RequestGPU(self, request: protos.JobType, context) -> protos.GPU:
"""Service that client invokes to obtain GPU resources.
If this is a new job type, the GPU ID provisioned is guaranteed
to not have another profiling job running on it.
If this is a profiled job type, then a GPU ID will be doled out
according to the cached resource profile. If the resource request
exceeds available resources, then wait until the request can be
fulfilled.
"""
log.debug(f'[RequestGPU] request\n{request}')
jobstr = request.jobstr
resource_policy = ResourcePolicy(request.resource_policy)
gpu = get_next_available_gpu(jobstr, resource_policy)
errorcode = ServiceErrorCode(gpu.errorcode)
if errorcode == ServiceErrorCode.OK:
push_active_job(jobstr)
log.debug(f'[RequestGPU] serving GPU ID {gpu.gpu_id}')
return gpu
def CompleteJob(self, request: protos.JobProfile, context) -> protos.Empty:
"""Service that a client uses to send a completed job profile.
If this is a new job type, then profile results will be cached
and used to allocate resources to future jobs of this type.
"""
log.debug(f'[CompleteJob] request\n{request}')
jobstr = request.jobtype.jobstr
# NOTE: Single python dict ops should be inherently thread-safe, so this
# should be okay.
is_new_job_type = (JobStates.JOB_TYPES.get(jobstr) is None)
# # TODO: With the new single process client-side monitoring, we should be able to
# # re-enable continuous monitoring.
# if request.succeeded:
# update_job_state(jobstr, state=request.max_gpu_memory_used)
pop_active_job(jobstr)
# When a job receives a profile, we should signal threads that are waiting on
# job profiles to attempt to resolve their resource requests.
if is_new_job_type and request.succeeded:
# Remove the device lock since profiling has finished.
assert request.gpu.gpu_id in GPUStates.PROFILING_GROUP
assert GPUStates.PROFILING_GROUP[request.gpu.gpu_id] == jobstr
del GPUStates.PROFILING_GROUP[request.gpu.gpu_id]
update_job_state(jobstr, state=request.max_gpu_memory_used)
return protos.Empty()
def JobTypeExists(self, request: protos.JobType, context) -> protos.BoolResponse:
"""Service that simply checks if a job type has already been profiled.
"""
return protos.BoolResponse(
response=(JobStates.JOB_TYPES.get(request.jobstr) is not None))
def __server_shutdown_sig_handler(*args) -> None:
log.info('[GPUMaster] Cleaning up...')
if os.path.exists(GRN_SERVER_TMP_INFO_PATH):
os.remove(GRN_SERVER_TMP_INFO_PATH)
sys.exit()
def serve(debug=False, max_workers=10):
# TODO: Implement multiple servers at once without needing separate system environments (e.g. virtual environment, docker container).
if os.path.isfile(GRN_SERVER_TMP_INFO_PATH):
raise SystemError(
f'GPU Master is already running! Shut down current server process before launching a new one.')
signal.signal(signal.SIGINT, __server_shutdown_sig_handler)
signal.signal(signal.SIGTERM, __server_shutdown_sig_handler)
# Obtain a random free port from the OS and cache it to a secret file.
# This file will be removed once the server shuts down.
port = find_free_port()
with open(GRN_SERVER_TMP_INFO_PATH, 'w') as grn_file:
grn_file.write(f'{port}')
with GPUMonitor(delay=0.1):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
services.add_GPUMasterServicer_to_server(GPUMasterServicer(), server)
server.add_insecure_port(f'[::]:{port}')
server.start()
server.wait_for_termination()
# Just in case.
__server_shutdown_sig_handler()
```
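The server side above is wrapped entirely behind `serve()`. Below is a minimal launch sketch, assuming the package layout shown in this file (`grn.core.gpu_master`); the logging setup is illustrative and not part of the original module.
```python
# Hypothetical launcher script for the GPU master defined above.
# serve() blocks until SIGINT/SIGTERM, then removes the cached port file.
import logging

from grn.core.gpu_master import serve

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # max_workers bounds the gRPC thread pool that handles
    # RequestGPU / CompleteJob / JobTypeExists calls.
    serve(max_workers=10)
```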
#### File: grn/core/job.py
```python
import os
import sys
import signal
import functools
import inspect
import contextlib
import logging
import time
import grpc
from grn.utils.utils import is_server_available, find_gpu_master_address
from grn.utils.gpu_monitors import SingleGPUProcessMonitor
import grn.grpc_resources.master_pb2 as protos
import grn.grpc_resources.master_pb2_grpc as services
from grn.core.constants import ServiceErrorCode, ResourcePolicy
from typing import (
Callable,
Iterable,
Tuple,
Any,
Optional,
)
log = logging.getLogger(__file__)
__all__ = ['job']
def __grpc_failed_job_handler(
*args,
stub: services.GPUMasterStub,
jobtype: protos.JobType
) -> None:
log.warning('[grn] Caught interrupt signal, sending failed job message to server.')
profile = protos.JobProfile(
jobtype=jobtype,
succeeded=False)
stub.CompleteJob(profile, wait_for_ready=True)
sys.exit()
@contextlib.contextmanager
def __grpc_handle_signals(
sigs: Iterable[signal.Signals],
stub: services.GPUMasterStub,
jobtype: protos.JobType,
) -> None:
orig_handlers = [signal.getsignal(s) for s in sigs]
for s in sigs:
signal.signal(s, functools.partial(
__grpc_failed_job_handler,
stub=stub, jobtype=jobtype))
yield
# Restore control to original handlers
for s, handler in zip(sigs, orig_handlers):
signal.signal(s, handler)
def __request_gpu(
stub: services.GPUMasterStub,
jobtype: protos.JobType
) -> int:
response: protos.GPU = stub.RequestGPU(jobtype, wait_for_ready=True)
errorcode = ServiceErrorCode(response.errorcode)
# Dumb retry policy where we just keep checking over and over again
# for available resources. The increasing delay period helps reduce a
# bit of waste, but still isn't great.
# TODO: Would be much better to do some kind of event-driven approach where
# jobs can efficiently wait until resources free up rather than wasting
# cycles.
delay = 0.1
max_delay = 5.0
while ( (errorcode == ServiceErrorCode.EXCEEDS_CURRENT_MEMORY) or
(errorcode == ServiceErrorCode.WAITING_FOR_JOB_PROFILE) ):
time.sleep(min(max_delay, delay))
delay *= 2
response: protos.GPU = stub.RequestGPU(jobtype, wait_for_ready=True)
errorcode = ServiceErrorCode(response.errorcode)
if errorcode == ServiceErrorCode.EXCEEDS_TOTAL_MEMORY:
raise MemoryError(f'{errorcode}: Cannot complete job \n```\n{jobtype}```\n')
return response.gpu_id
def __run_job(
job_func: Callable,
gpu_id: int,
jobtype: protos.JobType
) -> Tuple[Any, protos.JobProfile]:
os.environ['CUDA_VISIBLE_DEVICES'] = f'{gpu_id}'
# Collects job profile in a separate thread.
# Default sampling rate is once per 10 milliseconds.
with SingleGPUProcessMonitor(gpu_id, pid=os.getpid(), delay=0.01) as monitor:
mem_used = None
try:
outputs = job_func()
except RuntimeError as e:
outputs = None
profile = protos.JobProfile(
jobtype=jobtype,
succeeded=False)
else:
mem_used = monitor.max_mem_used
if mem_used == 0:
raise SystemError(
f'No usage metrics could be collected for PID {os.getpid()} for job '
f'\'{jobtype.jobstr}\' on GPU {gpu_id} because no such PID was ever '
f'found by NVML. Does this job actually use the GPU?'
f'\nHINT: If you are running in a docker container, make sure to '
f'use the --pid=host flag to turn off PID namespace isolation. '
f'NVML uses the host PID namespace regardless of the container settings.')
profile = protos.JobProfile(
jobtype=jobtype,
succeeded=True,
gpu=protos.GPU(
gpu_id=gpu_id,
errorcode=0),
max_gpu_memory_used=mem_used)
return outputs, profile
def job(
jobstr: Optional[str] = None,
resource_policy: str = 'spread',
continue_on_server_unavailable: bool = False,
) -> Callable:
if resource_policy in {'spread', 'spreading'}:
__RESOURCE_POLICY = ResourcePolicy.SPREAD.value
elif resource_policy in {'pack', 'packing'}:
__RESOURCE_POLICY = ResourcePolicy.PACK.value
else:
raise ValueError(f'Got resource_policy={resource_policy}, but options are (\'spread\', \'pack\').')
def load_balance(
func: Callable,
) -> Callable:
# Construct the jobstr that specifies the job type.
func_file = inspect.getfile(func)
__JOBSTR = jobstr or (
f'{func_file}::{func.__class__.__name__}' if inspect.isclass(func)
else f'{func_file}::{func.__name__}')
__JOBTYPE = protos.JobType(
jobstr=__JOBSTR,
resource_policy=__RESOURCE_POLICY)
__REGISTRY = {}
# This function is added as an attribute of the decorator at the end.
# This allows it to be invoked as a decorator itself.
def __profile(pfunc: Callable) -> Callable:
if __REGISTRY.get('profiler') is not None:
raise AttributeError(
f'Tried to register profiler {inspect.getfile(pfunc)}::{pfunc.__name__} '
f'for {__JOBSTR}, but one already exists: '
f'{inspect.getfile(__REGISTRY["profiler"])}::{__REGISTRY["profiler"].__name__}')
__REGISTRY['profiler'] = pfunc
return pfunc
@functools.wraps(func)
def decorator(*args, **kwargs):
try:
__GPU_MASTER_ADDR = find_gpu_master_address()
except FileNotFoundError as e:
if not continue_on_server_unavailable:
raise e
__GPU_MASTER_ADDR = 'None'
# If the server is unavailable, then raise exception by default.
# Otherwise, proceed with computation without load balancing.
if not is_server_available(__GPU_MASTER_ADDR):
if not continue_on_server_unavailable:
raise ConnectionError('GPU Master is not available.')
log.warning(
'GPU Master is not running but continue_on_server_unavailable=True, '
'proceeding anyway... This may result in unexpected job failures.')
return func(*args, **kwargs)
with grpc.insecure_channel(
__GPU_MASTER_ADDR,
options=(('grpc.enable_http_proxy', 0),)
) as channel:
stub = services.GPUMasterStub(channel)
# We need to communicate abnormal process termination to the server.
# TODO: Using a gRPC stream should eliminate the need for this signal
# handling.
with __grpc_handle_signals((signal.SIGINT,), stub, __JOBTYPE):
# Run the profile job if this is the first invocation of this job
# relative to the server lifetime.
# If no profiler was registered, then run the full job as the profile
# job and return its outputs.
is_job_profiled = stub.JobTypeExists(__JOBTYPE).response
if not is_job_profiled:
# Use the registered profiler if one exists; otherwise run the
# full job as the profiling job.
profiler = __REGISTRY.get('profiler')
job_func = ((lambda: profiler(*args, **kwargs)) if profiler is not None
            else (lambda: func(*args, **kwargs)))
gpu_id = __request_gpu(stub, __JOBTYPE)
try:
outputs, profile = __run_job(
job_func=job_func,
gpu_id=gpu_id,
jobtype=__JOBTYPE)
except SystemError as e:
print(e)
__grpc_failed_job_handler(stub=stub, jobtype=__JOBTYPE)
stub.CompleteJob(profile)
# If a profiler was registered, now run the actual job and
# return its outputs. If the profile failed, don't run the full
# job.
# The other case is that this job type has already been profiled,
# which means we should just run it.
if (
is_job_profiled or (
(not is_job_profiled) and
(profile.succeeded) and
(__REGISTRY.get('profiler') is not None))
):
gpu_id = __request_gpu(stub, __JOBTYPE)
try:
outputs, profile = __run_job(
job_func=(lambda: func(*args, **kwargs)),
gpu_id=gpu_id,
jobtype=__JOBTYPE)
except SystemError as e:
print(e)
__grpc_failed_job_handler(stub=stub, jobtype=__JOBTYPE)
stub.CompleteJob(profile)
return outputs
decorator.profile = __profile
return decorator
return load_balance
```
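For context, a hedged usage sketch of the `@job` decorator above. The function names and bodies are invented for illustration; only `job`, its `resource_policy` argument, and the `.profile` registration attribute come from the module itself, and a running GPU master is assumed.
```python
# Illustrative client-side usage of grn.core.job.job (names are made up).
from grn.core.job import job

@job(resource_policy='pack')
def train_model():
    # Real GPU work would go here; CUDA_VISIBLE_DEVICES is already pinned
    # to the device handed out by the GPU master.
    ...

@train_model.profile
def train_model_profile():
    # Optional short-running stand-in used only for the first (profiling)
    # invocation of this job type.
    ...

if __name__ == '__main__':
    train_model()
```
On the first invocation the master has no profile for this job type, so the profiler (or the full job, if no profiler is registered) runs under GPU memory monitoring and the measured peak is reported back via `CompleteJob`.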
#### File: grn/grpc_resources/master_pb2_grpc.py
```python
import grpc
from . import master_pb2 as master__pb2
class GPUMasterStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.RequestGPU = channel.unary_unary(
'/grn.GPUMaster/RequestGPU',
request_serializer=master__pb2.JobType.SerializeToString,
response_deserializer=master__pb2.GPU.FromString,
)
self.CompleteJob = channel.unary_unary(
'/grn.GPUMaster/CompleteJob',
request_serializer=master__pb2.JobProfile.SerializeToString,
response_deserializer=master__pb2.Empty.FromString,
)
self.JobTypeExists = channel.unary_unary(
'/grn.GPUMaster/JobTypeExists',
request_serializer=master__pb2.JobType.SerializeToString,
response_deserializer=master__pb2.BoolResponse.FromString,
)
class GPUMasterServicer(object):
"""Missing associated documentation comment in .proto file."""
def RequestGPU(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CompleteJob(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def JobTypeExists(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GPUMasterServicer_to_server(servicer, server):
rpc_method_handlers = {
'RequestGPU': grpc.unary_unary_rpc_method_handler(
servicer.RequestGPU,
request_deserializer=master__pb2.JobType.FromString,
response_serializer=master__pb2.GPU.SerializeToString,
),
'CompleteJob': grpc.unary_unary_rpc_method_handler(
servicer.CompleteJob,
request_deserializer=master__pb2.JobProfile.FromString,
response_serializer=master__pb2.Empty.SerializeToString,
),
'JobTypeExists': grpc.unary_unary_rpc_method_handler(
servicer.JobTypeExists,
request_deserializer=master__pb2.JobType.FromString,
response_serializer=master__pb2.BoolResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'grn.GPUMaster', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GPUMaster(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def RequestGPU(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grn.GPUMaster/RequestGPU',
master__pb2.JobType.SerializeToString,
master__pb2.GPU.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CompleteJob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grn.GPUMaster/CompleteJob',
master__pb2.JobProfile.SerializeToString,
master__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def JobTypeExists(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/grn.GPUMaster/JobTypeExists',
master__pb2.JobType.SerializeToString,
master__pb2.BoolResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
``` |
{
"source": "j-helland/warp",
"score": 3
} |
#### File: basic/example/A.py
```python
import warp
from warp import Pipe, Source, Parameter, ParameterFile, Product
import warp.utils as utils
source = Source()(
p0='config/A1.yml',
p1='config/A2.yml')
# By using the @static decorator, we can force all contained products to be cached statically.
class A(Pipe):
### parameters
# Multiple config files can be specified to handle parameters with more granularity.
config_file1 = ParameterFile('config/A1.yml')
config_file2 = ParameterFile('config/A2.yml')
# The config files contain `message1` and `message2` entries, which will be automatically loaded.
message1_attr = Parameter('message1', default='MESSAGE1_DEFAULT')
message2_attr = Parameter('message2', default='MESSAGE2_DEFAULT')
### products
# Note: the `static=True` flag can be passed to force WARP to not create a copy for each distinct session.
productA1 = Product('data/A1.txt')
productA2 = Product('data/A2.txt')
# @warp.dependencies(
# source1=source.Main.p0,
# source2=source.Main.p1)
@warp.produces(
productA1=productA1,
productA2=productA2)
def run(self) -> None:
# Imports here will be available for all methods of `Main`.
with utils.GlobalImport(globals()):
import os
# print('Source product: ', self.source1)
print(f'message1 = {self.message1_attr}, message2 = {self.message2_attr}')
# Note that the parameter `self.message` behaves like the value it contains.
# `Product`s behave similarly; the value that will be saved can be assigned directly
# to the `Product` variable.
self.productA1 << 'PRODUCTA1'
# `<<` is the same as `.value =`
self.productA2.value = self.message2_attr
# `@produces` automatically saves products via pickling after `run` completes
```
#### File: basic/example/B.py
```python
import warp
from warp import Pipe, Parameter, Product
from . import A
class Main(Pipe):
message = Parameter('test_B')
### products
# Since this product is never declared via `@produces`, it is ignored by WARP.
product = Product('data/B.txt', static=True)
@warp.dependencies(
productA1=A.A.productA1,
productA2=A.A.productA2)
def run(self):
print('main() in {:s}'.format(__file__))
with open(self.product, 'a') as f:
f.write(self.message)
```
#### File: warp/utils/config_parsing.py
```python
import datetime
from copy import deepcopy
from collections import deque
import yaml
# from .lazy_loader import LazyLoader as LL
# yaml = LL('yaml', globals(), 'yaml')
# json = LL('json', globals(), 'json')
# types
from typing import Dict, Any, Union, Tuple
__all__ = [
'load_config_file',
'save_config']
BASIC_TYPES: Tuple[type, ...] = (
type(None),
bool,
int,
float,
str,
datetime.datetime,
bytes,
complex)
ITERABLE_TYPES: Tuple[type, ...] = (
list,
tuple,
set,
dict)
class HyperParameter:
verbose = False
@classmethod
def set_verbosity(cls, value):
cls.verbose = value
def __init__(self, values=None, spec_type=None, spec=None):
# Default version is to provide a list of actual values
if values and type(values) is not list:
raise TypeError(f'hyperparameter values must be a list not {type(values)}')
if values:
if not isinstance(values[0],dict) and not isinstance(values[0],list):
values = sorted(set(values))
if self.verbose: print('Found literal (unique) hparam values: ',values)
elif len(values)==1 and isinstance(values[0],dict):
raise TypeError(f'known bug/unsupported, hparam len(values)==1 but elm is a dict')
else:
# values = sorted(values)
if self.verbose: print('Found literal hparam values: ',values)
# Can support other value shorthands/generators
if values is None:
# A simple count or range(n) type
if spec_type == 'int':
values = [i for i in range(spec)]
else:
raise TypeError(f'no generator for hyperparameter spec.type: {spec_type}')
# Could add another range type with low, high, stepsize... etc
if self.verbose: print('Found constructable hparam values: ',values)
self.values = values
def set_value(dictionary, keychain, value):
if len(keychain) == 1:
dictionary[keychain[0]] = value
return
set_value(dictionary[keychain[0]],keychain[1:],value)
return dictionary
class BFTreeExpander:
roots = {}
# hparam_keys = set()
# hparam_keychains = set()
hparam_keychains = {}
@classmethod
def reset_roots(cls):
cls.roots = {}
@classmethod
def get_roots(cls):
return [v.root for k,v in cls.roots.items()]
@classmethod
def reset_keys(cls):
# cls.hparam_keys = set()
# cls.hparam_keychains = set()
cls.hparam_keychains = {}
# @classmethod
# def get_hparam_key_list(cls):
# return list(cls.hparam_keys)
@classmethod
def get_hparam_keychains(cls):
return list(cls.hparam_keychains.keys())
# return cls.hparam_keychains
def __init__(self, root):
self.root = root
self.queue = deque()
self.id = id(self)
self.roots[self.id] = self
# recursive traverser
def expand(self, node = None, keychain = []):
if node is None: node = self.root
if isinstance(node, HyperParameter):
# self.hparam_keys.add(keychain[-1])
# self.hparam_keychains.add(".".join(keychain[1:])) # drop root key
self.hparam_keychains[".".join(keychain[1:])] = None
if len(node.values) == 1:
set_value(self.root,keychain,node.values[0])
return False
else:
for val in node.values:
new_root = set_value(deepcopy(self.root),keychain,val)
new_tree = BFTreeExpander(new_root)
return True # "expansion was performed"
if isinstance(node, dict):
for key,val in node.items():
if val is not None:
new_keychain = keychain.copy()
new_keychain.append(key)
self.queue.append((val, new_keychain))
while len(self.queue) > 0:
next_node, next_keychain = self.queue.popleft()
expanded = self.expand(next_node, next_keychain)
if expanded:
# since we had to expand this tree further,
# we can now remove it from the working set
# pop w/ default None, instead of del, as this can get called repeatedly on way up
self.roots.pop(self.id, None)
return True # bubble up
return False # no expansion performed
def expand_config(orig_config):
old_roots = [{'root': orig_config}]
while True:
old_ct = len(old_roots)
new_roots = []
for input_root in old_roots:
BFTreeExpander.reset_roots()
bfte = BFTreeExpander(input_root)
bfte.expand()
new_roots.extend(bfte.get_roots())
if old_ct == len(new_roots):
break
old_roots = new_roots.copy()
roots, keychains = [tree['root'] for tree in new_roots], BFTreeExpander.get_hparam_keychains()
BFTreeExpander.reset_roots()
BFTreeExpander.reset_keys()
return roots, keychains
############ PyYAML Custom obj constructors/representers ###############
def hparam_constructor(loader, node):
fields = loader.construct_mapping(node, deep=True)
hparam = HyperParameter(**fields)
yield hparam
def tuple_to_list_constructor(loader, node):
return list(loader.construct_sequence(node, deep=True))
def hparam_representer(dumper, node):
return dumper.represent_mapping(u'!HYPERPARAMETER', [("values",node.values)], flow_style=False )
# def load_config_file(path: str) -> Dict[str, Any]:
def load_config_file(path: str) -> Tuple[list, list]:
"""Load a YAML file into a dict.
Extensions accepted are `{.yml, .yaml}`.
Arguments:
path: The relative path to the YAML file to load.
Returns:
A dict version of the YAML file.
"""
yaml.add_constructor('!HYPERPARAMETER', hparam_constructor, yaml.FullLoader)
yaml.add_representer(HyperParameter, hparam_representer)
# HyperParameter.set_verbosity(args.verbose)
file_ext = path.split('.')[-1]
if file_ext in {'yml', 'yaml'}:
with open(path, 'rb') as file:
config = yaml.load(file, Loader=yaml.FullLoader)
else:
raise NotImplementedError('unrecognized file extension .{:s} for file {:s}'.format(file_ext, path))
# expanded_set, keychains = expand_config(config)
return expand_config(config)
# return config
def typecheck_config(config: Dict[str, Any]) -> None:
invalid_types = set()
def recursive_typecheck(struct: Union[Dict[str, Any], Any]) -> bool:
# Recurse through iterables
if isinstance(struct, ITERABLE_TYPES):
if isinstance(struct, dict):
return all(map(recursive_typecheck, struct.values()))
return all(map(recursive_typecheck, struct))
# Check against allowed types. Aggregate any found violations.
else:
if not isinstance(struct, BASIC_TYPES):
invalid_types.add(type(struct))
return False
return True
if not recursive_typecheck(config):
raise TypeError(f'config {config} contains invalid type(s) {invalid_types}')
def save_config(path: str, config: Dict[str, Any]) -> None:
try:
typecheck_config(config)
except TypeError as e:
raise RuntimeError( [e, RuntimeError('Cannot cache runtime parameter values due to invalid type(s).')] )
# cache
with open(path, 'w') as file:
yaml.dump(config, file, default_flow_style=False)
```
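A small, hedged sketch of how the `!HYPERPARAMETER` expansion above can be exercised. The YAML layout (a mapping with a `values` list) follows the custom constructor registered in `load_config_file`; the parameter names and the temporary file are purely illustrative.
```python
# Hypothetical demonstration of warp.utils.config_parsing.load_config_file.
import tempfile

from warp.utils.config_parsing import load_config_file

yaml_text = """
lr: !HYPERPARAMETER
  values: [0.001, 0.01]
batch_size: 32
"""

with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as tmp:
    tmp.write(yaml_text)
    path = tmp.name

configs, keychains = load_config_file(path)
# Expect one expanded config per `lr` value, plus the keychain 'lr'
# identifying which entry was a hyperparameter, e.g.
#   keychains -> ['lr']
#   configs   -> [{'lr': 0.001, 'batch_size': 32}, {'lr': 0.01, 'batch_size': 32}]
print(keychains)
print(configs)
```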
#### File: warp/visualization/gui.py
```python
import datetime
from warp.utils import GlobalImport
# from warp.graph import Ancestry
import warp.globals
import warp.constants as constants
__all__ = ['create_server']
def add_layout(app, workspace):
# sessions = sorted(workspace.sessions, key=lambda k: float(k))
sessions = workspace.sessions
session_timestamps = workspace.session_timestamps
sessions = sorted(enumerate(sessions), key=lambda k: session_timestamps[k[0]])
sessions = list(zip(*sessions))[1] # unzip and take dir names
dropdown_options = [{
'label': o, #datetime.fromtimestamp(float(o)),
'value': o}
for o in sessions] + [{'label': 'new session...', 'value': 'new'}]
app.layout = html.Div([
html.P(id='null'),
html.Div(
children=[
html.H1('Pipeline'),
html.P([
'Select a session: ',
dcc.Dropdown(
id='session-id-selector',
# value=f'Session: {workspace.home.session_id}',
options=dropdown_options,
placeholder=f'Currently loaded: {workspace.home.session_id}', #datetime.fromtimestamp(float(workspace.home.session_id))}',
),
]),
],
className='row',
style=dict(textAlign='center')),
html.Div(
className='row',
children=[
html.Div(
className='row',
style=dict(border='2px black solid'),
children=[
visdcc.Network(
id='pipeline-graph',
options=dict(
height='600px',
width='100%',
interaction=dict(hover='true'),
# layout=dict(hierarchical=dict(
# # enabled='true',
# sortMethod='directed')),
physics={
# enabled='true',
'solver': 'forceAtlas2Based',
'forceAtlas2Based.springLength': 200}
),
),
],
),
html.Div(
id='actions',
className='row',
style=dict(textAlign='center'),
children=[
html.H3('Actions'),
html.Button('Status', id='node-status-button', style={'margin-left': '15px'}),
html.Button('Backfill', id='node-backfill-button', style={'margin-left': '15px'}),
html.Button('Build', id='node-build-button', style={'margin-left': '15px'}),
html.P(),
html.Button('Reset Workspace', id='workspace-reset-button', style={'margin-left': '15px'}),
'\tWARNING: this will delete all non-static saved products in the current session.'
]
),
html.Div(
className='row',
children=[
html.H3('Source Code'),
dcc.Markdown(id='source-code-markdown'),
]
),
])
])
def create_server(graph, workspace, verbose=False):
with GlobalImport(globals()):
from datetime import datetime
from textwrap import dedent as d
import json
import time
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import visdcc
external_stylesheets = ['https://cdnjs.cloudflare.com/ajax/libs/vis/4.20.1/vis.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.title = 'Pipeline'
add_layout(app, workspace)
# TODO: I know it's bad to have an everything function, but visdcc seems to be breaking in unexpected ways otherwise
n_clicks_prev = dict(
status=0,
build=0,
backfill=0,
workspace=0)
@app.callback(
Output('pipeline-graph', 'data'),
Output('source-code-markdown', 'children'),
Output('session-id-selector', 'options'),
Output('session-id-selector', 'placeholder'),
[Input('pipeline-graph', 'selection'),
Input('node-status-button', 'n_clicks'),
Input('node-build-button', 'n_clicks'),
Input('node-backfill-button', 'n_clicks'),
Input('workspace-reset-button', 'n_clicks'),
Input('session-id-selector', 'value')])
def display_pipe_status(
selection,
n_clicks_status,
n_clicks_build,
n_clicks_backfill,
n_clicks_workspace,
session_id,
):
# sessions = sorted(workspace.sessions, key=lambda k: float(k))
sessions = workspace.sessions
session_placeholder = f'Currently loaded: {workspace.home.session_id}' #datetime.fromtimestamp(float(workspace.home.session_id))}'
session_dropdown_options = [{
'label': o, #datetime.fromtimestamp(float(o)),
'value': o}
for o in sessions] + [{'label': 'new session...', 'value': 'new'}]
if (session_id is not None) and (session_id != workspace.home.session_id):
if session_id == 'new':
session_id = str(time.time())
workspace.create_session(session_id)
else:
workspace.load_session(session_id)
session_placeholder = f'Currently loaded: {workspace.home.session_id}' #datetime.fromtimestamp(float(session_id))}'
state_change_clicked = False
source_str = ''
lineage = set()
if selection is not None:
source_str = """### {:s}\n```python\n{:s}\n```"""
for n in selection['nodes']:
source_str = source_str.format(n, workspace.view_pipe(n, return_str=True))
# lineage = set(Ancestry.lineage(graph, node_name=n))
lineage = set(workspace.PG.get_lineage(n))
if (n_clicks_status is not None) and (n_clicks_status > n_clicks_prev['status']):
workspace.status(n)
n_clicks_prev['status'] = n_clicks_status
if (n_clicks_build is not None) and (n_clicks_build > n_clicks_prev['build']):
workspace.build(n)
n_clicks_prev['build'] = n_clicks_build
state_change_clicked = True
if (n_clicks_backfill is not None) and (n_clicks_backfill > n_clicks_prev['backfill']):
workspace.backfill(n)
n_clicks_prev['backfill'] = n_clicks_backfill
state_change_clicked = True
if (n_clicks_workspace is not None) and (n_clicks_workspace > n_clicks_prev['workspace']):
session_id = workspace.home.session_id
workspace.clear_cache(session_id=session_id)
n_clicks_prev['workspace'] = n_clicks_workspace
state_change_clicked = True
nodes = map(
lambda n: dict(
id=n,
label=n.split('.')[-1],
title=n,
color={
'background':
'ForestGreen' if (
(workspace._is_pipe_built(n) and
n not in workspace._gap_pipes(n)) # inefficient
or workspace.PG.is_source_pipe(n)
) else 'FireBrick',
'border' : 'Orange' if n in lineage else 'Black',
'highlight' : {
'background':
'LimeGreen' if (
(workspace._is_pipe_built(n) and
n not in workspace._gap_pipes(n)) # inefficient
or workspace.PG.is_source_pipe(n)
) else 'LightCoral',
},
},
font=dict(color='Gainsboro'),
shape='box',
borderWidth=2),
graph.nodes)
edges = map(
lambda e: {
'id' : str(e),
'from' : e[0],
'to' : e[1],
'arrows': 'to',
'label' : e[2].split('/')[-1],
'title' : e[2].format(warp.globals.product_dir()),
'color' : {
'color': 'Black'}},
graph.edges(data='label'))
data = dict(
nodes=list(nodes),
edges=list(edges))
return data, source_str, session_dropdown_options, session_placeholder
app.run_server(
host=constants.WARP_HOST_NAME,
port=constants.WARP_PORT,
use_reloader=False)
``` |
{
"source": "j-helman/OpenAssetIO",
"score": 2
} |
#### File: openassetio/hostAPI/transactions.py
```python
from .._core.audit import auditApiCall
from .._core.debug import debugApiCall, Debuggable
__all__ = ['TransactionCoordinator', 'ScopedActionGroup']
class TransactionCoordinator(Debuggable):
"""
The TransactionCoordinator simplifies Host implementation by
providing a stack-like interface around the simple start/stop/cancel
transaction API exposed by the ManagerInterface.
"""
def __init__(self, manager):
self.__manager = manager
self._debugLogFn = manager._debugLogFn
def manager(self):
"""
@returns the Manager for which transactions are being managed.
"""
return self.__manager
## @name Action Group Management
## @ref action_group Management.
## Manages an action group stack within the Context, which in turn takes care
## of correctly calling the ManagerInterface's transactional API.
## @{
@debugApiCall
@auditApiCall("Transactions")
def scopedActionGroup(self, context):
"""
@return A python context manager that pushes an action group on
creation, and pops it when the scope is exited. Use with a 'with'
statement to simplify implementing action groups in a host. For
example:
@code
with transactionCoordinator.scopedActionGroup(context):
for t in textures:
publish(t)
@endcode
"""
return ScopedActionGroup(self, context)
@debugApiCall
@auditApiCall("Transactions")
def pushActionGroup(self, context):
"""
Push an ActionGroup onto the supplied Context. This will
increase the depth by 1, and a @ref transaction started if
necessary.
@return int The new depth of the Action Group stack
"""
if context.actionGroupDepth == 0:
self.__manager._startTransaction(context.managerInterfaceState)
context.actionGroupDepth += 1
return context.actionGroupDepth
@debugApiCall
@auditApiCall("Transactions")
def popActionGroup(self, context):
"""
Pops an ActionGroup from the supplied Context. This will
decrease the depth by 1 and the current @ref transaction will be
finished if necessary.
@return int The new depth of the Action Group stack
@exception RuntimeError If pop is called before push (ie: the
stack depth is 0)
"""
if context.actionGroupDepth == 0:
raise RuntimeError("Action group popped with none on the stack")
context.actionGroupDepth -= 1
if context.actionGroupDepth == 0:
self.__manager._finishTransaction(context.managerInterfaceState)
return context.actionGroupDepth
@debugApiCall
@auditApiCall("Transactions")
def cancelActions(self, context):
"""
Clears the current ActionGroup stack (if one has been started),
cancelling the @ref transaction if one has been started.
@return bool True if the current transaction cancelled successfully and any
actions performed since it began have been undone, or if there
was nothing to cancel. Otherwise False - which indicates the
Manager may not have been able to undo-unwind any actions that
occurred since the first ActionGroup was pushed onto the stack.
"""
status = True
if context.actionGroupDepth == 0:
return status
status = self.__manager._cancelTransaction(context.managerInterfaceState)
context.actionGroupDepth = 0
return status
def actionGroupDepth(self, context):
"""
@return int The current ActionGroup depth in the context.
"""
return context.actionGroupDepth
## @}
## @name State Distribution
## @ref stable_resolution_manager_state_distribution Management.
## In order to correlate a series of distributed tasks, the Manager's state
## held in a Context can be serialized, shared with other processes and
## restored. A common use of this is in distributed rendering scenarios, where
## it is desired to provide stable asset resolution over time.
## By distributing the Manager's state token to each job, the Manager can
## snapshot resolution at the time the originating Context was first created.
## @{
@auditApiCall("Transactions")
def freezeManagerState(self, context):
"""
Returns a serialized representation of the @ref manager_state
held in the supplied Context, so that it can be distributed to
other processes/etc...
@warning From this point, the context should not be used further
without first thawing the state back into the context.
@return str an ASCII compatible string
@see thawManagerState
"""
## @todo Ensure that other actions error after this point
## @todo Should this clear out the state/depth from the Context?
token = self.__manager._freezeState(context.managerInterfaceState)
return "%i_%s" % (context.actionGroupDepth, token)
@auditApiCall("Transactions")
def thawManagerState(self, token, context):
"""
Restores the @ref manager_state in the supplied Context so that
it represents the context as previously frozen.
@param token str The string returned by @ref freezeManagerState
@param context Context The context to restore the state into.
@note It is perfectly legal to thaw the same context multiple
times in parallel, as long as the ActionGroup depth is not
changed - ie: push/pop/cancelActionGroup should not be called.
This is because doing so quickly creates an incoherent state for the
Manager. The Host *must* guarantee that a given state has only
been thawed to a single active Context before such actions are
performed.
@warning This call only handles the opaque @ref manager_state
object, it does *not* restore other properties of the Context
(ie: access/retention, etc...)
"""
## @todo Sanitize input
depth, managerToken = token.split('_', 1)
context.actionGroupDepth = int(depth)
state = self.__manager._thawState(managerToken)
context.managerInterfaceState = state
## @}
class ScopedActionGroup(object):
"""
A convenience class to push/pop an action group based on the
lifetime of the object, useful when combined with a 'with'
statement.
"""
def __init__(self, transactionCoordinator, context, cancelOnException=True):
super(ScopedActionGroup, self).__init__()
self.cancelOnException = cancelOnException
self.__transactionCoordinator = transactionCoordinator
self.__context = context
def __enter__(self):
self.__transactionCoordinator.pushActionGroup(self.__context)
def __exit__(self, exceptionType, exceptionValue, traceback):
if exceptionType is not None and self.cancelOnException:
self.__transactionCoordinator.cancelActions(self.__context)
else:
self.__transactionCoordinator.popActionGroup(self.__context)
```
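The state-distribution docstrings above describe a freeze-on-submit / thaw-on-worker pattern. Here is a hedged sketch of that flow; it assumes a `hostAPI.Manager` instance and a populated `Context` already exist, and `dispatch` stands in for whatever farm submission mechanism a host uses.
```python
# Illustrative (not canonical) use of TransactionCoordinator state freezing.
from openassetio.hostAPI.transactions import TransactionCoordinator

def submit_jobs(manager, context, dispatch):
    coordinator = TransactionCoordinator(manager)
    # An ASCII token encoding the action-group depth plus the manager's
    # opaque frozen state.
    token = coordinator.freezeManagerState(context)
    dispatch(token)

def run_on_worker(manager, fresh_context, token):
    coordinator = TransactionCoordinator(manager)
    # Restores the manager state (but not access/retention, etc.) so asset
    # resolution matches the originating context.
    coordinator.thawManagerState(token, fresh_context)
    return fresh_context
```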
#### File: openassetio/pluginSystem/ManagerPlugin.py
```python
from .PluginSystemPlugin import PluginSystemPlugin
__all__ = ['ManagerPlugin']
class ManagerPlugin(PluginSystemPlugin):
"""
This class represents the various derived classes that make up the
binding to a @ref asset_management_system.
It used by the dynamic plug-in discovery mechanism (@ref
openassetio.pluginSystem.PluginSystem) to instantiate the main
classes in an implementation.
The class will never be instantiated itself, so all functionality is
via class methods.
In order to register a new asset management system, simply place a
python package on the appropriate search path, that has a top-level
attribute called 'plugin', that holds a class derived from this.
@warning This class may be used in a batch or UI session, so
it is imperative that no UI libraries (QtCore, QtGui
etc...) are imported unless @ref uiDelegate() is called, and
ideally, even then, this should be deferred until something is
requested from the @needsref
openassetio-ui.implementation.ManagerUIDelegate.
"""
@classmethod
def identifier(cls):
"""
Returns an identifier to uniquely identify the plug-in.
Generally, this should be the identifier used by the manager.
The identifier should use only alpha-numeric characters and '.',
'_' or '-'. For example:
"uk.co.foundry.asset.testManager"
@return str
@see openassetio.managerAPI.ManagerInterface
"""
raise NotImplementedError
@classmethod
def interface(cls):
"""
Constructs an instance of the @ref
openassetio.managerAPI.ManagerInterface.
This is an instance of some class derived from ManagerInterface
to be bound to the Host-facing @ref openassetio.hostAPI.Manager.
Generally this is only directly called by the @ref
openassetio.pluginSystem.PluginSystemManagerFactory. It may be
called multiple times in a session, but as the
ManagerInterface API itself is specified as being stateless
(aside from any internal caching/etc...) there is no
requirement to always return a new instance.
@return ManagerInterface instance
"""
raise NotImplementedError
@classmethod
def uiDelegate(cls, interfaceInstance):
"""
Constructs an instance of the @needsref
openassetio-ui.implementation.ManagerUIDelegate
This is an instance of some class derived from ManagerUIDelegate
that is used by the @needsref UISessionManager to provide widgets to
a host that may bind into panels in the application, or to allow
the application to delegate asset browsing/picking etc...
@param interfaceInstance An instance of the plugins interface as
returned by @ref interface(), this is to allow UI to be
configured in relation to a specific instantiation, which may
perhaps target a different endpoint due to its settings, etc...
@note It is safe to import any UI toolkits, etc... *within
this call*, but generally you may want to defer this to methods
in the delegate.
@return An instance of some class derived from @needsref
ManagerUIDelegate.
"""
return None
```
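Putting the docstring above into practice, a hypothetical plugin package might expose a module-level `plugin` attribute like the sketch below. `MyManagerInterface` is an assumed ManagerInterface subclass defined elsewhere in the same package; only the `ManagerPlugin` base class and the required classmethods come from the code above.
```python
# Hypothetical contents of a plugin package's top-level module.
from openassetio.pluginSystem.ManagerPlugin import ManagerPlugin

from .interface import MyManagerInterface  # assumed module in this package

class MyManagerPlugin(ManagerPlugin):
    @classmethod
    def identifier(cls):
        # Should match the identifier reported by the ManagerInterface.
        return "org.example.asset.myManager"

    @classmethod
    def interface(cls):
        return MyManagerInterface()

# The plugin discovery mechanism looks for this top-level attribute.
plugin = MyManagerPlugin
```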
#### File: openassetio/hostAPI/test_transactions.py
```python
import pytest
from unittest import mock
from openassetio import Context
from openassetio.hostAPI import transactions as t, Manager
from openassetio.managerAPI import HostSession, ManagerInterface
@pytest.fixture
def mock_host_session():
return mock.create_autospec(HostSession)
@pytest.fixture
def mock_manager(mock_host_session):
mock_interface = mock.create_autospec(ManagerInterface)
return Manager(mock_interface, mock_host_session)
@pytest.fixture
def transaction_coordinator(mock_manager):
return t.TransactionCoordinator(mock_manager)
@pytest.fixture
def a_context():
context = Context()
context.managerInterfaceState = "some-manager-state"
return context
@pytest.fixture
def a_scoped_group(a_context):
mocked_coordinator = mock.create_autospec(t.TransactionCoordinator)
return t.ScopedActionGroup(mocked_coordinator, a_context)
class TestTransactionCoordinator:
def test_construction(self, mock_manager):
coordinator = t.TransactionCoordinator(mock_manager)
assert coordinator.manager() is mock_manager
def test_scopedActionGroup(self, transaction_coordinator, a_context):
scoped_group = transaction_coordinator.scopedActionGroup(a_context)
assert isinstance(scoped_group, t.ScopedActionGroup)
assert scoped_group.cancelOnException is True
def test_pushActionGroup(self, transaction_coordinator, mock_host_session, a_context):
mock_manager = transaction_coordinator.manager()._interface()
state = a_context.managerInterfaceState
assert a_context.actionGroupDepth == 0
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=None)
transaction_coordinator.pushActionGroup(a_context)
assert a_context.actionGroupDepth == 1
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=state, finish=None, cancel=None)
transaction_coordinator.pushActionGroup(a_context)
assert a_context.actionGroupDepth == 2
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=None)
def test_popActionGroup(self, transaction_coordinator, mock_host_session, a_context):
mock_manager = transaction_coordinator.manager()._interface()
state = a_context.managerInterfaceState
a_context.actionGroupDepth = 2
transaction_coordinator.popActionGroup(a_context)
assert a_context.actionGroupDepth == 1
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=None)
transaction_coordinator.popActionGroup(a_context)
assert a_context.actionGroupDepth == 0
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=state, cancel=None)
with pytest.raises(RuntimeError):
transaction_coordinator.popActionGroup(a_context)
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=None)
def test_cancelActions(self, transaction_coordinator, mock_host_session, a_context):
mock_manager = transaction_coordinator.manager()._interface()
state = a_context.managerInterfaceState
# Check return values and depth management
mock_manager.cancelTransaction.return_value = True
a_context.actionGroupDepth = 2
assert transaction_coordinator.cancelActions(a_context) is True
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=state)
assert a_context.actionGroupDepth == 0
mock_manager.cancelTransaction.return_value = False
a_context.actionGroupDepth = 2
assert transaction_coordinator.cancelActions(a_context) is False
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=state)
assert a_context.actionGroupDepth == 0
# Check depth == 0 is an early-out
mock_manager.cancelTransaction.return_value = False
a_context.actionGroupDepth = 0
assert transaction_coordinator.cancelActions(a_context) is True
self.__assertTransactionCalls(
mock_manager, mock_host_session, start=None, finish=None, cancel=None)
assert a_context.actionGroupDepth == 0
def test_actionGroupDepth(self, transaction_coordinator, a_context):
assert a_context.actionGroupDepth == 0
assert transaction_coordinator.actionGroupDepth(a_context) == 0
transaction_coordinator.pushActionGroup(a_context)
assert transaction_coordinator.actionGroupDepth(a_context) == 1
a_context.actionGroupDepth = 77
assert transaction_coordinator.actionGroupDepth(a_context) == 77
def test_managerInterfaceState_freeze_thaw(
self, transaction_coordinator, mock_host_session, a_context):
mock_manager = transaction_coordinator.manager()._interface()
state = a_context.managerInterfaceState
mock_frozen_state = f"frozen-{state}"
mock_manager.freezeState.return_value = mock_frozen_state
mock_manager.thawState.return_value = state
action_group_depth = 4
a_context.actionGroupDepth = action_group_depth
# Freeze
token = transaction_coordinator.freezeManagerState(a_context)
mock_manager.freezeState.assert_called_once_with(state, mock_host_session)
assert isinstance(token, str)
assert token != ""
# Clear context (this isn't done by freeze)
a_context.managerInterfaceState = None
a_context.actionGroupDepth = 0
mock_manager.thawState.assert_not_called()
# Thaw
transaction_coordinator.thawManagerState(token, a_context)
mock_manager.thawState.assert_called_once_with(mock_frozen_state, mock_host_session)
assert a_context.managerInterfaceState == state
assert a_context.actionGroupDepth == action_group_depth
@staticmethod
def __assertTransactionCalls(mock_manager, mock_host_session, start, finish, cancel):
for method, arg in (
(mock_manager.startTransaction, start),
(mock_manager.finishTransaction, finish),
(mock_manager.cancelTransaction, cancel)
):
if arg is None:
method.assert_not_called()
else:
method.assert_called_once_with(arg, mock_host_session)
mock_manager.reset_mock()
class TestScopedActionGroup:
def test_scope(self, a_scoped_group):
mock_coordinator = a_scoped_group._ScopedActionGroup__transactionCoordinator
a_context = a_scoped_group._ScopedActionGroup__context
a_scoped_group.cancelOnException = True
with a_scoped_group:
self.__assertActionGroupCalls(mock_coordinator, push=a_context, pop=None, cancel=None)
self.__assertActionGroupCalls(mock_coordinator, push=None, pop=a_context, cancel=None)
with pytest.raises(RuntimeError):
with a_scoped_group:
self.__assertActionGroupCalls(
mock_coordinator, push=a_context, pop=None, cancel=None)
raise RuntimeError
self.__assertActionGroupCalls(mock_coordinator, push=None, pop=None, cancel=a_context)
def test_scope_does_not_cancel(self, a_scoped_group):
mock_coordinator = a_scoped_group._ScopedActionGroup__transactionCoordinator
a_context = a_scoped_group._ScopedActionGroup__context
a_scoped_group.cancelOnException = False
with pytest.raises(RuntimeError):
with a_scoped_group:
self.__assertActionGroupCalls(
mock_coordinator, push=a_context, pop=None, cancel=None)
raise RuntimeError
self.__assertActionGroupCalls(mock_coordinator, push=None, pop=a_context, cancel=None)
@staticmethod
def __assertActionGroupCalls(mock_coordinator, push, pop, cancel):
for method, arg in (
(mock_coordinator.pushActionGroup, push),
(mock_coordinator.popActionGroup, pop),
(mock_coordinator.cancelActions, cancel)
):
if arg is None:
method.assert_not_called()
else:
method.assert_called_once_with(arg)
mock_coordinator.reset_mock()
``` |
{
"source": "Jhelum-Ch/sequential_social_dilemma_games",
"score": 2
} |
#### File: sequential_social_dilemma_games/run_scripts/train_baseline.py
```python
import ray
from ray import tune
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.agents.ppo.ppo_policy_graph import PPOPolicyGraph
from ray.rllib.models import ModelCatalog
from ray.tune import run_experiments
from ray.tune.registry import register_env
import tensorflow as tf
from social_dilemmas.envs.harvest import HarvestEnv
from social_dilemmas.envs.cleanup import CleanupEnv
from models.conv_to_fc_net import ConvToFCNet
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'exp_name', None,
'Name of the ray_results experiment directory where results are stored.')
tf.app.flags.DEFINE_string(
'env', 'cleanup',
'Name of the environment to rollout. Can be cleanup or harvest.')
tf.app.flags.DEFINE_string(
'algorithm', 'A3C',
'Name of the rllib algorithm to use.')
tf.app.flags.DEFINE_integer(
'num_agents', 5,
'Number of agent policies')
tf.app.flags.DEFINE_integer(
'train_batch_size', 30000,
'Size of the total dataset over which one epoch is computed.')
tf.app.flags.DEFINE_integer(
'checkpoint_frequency', 20,
'Number of steps before a checkpoint is saved.')
tf.app.flags.DEFINE_integer(
'training_iterations', 10000,
'Total number of steps to train for')
tf.app.flags.DEFINE_integer(
'num_cpus', 2,
'Number of available CPUs')
tf.app.flags.DEFINE_integer(
'num_gpus', 1,
'Number of available GPUs')
tf.app.flags.DEFINE_boolean(
'use_gpus_for_workers', False,
'Set to true to run workers on GPUs rather than CPUs')
tf.app.flags.DEFINE_boolean(
'use_gpu_for_driver', False,
'Set to true to run driver on GPU rather than CPU.')
tf.app.flags.DEFINE_float(
'num_workers_per_device', 2,
'Number of workers to place on a single device (CPU or GPU)')
harvest_default_params = {
'lr_init': 0.00136,
'lr_final': 0.000028,
'entropy_coeff': -.000687}
cleanup_default_params = {
'lr_init': 0.00126,
'lr_final': 0.000012,
'entropy_coeff': -.00176}
def setup(env, hparams, algorithm, train_batch_size, num_cpus, num_gpus,
num_agents, use_gpus_for_workers=False, use_gpu_for_driver=False,
num_workers_per_device=1):
if env == 'harvest':
def env_creator(_):
return HarvestEnv(num_agents=num_agents)
single_env = HarvestEnv()
else:
def env_creator(_):
return CleanupEnv(num_agents=num_agents)
single_env = CleanupEnv()
env_name = env + "_env"
register_env(env_name, env_creator)
obs_space = single_env.observation_space
act_space = single_env.action_space
# Each policy can have a different configuration (including custom model)
def gen_policy():
return (PPOPolicyGraph, obs_space, act_space, {})
# Setup PPO with an ensemble of `num_policies` different policy graphs
policy_graphs = {}
for i in range(num_agents):
policy_graphs['agent-' + str(i)] = gen_policy()
def policy_mapping_fn(agent_id):
return agent_id
# register the custom model
model_name = "conv_to_fc_net"
ModelCatalog.register_custom_model(model_name, ConvToFCNet)
agent_cls = get_agent_class(algorithm)
config = agent_cls._default_config.copy()
# information for replay
config['env_config']['func_create'] = tune.function(env_creator)
config['env_config']['env_name'] = env_name
config['env_config']['run'] = algorithm
# Calculate device configurations
gpus_for_driver = int(use_gpu_for_driver)
cpus_for_driver = 1 - gpus_for_driver
if use_gpus_for_workers:
spare_gpus = (num_gpus - gpus_for_driver)
num_workers = int(spare_gpus * num_workers_per_device)
num_gpus_per_worker = spare_gpus / num_workers
num_cpus_per_worker = 0
else:
spare_cpus = (num_cpus - cpus_for_driver)
num_workers = int(spare_cpus * num_workers_per_device)
num_gpus_per_worker = 0
num_cpus_per_worker = spare_cpus / num_workers
# hyperparams
config.update({
"train_batch_size": train_batch_size,
"horizon": 1000,
"lr_schedule":
[[0, hparams['lr_init']],
[20000000, hparams['lr_final']]],
"num_workers": num_workers,
"num_gpus": gpus_for_driver, # The number of GPUs for the driver
"num_cpus_for_driver": cpus_for_driver,
"num_gpus_per_worker": num_gpus_per_worker, # Can be a fraction
"num_cpus_per_worker": num_cpus_per_worker, # Can be a fraction
"entropy_coeff": hparams['entropy_coeff'],
"multiagent": {
"policy_graphs": policy_graphs,
"policy_mapping_fn": tune.function(policy_mapping_fn),
},
"model": {"custom_model": "conv_to_fc_net", "use_lstm": True,
"lstm_cell_size": 128}
})
return algorithm, env_name, config
def main(unused_argv):
ray.init(num_cpus=FLAGS.num_cpus, redirect_output=True)
if FLAGS.env == 'harvest':
hparams = harvest_default_params
else:
hparams = cleanup_default_params
alg_run, env_name, config = setup(FLAGS.env, hparams, FLAGS.algorithm,
FLAGS.train_batch_size,
FLAGS.num_cpus,
FLAGS.num_gpus, FLAGS.num_agents,
FLAGS.use_gpus_for_workers,
FLAGS.use_gpu_for_driver,
FLAGS.num_workers_per_device)
if FLAGS.exp_name is None:
exp_name = FLAGS.env + '_' + FLAGS.algorithm
else:
exp_name = FLAGS.exp_name
print('Commencing experiment', exp_name)
run_experiments({
exp_name: {
"run": alg_run,
"env": env_name,
"stop": {
"training_iteration": FLAGS.training_iterations
},
'checkpoint_freq': FLAGS.checkpoint_frequency,
"config": config,
}
})
if __name__ == '__main__':
tf.app.run(main)
``` |
{
"source": "jhembe/python-learning",
"score": 4
} |
#### File: python-learning/test/challenge1.py
```python
names_array = []
user_input = ""
def add_people():
num_people = int(input("Enter the number of people you want to add : "))
for person in range(num_people):
new_name = str(input(" > "))
names_array.append(new_name)
print(f'The people in the list are : {names_array}')
def del_people():
name_to_delete = input("Enter the name of the user you wish to delete : ")
names_array.remove(name_to_delete)
print(f'The people in the list are : {names_array}')
while user_input.lower() != "quit":
user_input = input("> ")
if user_input.lower() == "add":
add_people()
elif user_input.lower() == "del":
del_people()
elif user_input.lower() == "help":
print('''
add - To add users
del - To delete users
quit - To exit the program
''')
elif user_input.lower() == "quit":
exit()
else :
print("I dont understand yout input")
```
#### File: jhembe/python-learning/test.py
```python
import utils
numbers = [3,5,1,2,54,2,56,7,13,56,78,9,0]
maximum_number = utils.find_max(numbers)
print(maximum_number)
``` |
{
"source": "jhemedin/ACT",
"score": 3
} |
#### File: act/qc/clean.py
```python
import xarray as xr
import re
import numpy as np
import copy
@xr.register_dataset_accessor('clean')
class CleanDataset(object):
"""
Class for cleaning up QC variables to standard cf-compliance
"""
def __init__(self, xarray_obj):
self._obj = xarray_obj
@property
def matched_qc_variables(self, check_arm_syntax=True):
"""
Find variables that are QC variables and return list of names.
Parameters
----------
check_arm_syntax : boolean
ARM uses a standard of starting all quality control variables
with "qc" joined with an underscore. This is a more robust method
of getting the quality control variables before the standard_name
attribute is added. If this is true will first check using
attributes and will then check if variable starts with "qc".
Returns
-------
variables : list of str
A list of strings containing the name of each variable.
"""
variables = []
# Will need to find all historical cases and add to list
qc_dict = {'description':
["See global attributes for individual.+bit descriptions.",
("This field contains bit packed integer values, where each "
"bit represents a QC test on the data. Non-zero bits indicate "
"the QC condition given in the description for those bits; "
"a value of 0.+ indicates the data has not "
"failed any QC tests."),
(r"This field contains bit packed values which should be "
r"interpreted as listed..+")
]
}
# Loop over each variable and look for a match to an attribute that
# would exist if the variable is a QC variable
for var in self._obj.data_vars:
attributes = self._obj[var].attrs
for att_name in attributes:
if att_name in qc_dict.keys():
for value in qc_dict[att_name]:
if re.match(value, attributes[att_name]) is not None:
variables.append(var)
break
# Check the start of the variable name. If it begins with qc_ assume quality
# control variable from ARM.
if check_arm_syntax:
variables_qc = [var for var in self._obj.data_vars if var.startswith('qc_')]
variables = variables + variables_qc
variables = list(set(variables))
return variables
def cleanup(self, cleanup_arm_qc=True, clean_arm_state_vars=None,
handle_missing_value=True, link_qc_variables=True,
normalize_assessment=False,
**kwargs):
"""
Wrapper method to automatically call all the standard methods
for obj cleanup.
Parameters
----------
cleanup_arm_qc : bool
Option to clean xarray object from ARM QC to CF QC standards.
Default is True.
clean_arm_state_vars : list of str
Option to clean xarray object state variables from ARM to CF
standards. Pass in list of variable names.
handle_missing_value : bool
            Go through variables and look for cases where a QC or state variable
            was converted to a float and missing values set to np.nan. This
            is done because of xarray's default to use mask_and_scale=True.
This will convert the data type back to integer and replace
any instances of np.nan to a missing value indicator (most
likely -9999).
link_qc_variables : bool
            Option to link QC variables through ancillary_variables if not
already set.
normalize_assessment : bool
Option to clean up assessments to use the same terminology. Set to
False for default because should only be an issue after adding DQRs
and the function to add DQRs calls this method.
**kwargs : keywords
Keyword arguments passed through to clean.clean_arm_qc
method.
"""
# Convert ARM QC to be more like CF state fields
if cleanup_arm_qc:
self._obj.clean.clean_arm_qc(**kwargs)
        # Convert ARM state fields to be more like CF state fields
if clean_arm_state_vars is not None:
self._obj.clean.clean_arm_state_variables(clean_arm_state_vars)
# Correctly convert data type because of missing value
# indicators in state and QC variables. Needs to be run after
# clean.clean_arm_qc to use CF attribute names.
if handle_missing_value:
self._obj.clean.handle_missing_values()
# Add some ancillary_variables linkages
# between data variable and QC variable
if link_qc_variables:
self._obj.clean.link_variables()
# Update the terminology used with flag_assessments to be consistent
if normalize_assessment:
self._obj.clean.normalize_assessment()
def handle_missing_values(self, default_missing_value=np.int32(-9999)):
"""
Correctly handle missing_value and _FillValue in object.
xarray will automatically replace missing_value and
_FillValue in the data with NaN. This is great for data set
as type float but not great for int data. Can cause issues
with QC and state fields. This will loop through the array
looking for state and QC fields and revert them back to int
data type if upconverted to float to handle NaNs. Issue is that
xarray will convert data type to float if the attribute is defined
even if no data are set as missing value. xarray will also then
remove the missing_value or _FillValue variable attribute. This
will put the missing_value attribute back if needed.
Parameters
----------
default_missing_value : numpy int or float
The default missing value to use if a missing_value attribute
is not defined but one is needed.
"""
state_att_names = ['flag_values', 'flag_meanings',
'flag_masks', 'flag_attributes']
# Look for variables that have 2 of the state_att_names defined
        # as attributes and are of type float. If so assume the variable
        # was incorrectly converted to float type.
for var in self._obj.data_vars:
var_att_names = self._obj[var].attrs.keys()
if (len(set(state_att_names) & set(var_att_names)) >= 2 and
self._obj[var].values.dtype in
[np.dtype('float16'), np.dtype('float32'),
np.dtype('float64')]):
# Look at units variable to see if this is the stupid way some
# ARM products mix data and state variables. If the units are not
# in the normal list of unitless type assume this is a data variable
                # and skip. Other option is to look for a valid_range attribute
# and skip. This is commented out for now since the units check
# appears to be working.
try:
if self._obj[var].attrs['units'] not in ['1', 'unitless', '', ' ']:
continue
# self._obj[var].attrs['valid_range']
# continue
except KeyError:
pass
# Change any np.nan values to missing value indicator
data = self._obj[var].values
data[np.isnan(data)] = default_missing_value.astype(data.dtype)
# Convert data to match type of flag_mask or flag_values
# as the best guess of what type is correct.
found_dtype = False
for att_name in ['flag_masks', 'flag_values']:
try:
att_value = self._obj[var].attrs[att_name]
if isinstance(att_value, (list, tuple)):
dtype = att_value[0].dtype
else:
dtype = att_value.dtype
data = data.astype(dtype)
found_dtype = True
break
except (KeyError, IndexError):
pass
# If flag_mask or flag_values is not available choose an int type
# and set data to that type.
if found_dtype is False:
data = data.astype(default_missing_value.dtype)
# Return data to object and add missing value indicator
# attribute to variable.
self._obj[var].values = data
self._obj[var].attrs['missing_value'] = \
default_missing_value.astype(data.dtype)
def get_attr_info(self, variable=None, flag=False):
"""
Get ARM quality control definitions from the ARM standard
bit_#_description, ... attributes and return as dictionary.
Will attempt to guess if the flag is integer or bit packed
based on what attributes are set.
Parameters
----------
variable : str
Variable name to get attribute information. If set to None
will get global attributes.
flag : bool
Optional flag indicating if QC is expected to be bitpacked
or integer. Flag = True indicates integer QC. Default
is bitpacked or False.
Returns
-------
attributes dictionary : dict or None
            A dictionary containing the attribute information converted from
ARM QC to CF QC. All keys include 'flag_meanings', 'flag_masks',
'flag_values', 'flag_assessments', 'flag_tests', 'arm_attributes'.
Returns None if none found.
"""
string = 'bit'
if flag:
string = 'flag'
else:
found_string = False
try:
if self._obj.attrs['qc_bit_comment']:
string = 'bit'
found_string = True
except KeyError:
pass
if found_string is False:
try:
if self._obj.attrs['qc_flag_comment']:
string = 'flag'
found_string = True
except KeyError:
pass
if found_string is False:
var = self.matched_qc_variables
if len(var) > 0:
try:
if self._obj[variable].attrs['flag_method'] == 'integer':
string = 'flag'
found_string = True
del self._obj[variable].attrs['flag_method']
except KeyError:
pass
try:
if variable:
attr_description_pattern = (r"(^" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj[variable].attrs
else:
attr_description_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(description$)")
attr_assessment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(assessment$)")
attr_comment_pattern = (r"(^qc_" + string +
r")_([0-9]+)_(comment$)")
attributes = self._obj.attrs
except KeyError:
return None
assessment_bit_num = []
description_bit_num = []
comment_bit_num = []
flag_masks = []
flag_meanings = []
flag_assessments = []
flag_comments = []
arm_attributes = []
dtype = np.int32
for att_name in attributes:
try:
description = re.match(attr_description_pattern, att_name)
description_bit_num.append(int(description.groups()[1]))
flag_meanings.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
assessment = re.match(attr_assessment_pattern, att_name)
assessment_bit_num.append(int(assessment.groups()[1]))
flag_assessments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
try:
comment = re.match(attr_comment_pattern, att_name)
comment_bit_num.append(int(comment.groups()[1]))
flag_comments.append(attributes[att_name])
arm_attributes.append(att_name)
except AttributeError:
pass
if variable is not None:
# Try and get the data type from the variable if it is an integer
# If not an integer make the flag values integers.
try:
dtype = self._obj[variable].values.dtype
if np.issubdtype(dtype, np.integer):
pass
else:
dtype = np.int32
except AttributeError:
pass
# Sort on bit number to ensure correct description order
index = np.argsort(description_bit_num)
flag_meanings = np.array(flag_meanings)
description_bit_num = np.array(description_bit_num)
flag_meanings = flag_meanings[index]
description_bit_num = description_bit_num[index]
# Sort on bit number to ensure correct assessment order
if len(flag_assessments) > 0:
if len(flag_assessments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in assessment_bit_num:
assessment_bit_num.append(ii)
flag_assessments.append('')
index = np.argsort(assessment_bit_num)
flag_assessments = np.array(flag_assessments)
flag_assessments = flag_assessments[index]
# Sort on bit number to ensure correct comment order
if len(flag_comments) > 0:
if len(flag_comments) < len(flag_meanings):
for ii in range(1, len(flag_meanings) + 1):
if ii not in comment_bit_num:
comment_bit_num.append(ii)
flag_comments.append('')
index = np.argsort(comment_bit_num)
flag_comments = np.array(flag_comments)
flag_comments = flag_comments[index]
# Convert bit number to mask number
if len(description_bit_num) > 0:
flag_masks = np.array(description_bit_num)
flag_masks = np.left_shift(1, flag_masks - 1)
# build dictionary to return values
if len(flag_masks) > 0 or len(description_bit_num) > 0:
return_dict = dict()
return_dict['flag_meanings'] = list(np.array(flag_meanings,
dtype=str))
if len(flag_masks) > 0 and max(flag_masks) > np.iinfo(np.uint32).max:
flag_mask_dtype = np.uint64
else:
flag_mask_dtype = np.uint32
if flag:
return_dict['flag_values'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_masks'] = list(np.array([],
dtype=flag_mask_dtype))
else:
return_dict['flag_values'] = list(np.array([],
dtype=dtype))
return_dict['flag_masks'] = list(np.array(flag_masks,
dtype=flag_mask_dtype))
return_dict['flag_assessments'] = list(np.array(flag_assessments,
dtype=str))
return_dict['flag_tests'] = list(np.array(description_bit_num,
dtype=dtype))
return_dict['flag_comments'] = list(np.array(flag_comments,
dtype=str))
return_dict['arm_attributes'] = arm_attributes
else:
# If nothing to return set to None
return_dict = None
return return_dict
def clean_arm_state_variables(self,
variables,
override_cf_flag=True,
clean_units_string=True,
integer_flag=True):
"""
Function to clean up state variables to use more CF style.
Parameters
----------
variables : str or list of str
List of variable names to update.
override_cf_flag : bool
Option to overwrite CF flag_meanings attribute if it exists
with the values from ARM QC bit_#_description.
clean_units_string : bool
Option to update units string if set to 'unitless' to be
udunits compliant '1'.
integer_flag : bool
Pass through keyword of 'flag' for get_attr_info().
"""
if isinstance(variables, str):
variables = [variables]
for var in variables:
flag_info = self.get_attr_info(variable=var, flag=integer_flag)
if flag_info is None:
return
# Add new attributes to variable
for attr in ['flag_values', 'flag_meanings', 'flag_masks']:
if len(flag_info[attr]) > 0:
# Only add if attribute does not exist.
                    if attr not in self._obj[var].attrs.keys():
                        self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
                    # If override flag is set, set the attribute even if it exists
elif override_cf_flag:
self._obj[var].attrs[attr] = copy.copy(flag_info[attr])
# Remove replaced attributes
arm_attributes = flag_info['arm_attributes']
for attr in arm_attributes:
try:
del self._obj[var].attrs[attr]
except KeyError:
pass
# Clean up units attribute from unitless to udunits '1'
if (clean_units_string and
self._obj[var].attrs['units'] == 'unitless'):
self._obj[var].attrs['units'] = '1'
def correct_valid_minmax(self, qc_variable):
"""
Function to correct the name and location of quality control limit
variables that use valid_min and valid_max incorrectly.
Parameters
----------
qc_variable : str
Name of quality control variable in xarray object to correct.
"""
test_dict = {'valid_min': 'fail_min',
'valid_max': 'fail_max',
'valid_delta': 'fail_delta'}
aa = re.match(r"^qc_(.+)", qc_variable)
variable = None
try:
variable = aa.groups()[0]
except AttributeError:
return
made_change = False
try:
flag_meanings = copy.copy(
self._obj[qc_variable].attrs['flag_meanings'])
except KeyError:
return
for attr in test_dict.keys():
for ii, test in enumerate(flag_meanings):
if attr in test:
flag_meanings[ii] = re.sub(attr, test_dict[attr], test)
made_change = True
try:
self._obj[qc_variable].attrs[test_dict[attr]] = \
copy.copy(self._obj[variable].attrs[attr])
del self._obj[variable].attrs[attr]
except KeyError:
pass
if made_change:
self._obj[qc_variable].attrs['flag_meanings'] = flag_meanings
def link_variables(self):
"""
Add some attributes to link and explain data
to QC data relationship. Will use non-CF standard_name
of quality_flag. Hopefully this will be added to the
standard_name table in the future.
"""
for var in self._obj.data_vars:
aa = re.match(r"^qc_(.+)", var)
try:
variable = aa.groups()[0]
qc_variable = var
except AttributeError:
continue
# Skip data quality fields.
try:
if not ('Quality check results on field:' in
self._obj[var].attrs['long_name']):
continue
except KeyError:
pass
# Get existing data variable ancillary_variables attribute
try:
ancillary_variables = self._obj[variable].\
attrs['ancillary_variables']
except KeyError:
ancillary_variables = ''
# If the QC variable is not in ancillary_variables add
if qc_variable not in ancillary_variables:
ancillary_variables = qc_variable
self._obj[variable].attrs['ancillary_variables']\
= copy.copy(ancillary_variables)
            # Check if QC variable has correct standard_name and if not fix it.
correct_standard_name = 'quality_flag'
try:
if self._obj[qc_variable].attrs['standard_name'] != correct_standard_name:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
except KeyError:
self._obj[qc_variable].attrs['standard_name'] = correct_standard_name
def clean_arm_qc(self,
override_cf_flag=True,
clean_units_string=True,
correct_valid_min_max=True):
"""
Function to clean up xarray object QC variables.
Parameters
----------
override_cf_flag : bool
Option to overwrite CF flag_masks, flag_meanings, flag_values
if exists.
clean_units_string : bool
Option to clean up units string from 'unitless'
to udunits compliant '1'.
correct_valid_min_max : bool
Option to correct use of valid_min and valid_max with QC variables
            by moving from data variable to QC variable, renaming to fail_min,
            fail_max and fail_delta if the valid_min, valid_max or valid_delta
            is listed in a bit description attribute. If not listed as
            used with QC, assume it is being used correctly.
"""
global_qc = self.get_attr_info()
for qc_var in self.matched_qc_variables:
# Clean up units attribute from unitless to udunits '1'
try:
if (clean_units_string and
self._obj[qc_var].attrs['units'] == 'unitless'):
self._obj[qc_var].attrs['units'] = '1'
except KeyError:
pass
qc_attributes = self.get_attr_info(variable=qc_var)
if qc_attributes is None:
qc_attributes = global_qc
# Add new attributes to variable
for attr in ['flag_masks', 'flag_meanings',
'flag_assessments', 'flag_values', 'flag_comments']:
if qc_attributes is not None and len(qc_attributes[attr]) > 0:
                    # Only add if attribute does not exist
                    if attr not in self._obj[qc_var].attrs.keys():
                        self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
                    # If override flag is set, add the attribute even if it already exists
elif override_cf_flag:
self._obj[qc_var].attrs[attr] = copy.copy(qc_attributes[attr])
# Remove replaced attributes
if qc_attributes is not None:
arm_attributes = qc_attributes['arm_attributes']
if 'description' not in arm_attributes:
arm_attributes.append('description')
if 'flag_method' not in arm_attributes:
arm_attributes.append('flag_method')
for attr in arm_attributes:
try:
del self._obj[qc_var].attrs[attr]
except KeyError:
pass
# Check for use of valid_min and valid_max as QC limits and fix
if correct_valid_min_max:
self._obj.clean.correct_valid_minmax(qc_var)
# Clean up global attributes
if global_qc is not None:
global_attributes = global_qc['arm_attributes']
global_attributes.extend(['qc_bit_comment'])
for attr in global_attributes:
try:
del self._obj.attrs[attr]
except KeyError:
pass
def normalize_assessment(self, variables=None, exclude_variables=None,
qc_lookup={"Incorrect": "Bad", "Suspect": "Indeterminate"}):
"""
Method to clean up assessment terms used to be consistent between
embedded QC and DQRs.
Parameters
----------
variables : str or list of str
Optional data variable names to check and normalize. If set to
None will check all variables.
exclude_variables : str or list of str
Optional data variable names to exclude from processing.
qc_lookup : dict
Optional dictionary used to convert between terms.
"""
# Get list of variables if not provided
if variables is None:
variables = list(self._obj.data_vars)
# Ensure variables is a list
if not isinstance(variables, (list, tuple)):
variables = [variables]
# If exclude variables provided remove from variables list
if exclude_variables is not None:
if not isinstance(exclude_variables, (list, tuple)):
exclude_variables = [exclude_variables]
variables = list(set(variables) - set(exclude_variables))
# Loop over variables checking if a QC variable exits and use the
# lookup dictionary to convert the assessment terms.
for var_name in variables:
qc_var_name = self._obj.qcfilter.check_for_ancillary_qc(
var_name, add_if_missing=False, cleanup=False)
if qc_var_name is not None:
try:
flag_assessments = self._obj[qc_var_name].attrs['flag_assessments']
except KeyError:
continue
for ii, assess in enumerate(flag_assessments):
try:
flag_assessments[ii] = qc_lookup[assess]
except KeyError:
continue
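# --- Hypothetical usage sketch (not part of the original ACT source) ---
# A tiny synthetic dataset with ARM-style bit_#_description attributes is built
# below to drive the accessor defined above; variable names, attribute text and
# values are made up for illustration only.
if __name__ == '__main__':
    ds = xr.Dataset({'temp': ('time', np.array([1.0, 2.0, 3.0])),
                     'qc_temp': ('time', np.array([0, 1, 2], dtype=np.int32))})
    ds['qc_temp'].attrs['bit_1_description'] = 'Value is below fail_min.'
    ds['qc_temp'].attrs['bit_1_assessment'] = 'Bad'
    ds['qc_temp'].attrs['bit_2_description'] = 'Value is above fail_max.'
    ds['qc_temp'].attrs['bit_2_assessment'] = 'Bad'
    # qc_temp is picked up through the "qc_" prefix check
    print(ds.clean.matched_qc_variables)
    # bit numbers 1 and 2 are converted to flag_masks 1 and 2
    print(ds.clean.get_attr_info(variable='qc_temp'))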
``` |
{
"source": "jhemedin/chicago-nowcast",
"score": 2
} |
#### File: chicago-nowcast/code/klot_radar_scans.py
```python
from pylab import *
import pyart, boto3, tempfile, os, shutil, datetime, matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import animation
from botocore.handlers import disable_signing
from tint import Cell_tracks
from tint import animate as tint_animate
from tint.visualization import embed_mp4_as_gif
from math import sin, cos, sqrt, atan2, radians
from glob import glob
def get_radar_scan(station='KLOT', date=None, key_index=-15):
'''
Function will pull the latest radar scan from any radar site using
Amazon S3.
----------
Station = Four letter NEXRAD identifier
Example: 'KEPZ'
    Date = default is None for current date, else enter date in format "YYYY/MM/DD"
    Ex: date = '2013/11/17'
Key_index = Number of keys you want pulled from most recent scan.
    Ex: key_index = -15 would pull the most recent 15 scans
'''
#creating a bucket and a client to be able to pull data from AWS and setting
#it as unsigned
bucket = 'noaa-nexrad-level2'
s3 = boto3.resource('s3')
s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
#connects the bucket create above with radar data
aws_radar = s3.Bucket(bucket)
#setting the date and time to current.
    #this will allow the current date's radar scans to be pulled
if date == None:
target_string = datetime.datetime.utcnow().strftime('%Y/%m/%d/'+station)
else:
target_string = date+'/'+station
for obj in aws_radar.objects.filter(Prefix= target_string):
'{0}:{1}'.format(aws_radar.name, obj.key)
my_list_of_keys = [this_object.key for this_object in aws_radar.objects.filter(Prefix= target_string)]
keys = my_list_of_keys[key_index:]
print(keys)
return aws_radar, keys
def new_directory(date = 'current',
year = datetime.datetime.utcnow().strftime('%Y'),
month = datetime.datetime.utcnow().strftime('%m'),
day = datetime.datetime.utcnow().strftime('%d'),
hour = datetime.datetime.utcnow().strftime('%H'),
path = '/home/jhemedinger/suli_projects/chicago-nowcast/events'):
"""
Function will create a new directory and save all data and images to that file
----------
date: options are either current or past- current will create a file of the current date and time
past will allow for a file of a previous date and time to be created
Ex: date='past'
    year: year of the date for the file being created. If no year is given then the current year is used
month: same as year but for month
day: same as year and month but for day
hour: hour for which the data is from, if not set hour is set to current
    path: path where the new directory will be created and saved
"""
if date == 'past':
        past_date = str(datetime.datetime(int(year), int(month), int(day)).strftime('%Y_%m_%d'))
        out_dir_path = path+'/'+past_date
        event_date = str(datetime.datetime(int(year), int(month), int(day), int(hour)).strftime('%Y%m%d-%H'))
elif date == 'current':
cur_date = str(datetime.datetime.utcnow().strftime('%Y_%m_%d'))
out_dir_path = path+'/'+cur_date
event_date = str(datetime.datetime.utcnow().strftime('%Y%m%d-%H'))
out_dir = os.makedirs(out_dir_path, exist_ok=True)
out_path_dir = out_dir_path+'/'+event_date+'Z'
out_path = os.makedirs(out_path_dir, exist_ok=True)
print('current saving directory:', out_path_dir)
return out_path_dir
#setting the radar information to be pulled from AWS as well as created a new directory for the data to be saved to
aws_radar, keys = get_radar_scan()
out_path_dir = new_directory()
#creating a radar animation using pyart and matplotlib functions
def animate(nframe):
plt.clf()
localfile = tempfile.NamedTemporaryFile()
aws_radar.download_file(keys[nframe], localfile.name)
radar = pyart.io.read(localfile.name)
display = pyart.graph.RadarMapDisplay(radar)
# Delete radar after use to save memory.
del radar
display.plot_ppi_map('reflectivity', sweep=0, resolution='l',
vmin=-8, vmax=64, mask_outside=False,
fig=fig, width=350000, height=350000,
cmap = pyart.graph.cm.LangRainbow12 )
display.basemap.drawcounties()
display.plot_point(-87.981810, 41.713969 , label_text='ANL', symbol='ko')
fig = plt.figure(figsize=(10, 8))
anim_klot = animation.FuncAnimation(fig, animate,
frames=len(keys))
anim_klot.save(out_path_dir + '/reflectivity_animation.gif',
writer='imagemagick', fps=2)
plt.show()
plt.close()
#turning the data into grid data and saving it to a folder
def get_grid(aws_radar, key):
localfile = tempfile.NamedTemporaryFile()
aws_radar.download_file(key, localfile.name)
radar = pyart.io.read(localfile.name)
grid = pyart.map.grid_from_radars(
radar, grid_shape=(31, 401, 401),
grid_limits=((0, 15000), (-200000, 200000), (-200000, 200000)),
fields=['reflectivity'], weighting_function='Barnes', gridding_algo='map_gates_to_grid',
h_factor=0., nb=0.6, bsp=1., min_radius=200.)
return grid
for num,key in enumerate(keys):
print('saving grid', num)
grid = get_grid(aws_radar, key)
name = os.path.join(out_path_dir, 'grid_' + str(num).zfill(3) + '.nc')
pyart.io.write_grid(name, grid)
del grid
#reading in the gridded data to be used with TINT
files = glob(out_path_dir + '/grid_*')
files.sort()
#creating a grid generator to be able to read the grids into TINT
grid_gen = (pyart.io.read_grid(f) for f in files)
#creating the cell tracks and changing the minimum threshold value for reflectivity
tracks_obj = Cell_tracks()
tracks_obj.params['FIELD_THRESH']=35
tracks_obj.get_tracks(grid_gen)
tracks_obj.tracks
# this section is only necessary to run if there is already a file within the directory with the same name
#if you rerun code without deleting the old file an error will occur since the old file was not overwritten
if os.path.exists(out_path_dir + '/tracks_animation.mp4'):
print(out_path_dir + '/tracks_animation.mp4'
+ ' already exists, removing file')
os.remove(out_path_dir + '/tracks_animation.mp4')
#using the animate function within TINT to get the cell tracks
grid_gen = (pyart.io.read_grid(f) for f in files)
tint_animate(tracks_obj, grid_gen, os.path.join(out_path_dir, 'tracks_animation'), tracers=True,
cmap=pyart.graph.cm.LangRainbow12)#, lat_lines=lat_lines, lon_lines=lon_lines)
embed_mp4_as_gif(out_path_dir + '/tracks_animation.mp4')
#separating the data by uid
cells = tracks_obj.tracks.groupby(level='uid')
for uid in cells:
print(uid)
tracks_obj.tracks.groupby(level='uid').size().sort_values(ascending=False)[:]
#pulling the data for a specific uid
df_0 = pd.DataFrame(tracks_obj.tracks.xs('5', level='uid'))
lons, lats = np.array(df_0['lon']), np.array(df_0['lat'])
time = np.array(pd.to_datetime(df_0['time']))
print(df_0)
#creating the linear regression using polyfit and poly1d
fit = polyfit(lons[:10],lats[:10],1)
fit_fn = poly1d(fit)
#plotting the regression and the lat/lon data and showing the 95% confidence interval of the regression model
fig = plt.figure(figsize=(10,8))
plt.plot(lons[:10], lats[:10], '--ok', label='Latitude/Longitude')
sns.regplot(lons[:10], lats[:10], color='b')
#for i, txt in enumerate(time[:11]):
# plt.annotate(txt, (lons[:][i], lats[:][i]))
plt.plot(lons[:10], fit_fn(lons[:10]), '-b',
label='Linear Regression \nwith 95% Confidence Interval')
plt.xlabel('LONGITUDE')
plt.ylabel('LATITUDE')
plt.legend(loc=4)
#font = { 'family' : 'normal',
# 'size' : 15}
#matplotlib.rc('font', **font)
#plt.grid()
plt.title('June 26, 2018 FIELD_THRESH=35dBz')
plt.savefig(out_path_dir + '/regression.png', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
#plotting a time series of the latitude and longitude data
t = (time[:10] - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
x, y = lats[:10], lons[:10]
fit_lat = polyfit(t,x,1)
fit_lon = polyfit(t,y,1)
fit_fn_lon = poly1d(fit_lon)
fit_fn_lat = poly1d(fit_lat)
#font = { 'family' : 'normal',
# 'size' : 15}
#matplotlib.rc('font', **font)
fig = plt.figure(figsize=(10,8))
plt.plot(time[:10], x, 'ro', time[:10], fit_fn_lat(t), '--k')
plt.xlabel('TIME (UTC)')
plt.ylabel('LATITUDE')
plt.title('Latitudinal Time Series')
plt.savefig(out_path_dir + '/lat_reg.png', dpi=300)
plt.show()
plt.close()
fig = plt.figure(figsize=(10,8))
plt.plot(time[:10], y, 'bo', time[:10], fit_fn_lon(t), '--k')
plt.xlabel('TIME (UTC)')
plt.ylabel('LONGITUDE')
plt.title('Longitudinal Time Series')
plt.savefig(out_path_dir + '/lon_reg.png', dpi=300)
plt.show()
plt.close()
def lats_lons(minimum, maximum, interval):
'''
Will predict lat/lon for a given time interval.
Returns time, lat, and lon
    minimum: start of the time interval (in minutes after the last observation)
    maximum: end of the time interval (in minutes after the last observation)
interval: time interval in minutes
Ex: lats_lons(10, 70, 10) will find the lat/lon
for the next hour every 10 minutes.
'''
arr = np.arange(minimum, maximum, interval)
my_time = []
for i in arr:
my_time.append(time[:10][-1] + np.timedelta64(str(i), 'm'))
my_new_time = np.array(my_time)
nts = ((my_new_time - np.datetime64('1970-01-01T00:00:00Z'))
/ np.timedelta64(1, 's'))
my_new_lat = fit_fn_lat(nts)
my_new_lon = fit_fn_lon(nts)
# print(my_new_time)
# print(my_new_lon)
# print(my_new_lat)
return my_new_time, my_new_lat, my_new_lon
#getting future lat/lon points
my_new_time, my_new_lat, my_new_lon = lats_lons(10,90,10)
#calculating the distance the center of a cell is from Argonne using the Haversine formula
#unit for distance is km
for i in range(8):
anl_lon = radians(-87.981810)
anl_lat = radians(41.713969)
storm_lon = radians(my_new_lon[i])
storm_lat = radians(my_new_lat[i])
pre_time = (my_new_time[i])
dlon = storm_lon - anl_lon
dlat = storm_lat - anl_lat
a = sin(dlat / 2)**2 + cos(anl_lat) * cos(storm_lat) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
R = 6373.0
distance = R * c
# print(distance)
    #setting distance(s) to determine if ANL will be hit by a storm cell
#distance of 12km was used because the average radius of a storm is 12km
if distance <= 12:
        print('At', pre_time, 'storm is forecasted to be at ANL')
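# Hypothetical refactor sketch (not in the original script): the same Haversine
# computation as the loop above, wrapped in a reusable helper. The ANL
# coordinates below are the ones already used elsewhere in this script.
def haversine_km(lat1_deg, lon1_deg, lat2_deg, lon2_deg, earth_radius_km=6373.0):
    lat1, lon1, lat2, lon2 = map(radians, (lat1_deg, lon1_deg, lat2_deg, lon2_deg))
    a = sin((lat2 - lat1) / 2)**2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2)**2
    return earth_radius_km * 2 * atan2(sqrt(a), sqrt(1 - a))
# e.g. haversine_km(41.713969, -87.981810, 41.8, -87.9) gives the ANL-to-point
# distance in km, matching the loop above for the same inputs.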
#animating using matplotlib and pyart
def animate(nframe):
plt.clf()
localfile = tempfile.NamedTemporaryFile()
aws_radar.download_file(keys[11:20][nframe], localfile.name)
radar = pyart.io.read(localfile.name)
display = pyart.graph.RadarMapDisplay(radar)
# Delete radar after use to save memory.
del radar
display.plot_ppi_map('reflectivity', sweep=0, resolution='l',
vmin=-8, vmax=64, mask_outside=True,
fig=fig, width=85000, height=85000,
cmap=pyart.graph.cm.LangRainbow12)
display.basemap.drawcounties()
display.plot_line_geo(lons[-8:][:nframe], lats[-8:][:nframe], '-k', label='Observed storm path')
display.plot_line_geo(my_new_lon, my_new_lat, '--r', label='Forecasted storm path')
display.plot_point(-87.981810, 41.713969 , label_text='ANL', symbol='k*', label_offset=(-0.04,0.01))
plt.legend(loc=3)
fig = plt.figure(figsize=(12, 8))
#font = { 'family' : 'normal',
# 'size' : 15 }
anim_klot = animation.FuncAnimation(fig, animate,
frames=len(keys[11:20]))
anim_klot.save(out_path_dir + '/ref_track_animation_test.gif',
writer='imagemagick', fps=1)
plt.show()
plt.close()
#creating ppi image of last scan and plotting predicted and observed storm path
localfile = tempfile.NamedTemporaryFile()
fig=plt.figure(figsize=(12,8))
aws_radar.download_file(keys[-1], localfile.name)
radar = pyart.io.read(localfile.name)
display = pyart.graph.RadarMapDisplay(radar)
#font = { 'family' : 'normal',
# 'size' : 15 }
#matplotlib.rc('font', **font)
display.plot_ppi_map('reflectivity', sweep=0, resolution='l',
vmin=-8, vmax=64, mask_outside=True,
width=90000, height=90000,
cmap=pyart.graph.cm.LangRainbow12)
display.basemap.drawcounties()
display.plot_line_geo(lons[-8:], lats[-8:], '-k', label='Observed Storm Path')
display.plot_line_geo(my_new_lon, my_new_lat, '--r', label='Forecasted Storm Path')
display.plot_point(-87.981810, 41.713969 , label_text='ANL', symbol='k*', label_offset=(-0.04, 0.01))
plt.legend(loc=4)
plt.savefig(out_path_dir +'/reg_plot_radar.png', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
``` |
{
"source": "jhemedin/SAPR-QVP-VAP",
"score": 2
} |
#### File: SAPR-QVP-VAP/qvp/config.py
```python
from .default_config import _DEFAULT_METADATA, _DEFAULT_PLOT_VALUES, _DEFAULT_FIELD_PARAMETERS
def get_metadata(radar):
"""
    Return a dictionary of metadata for a given radar. An empty dictionary
    will be returned if no metadata dictionary exists for the radar parameter.
"""
if radar in _DEFAULT_METADATA:
return _DEFAULT_METADATA[radar].copy()
else:
return {}
def get_plot_values(radar):
"""
Return the values specific to a radar for plotting the radar fields.
"""
return _DEFAULT_PLOT_VALUES[radar].copy()
def get_field_parameters():
"""
Return the field titles for a specific radar field.
"""
return _DEFAULT_FIELD_PARAMETERS.copy()
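# Hypothetical usage sketch (not part of the original module): 'xsapr_i4' is a
# placeholder radar key and may not exist in the packaged default dictionaries.
# plot_values = get_plot_values('xsapr_i4')
# fld_params = get_field_parameters()
# print(plot_values['title'], fld_params['reflectivity']['vmin'])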
```
#### File: SAPR-QVP-VAP/qvp/qvp_quicklooks.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime
import xarray
import os
from .config import get_plot_values, get_field_parameters
def quicklooks_1panel(file, field, config, image_directory=None, **kwargs):
"""
    Quicklooks: produces a one-panel image using a QVP object NetCDF file.
Parameters
----------
file : str
File path to the QVP NetCDF file
field : str
String of the radar field
config : str
A string of the radar name found from config.py that contains values
for writing, specific to that radar.
Optional Parameters
-------------------
image_directory : str
File path to the image folder to save the QVP image. If no
        image file path is given, the image path defaults to the user's home directory.
"""
if image_directory is None:
image_directory = os.path.expanduser('~')
plot_values = get_plot_values(config)
fld_params = get_field_parameters()
qvp = xarray.open_dataset(file)
time = qvp.time.data
z = qvp.height.data/1000
fld = qvp[field].data
date = pd.to_datetime(time[0]).strftime('%Y%m%d')
ts = datetime.datetime.strptime(date, '%Y%m%d')
fig = plt.figure(figsize=[25, 12])
font = {'family': 'normal', 'size': 20}
matplotlib.rc('font', **font)
matplotlib.rcParams.update({'font.size': 20})
matplotlib.rcParams.update({'axes.titlesize': 20})
img = plt.pcolormesh(time, z, fld.transpose(), cmap=plot_values['cmap'],
vmin=fld_params[field]['vmin'],
vmax=fld_params[field]['vmax'])
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks(rotation=45)
plt.ylim(0,12)
plt.ylabel('Height (km)')
plt.xlabel('Time (UTC)')
plt.title(plot_values['title'] + ' ' + fld_params[field]['fld_title'] + ' '
+ plot_values['tilt'] + ' ' + str(ts)
+ '-' + str(ts + datetime.timedelta(days=1)))
cb = plt.colorbar(img, cmap=plot_values['cmap'])
cb.set_label(fld_params[field]['clb_title'])
plt.savefig(image_directory + '/' + plot_values['save_name']
+ '.' + str(date) + '.000000.png', bbox_inches='tight')
def quicklooks_4panel(file, fields, config, image_directory=None):
"""
    Quicklooks: produces a four-panel image using a QVP object NetCDF file.
Parameters
----------
file : str
File path to the QVP NetCDF file
fields : tuple/list
Tuple or list of strings of radar fields
config : str
A string of the radar name found from config.py that contains values
for writing, specific to that radar.
Optional Parameters
-------------------
image_directory : str
File path to the image folder to save the QVP image. If no
        image file path is given, the image path defaults to the user's home directory.
"""
if image_directory is None:
image_directory = os.path.expanduser('~')
plot_values = get_plot_values(config)
fld_params = get_field_parameters()
qvp = xarray.open_dataset(file)
cmap = plot_values['cmap']
time = qvp.time.data
z = qvp.height.data/1000
date = pd.to_datetime(time[0]).strftime('%Y%m%d')
ts = datetime.datetime.strptime(date, '%Y%m%d')
fld1 = qvp[fields[0]].data
fld2 = qvp[fields[1]].data
fld3 = qvp[fields[2]].data
fld4 = qvp[fields[3]].data
fig, ax = plt.subplots(nrows=4, ncols=1, sharex=True,
sharey=True, figsize=(50,37))
font = {'family': 'normal',
'size': 30}
matplotlib.rc('font', **font)
matplotlib.rcParams.update({'font.size': 30})
matplotlib.rcParams.update({'axes.titlesize': 30})
fig.suptitle(x=0.435, y=0.93, t=plot_values['title'] + ' '
+ plot_values['tilt'] + ' ' + str(ts)
+ '-' + str(ts + datetime.timedelta(days=1)),
fontsize=40)
fig.text(0.435, 0.065, 'Time (UTC)', ha='center', fontsize=30)
fig.text(0.09, 0.5, 'Height (km)', va='center',
rotation='vertical', fontsize=30)
ax = plt.subplot(411)
img = plt.pcolormesh(time, z, fld1.transpose(), cmap=cmap,
vmin=fld_params[fields[0]]['vmin'],
vmax=fld_params[fields[0]]['vmax'])
plt.ylim(0,12)
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks([])
ax.set_title(fld_params[fields[0]]['fld_title'])
cb = plt.colorbar(img, cmap=cmap)
cb.set_label(fld_params[fields[0]]['clb_title'])
ax = plt.subplot(412)
img = plt.pcolormesh(time, z, fld2.transpose(), cmap=cmap,
vmin=fld_params[fields[1]]['vmin'],
vmax=fld_params[fields[1]]['vmax'])
plt.ylim(0,12)
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks([])
ax.set_title(fld_params[fields[1]]['fld_title'])
cb = plt.colorbar(img, cmap=cmap)
cb.set_label(fld_params[fields[1]]['clb_title'])
ax = plt.subplot(413)
img = plt.pcolormesh(time, z, fld3.transpose(), cmap=cmap,
vmin=fld_params[fields[2]]['vmin'],
vmax=fld_params[fields[2]]['vmax'])
plt.ylim(0,12)
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks([])
ax.set_title(fld_params[fields[2]]['fld_title'])
cb = plt.colorbar(img, cmap=cmap)
cb.set_label(fld_params[fields[2]]['clb_title'])
ax = plt.subplot(414)
img = plt.pcolormesh(time, z, fld4.transpose(), cmap=cmap,
vmin=fld_params[fields[3]]['vmin'],
vmax=fld_params[fields[3]]['vmax'])
plt.ylim(0,12)
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks(rotation=45)
ax.set_title(fld_params[fields[3]]['fld_title'])
cb = plt.colorbar(img, cmap=cmap)
cb.set_label(fld_params[fields[3]]['clb_title'])
plt.savefig(image_directory + '/' + plot_values['save_name']
+ '.' + str(date) + '.000000.png', bbox_inches='tight')
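# Hypothetical usage sketch (not part of the original module): the file path,
# field names and config key below are placeholders only.
# quicklooks_1panel('/data/qvp/qvp.20180626.000000.nc', 'reflectivity',
#                   'xsapr_i4', image_directory='/tmp')
# quicklooks_4panel('/data/qvp/qvp.20180626.000000.nc',
#                   ['reflectivity', 'differential_reflectivity',
#                    'cross_correlation_ratio', 'specific_differential_phase'],
#                   'xsapr_i4', image_directory='/tmp')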
``` |
{
"source": "jhemedin/SAPR-VAD-VAP",
"score": 3
} |
#### File: SAPR-VAD-VAP/vad/vad_quicklooks.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import datetime
import xarray
import os
from .config import get_plot_values
def quicklooks(file, config, image_directory=None):
"""
Quicklook, produces a single image using a VAD object netCDF file.
Parameters
----------
file : str
File path to the VAD NetCDF file
config : str
A string of the radar name found from config.py that contains values
for writing, specific to that radar
Other Parameters
----------------
image_directory : str
File path to the image folder to save the VAD image. If no
        image file path is given, the image path defaults to the user's home directory.
"""
if image_directory is None:
image_directory = os.path.expanduser('~')
plot_values = get_plot_values(config)
vad = xarray.open_dataset(file)
u = vad.u_wind.data[::6,::5]/0.514444
v = vad.v_wind.data[::6,::5]/0.514444
z = vad.height.data[::5]/1000
C = vad.speed.data[::6,::5]/0.514444
t = vad.time[::6].data
date = pd.to_datetime(vad.time[0].data).strftime('%Y%m%d')
ts = datetime.datetime.strptime(date, '%Y%m%d')
fig = plt.figure(figsize=[25,12])
font = {'family': 'normal',
'size': 20}
matplotlib.rc('font', **font)
matplotlib.rcParams.update({'font.size': 20})
matplotlib.rcParams.update({'axes.titlesize': 20})
for i in range(len(t)):
Xq, Yq = np.meshgrid(t[i], z)
img = plt.barbs(Xq[:,0], Yq[:,0], u[i], v[i],
C[i], cmap = plot_values['cmap'],
norm=plot_values['norm'],
sizes=dict(emptybarb=0.1), rounding=False,
length=7, clip_on=False)
cb = plt.colorbar(img, cmap=plot_values['cmap'], norm=plot_values['norm'],
boundaries=plot_values['ticks'], ticks=plot_values['ticks'])
cb.set_label('Speed (kts)')
plt.title(plot_values['title'] + str(ts) + ' - '
+ str(ts + datetime.timedelta(days=1)))
plt.xlim(ts, (ts + datetime.timedelta(days=1)))
plt.xticks(rotation=45)
plt.ylim(0,10)
plt.ylabel('Height (km)')
plt.xlabel('Time (UTC)')
plt.savefig(image_directory + '/' + plot_values['save_name']
+ '.' + str(date) + '.000000.png', bbox_inches='tight')
``` |
{
"source": "jhemesonmotta/Shell-Sort",
"score": 4
} |
#### File: jhemesonmotta/Shell-Sort/shellsort.py
```python
def shellSort(nums):
n = len(nums)
h = 1
while h<=n/3:
h = h*3+1
while h > 0:
for i in range(h, n):
c = nums[i]
j = i
while j >= h and c < nums[j - h]:
nums[j] = nums[j - h]
j = j - h
nums[j] = c
h = int(h / 2.2)
return nums
#nums = [7, 1, 2, 3, 4];
#nums = [7,6,5,4,3,2,1]
#nums = [10,1,5,20,8,7,99,3,63,15]
#nums = [99,63,20,15,10,8,7,5,3,1]
#nums = [99, 9, 5, 7, 8]
nums = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
vet = shellSort(nums)
print(vet)
# SOME POINTS WORTH PAYING ATTENTION TO
# lines 5 and 6: define the partition size based on Knuth's gap sequence.
# (finds the best h)
# line 8: h marks the end of the interval
# lines 12 to 15: Insertion Sort algorithm - Group 1
# line 14: decrements to step through the other partitions
# line 16: this is done to shrink the partition size until it reaches 0.
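# Illustrative helper (not in the original file): reproduces the gap sequence
# that shellSort above uses, following the same update rules.
def knuth_gaps(n):
    h = 1
    while h <= n / 3:
        h = h * 3 + 1
    gaps = []
    while h > 0:
        gaps.append(h)
        h = int(h / 2.2)
    return gaps
print(knuth_gaps(len(nums)))  # [13, 5, 2] for the 16-element list above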
``` |
{
"source": "jhemmelg/scikit-image",
"score": 3
} |
#### File: skimage/segmentation/_felzenszwalb.py
```python
import numpy as np
from .._shared.utils import warn
from ._felzenszwalb_cy import _felzenszwalb_cython
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20):
"""Computes Felsenszwalb's efficient graph based image segmentation.
Produces an oversegmentation of a multichannel (i.e. RGB) image
using a fast, minimum spanning tree based clustering on the image grid.
The parameter ``scale`` sets an observation level. Higher scale means
less and larger segments. ``sigma`` is the diameter of a Gaussian kernel,
used for smoothing the image prior to segmentation.
The number of produced segments as well as their size can only be
controlled indirectly through ``scale``. Segment size within an image can
vary greatly depending on local contrast.
For RGB images, the algorithm uses the euclidean distance between pixels in
color space.
Parameters
----------
image : (width, height, 3) or (width, height) ndarray
Input image.
scale : float
Free parameter. Higher means larger clusters.
sigma : float
Width of Gaussian kernel used in preprocessing.
min_size : int
Minimum component size. Enforced using postprocessing.
Returns
-------
segment_mask : (width, height) ndarray
Integer mask indicating segment labels.
References
----------
.. [1] Efficient graph-based image segmentation, <NAME>. and
<NAME>. International Journal of Computer Vision, 2004
Examples
--------
>>> from skimage.segmentation import felzenszwalb
>>> from skimage.data import coffee
>>> img = coffee()
>>> segments = felzenszwalb(img, scale=3.0, sigma=0.95, min_size=5)
"""
image = np.atleast_3d(image)
return _felzenszwalb_cython(image, scale=scale, sigma=sigma,
min_size=min_size)
``` |
{
"source": "jhemmingsson/prefect",
"score": 3
} |
#### File: tasks/snowflake/snowflake.py
```python
import snowflake.connector as sf
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
class SnowflakeQuery(Task):
"""
Task for executing a query against a snowflake database.
Args:
- account (str): snowflake account name, see snowflake connector
package documentation for details
- user (str): user name used to authenticate
- password (str, optional): password used to authenticate.
            password or private_key must be present
- private_key (bytes, optional): pem to authenticate.
password or private_key must be present
- database (str, optional): name of the default database to use
- schema (int, optional): name of the default schema to use
- role (str, optional): name of the default role to use
- warehouse (str, optional): name of the default warehouse to use
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using placeholder
            in the query string
- autocommit (bool, optional): set to True to autocommit, defaults to None, which
takes snowflake AUTOCOMMIT parameter
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
account: str,
user: str,
password: str = None,
private_key: bytes = None,
database: str = None,
schema: str = None,
role: str = None,
warehouse: str = None,
query: str = None,
data: tuple = None,
autocommit: bool = None,
**kwargs
):
self.account = account
self.user = user
self.password = password
self.database = database
self.schema = schema
self.role = role
self.warehouse = warehouse
self.query = query
self.data = data
self.autocommit = autocommit
self.private_key = private_key
super().__init__(**kwargs)
@defaults_from_attrs("query", "data", "autocommit")
def run(self, query: str = None, data: tuple = None, autocommit: bool = None):
"""
Task run method. Executes a query against snowflake database.
Args:
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using
                placeholder in the query string
- autocommit (bool, optional): set to True to autocommit, defaults to None
which takes the snowflake AUTOCOMMIT parameter
Returns:
            - dict: dictionary with keys "data" (rows returned by the query) and
              "columns" (the cursor description)
Raises:
- ValueError: if query parameter is None or a blank string
- DatabaseError: if exception occurs when executing the query
"""
if not query:
raise ValueError("A query string must be provided")
# build the connection parameter dictionary
# we will remove `None` values next
connect_params = {
"account": self.account,
"user": self.user,
"password": <PASSWORD>,
"private_key": self.private_key,
"database": self.database,
"schema": self.schema,
"role": self.role,
"warehouse": self.warehouse,
"autocommit": self.autocommit,
}
# filter out unset values
connect_params = {
param: value
for (param, value) in connect_params.items()
if value is not None
}
# connect to database, open cursor
conn = sf.connect(**connect_params)
# try to execute query
# context manager automatically rolls back failed transactions
try:
with conn:
with conn.cursor() as cursor:
executed = cursor.execute(query, params=data).fetchall()
columns = cursor.description
conn.close()
return { "data": executed, "columns": columns }
# pass through error, and ensure connection is closed
except Exception as error:
conn.close()
raise error
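# Hypothetical usage sketch (not part of the original module): account,
# credentials, warehouse and query below are placeholders. In Prefect 1.x the
# task instance is bound inside a Flow context (or run directly via .run()).
if __name__ == "__main__":
    from prefect import Flow

    query_task = SnowflakeQuery(
        account="my_account",    # placeholder
        user="my_user",          # placeholder
        password="***",          # placeholder
        warehouse="COMPUTE_WH",  # placeholder
    )
    with Flow("snowflake-example") as flow:
        result = query_task(query="SELECT CURRENT_TIMESTAMP()")
    # flow.run()  # would execute the query against a real Snowflake account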
``` |
{
"source": "jhemmmm/referer-spam-domains-blacklist",
"score": 3
} |
#### File: jhemmmm/referer-spam-domains-blacklist/remove-dead-domains.py
```python
import argparse
import asyncio
import collections
import errno
import itertools
import random
import resource
import aiodns
import tqdm
DNS_SERVERS = ("8.8.8.8", # Google DNS
"208.67.222.222", # OpenDNS
"172.16.31.10", # DNS.WATCH
"172.16.58.3", # Level3 DNS
"8.26.56.26") # Comodo Secure DNS
WEB_PORTS = (80, 443)
MAX_CONCURRENT_REQUESTS_PER_DNS_SERVER = 16
MAX_DNS_ATTEMPTS = 10
BASE_DNS_TIMEOUT_S = 0.5
async def dns_resolve(domain, dns_server, sem):
""" Return IP string if domain has a DNA A record on this DNS server, False otherwise. """
resolver = aiodns.DNSResolver(nameservers=(dns_server,))
timeout = BASE_DNS_TIMEOUT_S
for attempt in range(1, MAX_DNS_ATTEMPTS + 1):
coroutine = resolver.query(domain, "A")
try:
async with sem:
response = await asyncio.wait_for(coroutine, timeout=timeout)
except asyncio.TimeoutError:
jitter = random.randint(-20, 20) / 100
timeout = BASE_DNS_TIMEOUT_S + jitter
continue
except aiodns.error.DNSError:
return False
try:
ip = response[0].host
except IndexError:
return False
break
else:
        # too many failed attempts
return False
return ip
async def dns_resolve_domain(domain, progress, sems):
""" Return IP string if domain has a DNA A record on this DNS server, False otherwise. """
dns_servers = list(DNS_SERVERS)
random.shuffle(dns_servers)
r = []
for dns_server in dns_servers:
ip = await dns_resolve(domain, dns_server, sems[dns_server])
r.append(ip or None)
progress.update(1)
return r
async def has_tcp_port_open(ip, port, progress):
""" Return True if domain is listening on a TCP port, False instead. """
r = True
coroutine = asyncio.open_connection(ip, port)
try:
_, writer = await asyncio.wait_for(coroutine, timeout=10)
except (ConnectionRefusedError, asyncio.TimeoutError):
r = False
except OSError as e:
if e.errno == errno.EHOSTUNREACH:
r = False
else:
raise
else:
writer.close()
progress.update(1)
return r
if __name__ == "__main__":
# parse args
arg_parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_parser.add_argument("list_file",
help="Domain list file path")
args = arg_parser.parse_args()
# read list
with open(args.list_file, "rt") as list_file:
domains = tuple(map(str.rstrip, list_file.readlines()))
dead_domains = set()
# bump limits
soft_lim, hard_lim = resource.getrlimit(resource.RLIMIT_NOFILE)
if ((soft_lim != resource.RLIM_INFINITY) and
((soft_lim < hard_lim) or (hard_lim == resource.RLIM_INFINITY))):
resource.setrlimit(resource.RLIMIT_NOFILE, (hard_lim, hard_lim))
print("Max open files count set from %u to %u" % (soft_lim, hard_lim))
# resolve domains
sems = collections.defaultdict(lambda: asyncio.BoundedSemaphore(MAX_CONCURRENT_REQUESTS_PER_DNS_SERVER))
dns_check_futures = []
tcp_check_domain_ips = {}
with tqdm.tqdm(total=len(domains),
miniters=1,
smoothing=0,
desc="Domains checks",
unit=" domains") as progress:
for domain in domains:
coroutine = dns_resolve_domain(domain, progress, sems)
future = asyncio.ensure_future(coroutine)
dns_check_futures.append(future)
asyncio.get_event_loop().run_until_complete(asyncio.gather(*dns_check_futures))
for domain, future in zip(domains, dns_check_futures):
ips = future.result()
if not any(ips):
# all dns resolutions failed for this domain
dead_domains.add(domain)
elif not all(ips):
# at least one dns resolution failed, but at least one succeeded for this domain
tcp_check_domain_ips[domain] = ips
# for domains with at least one failed DNS resolution, check open ports
tcp_check_futures = collections.defaultdict(list)
with tqdm.tqdm(total=len(tcp_check_domain_ips) * len(WEB_PORTS),
miniters=1,
desc="TCP domain checks",
unit=" domains",
leave=True) as progress:
for domain, ips in tcp_check_domain_ips.items():
ip = next(filter(None, ips)) # take result of first successful resolution
for port in WEB_PORTS:
coroutine = has_tcp_port_open(ip, port, progress)
future = asyncio.ensure_future(coroutine)
tcp_check_futures[domain].append(future)
asyncio.get_event_loop().run_until_complete(asyncio.gather(*itertools.chain.from_iterable(tcp_check_futures.values())))
for domain, futures in tcp_check_futures.items():
status = tuple(future.result() for future in futures)
if not any(status):
# no web port open for this domain
dead_domains.add(domain)
# write new file
with open(args.list_file, "wt") as list_file:
for domain in domains:
if domain not in dead_domains:
list_file.write("%s\n" % (domain))
print("\n%u dead domain(s) removed" % (len(dead_domains)))
``` |
{
"source": "Jhengsh/tidyframe",
"score": 3
} |
#### File: tidyframe/tests/test_apply_cum.py
```python
import numpy as np
import pandas as pd
from tidyframe import apply_cum
series = np.random.randint(1, 6, 10)
cum_func = lambda x, y: x * y
judge_func = lambda x: x > 10
series = range(3, 8)
def test_apply_cum_basic():
apply_cum(series, cum_func, init_value=1)
def test_apply_cum_judge_func():
df = apply_cum(pd.Series(series),
cum_func,
judge_func=lambda x: x > 30,
init_value=1)
assert df['index_first'][0] and df['index_first'][
        3], 'index_first does not work'
    assert df['index_last'][2], 'index_last does not work'
def test_apply_cum_judge_func_2():
series = [10, 2, 3, 6, 3]
df = apply_cum(series, judge_func=lambda x: x > 9)
assert df['index_first'][0] and df['index_last'][
0], 'first value of index_first and index_last is not True'
```
#### File: tidyframe/tests/test_coalesce.py
```python
import pandas as pd
from tidyframe import coalesce
def test_coalesce_basic():
df = pd.DataFrame()
df['a'] = [None, pd.np.NaN, pd.np.nan, pd.np.nan]
df['b'] = [None, 4, 6, pd.np.nan]
df['c'] = [None, pd.np.NaN, 6, pd.np.nan]
coalesce(df, ['a', 'b', 'c'], default_value=10)
```
#### File: tidyframe/tests/test_combination.py
```python
import pandas as pd
from tidyframe import combination
df_a = pd.DataFrame({'a1': list('ABC'), 'a2': list('CDE')})
df_b = pd.DataFrame({'b1': list('01234'), 'b2': list('56789')})
df_c = pd.DataFrame({'c1': list('pq'), 'c2': list('rs')})
df_d = pd.DataFrame({'d1': list('abcd'), 'd2': list('efgh')})
df_d.index.name = 'index'
def test_combination_basic():
combination([df_a, df_b, df_c])
def test_combination_basic_2():
combination([df_a, df_b, df_d])
```
#### File: tidyframe/tests/test_fillna.py
```python
import pandas as pd
from tidyframe import fillna
def test_fillna_basic():
result = fillna([None] * 3, [1, pd.np.NaN, None], [1, 2, 3])
for x, y in zip(result, [1, 2, 3]):
assert x == y, "fillna result not equal"
```
#### File: tidyframe/tests/test_fit_dataframe_to_table_schema.py
```python
import pandas as pd
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.types import NCHAR
from datetime import datetime
from tidyframe import fit_table_schema_type
engine = create_engine('sqlite:///testing_fit_table_schema_type.db')
df = pd.DataFrame()
df['a'] = list('abc')
df['b'] = 1
df['c'] = 1.3
df['d'] = [pd.np.nan, 10, 1.4]
df['e'] = ['adev', pd.NaT, '今天天氣']
df['f'] = [datetime.now(), None, datetime.now()]
df['g'] = [True, False, True]
df['h'] = 2147483647 * 2
df['i'] = [pd.np.nan, pd.np.nan, pd.np.nan]
df.to_sql('test_fit_dataframe', engine, index=False, dtype={'i': NCHAR(10)})
table = Table('test_fit_dataframe', MetaData(bind=engine), autoload=True)
def test_fit_table_schema_type_basic():
df = pd.DataFrame()
df['a'] = list('abc')
df['b'] = 1
df['c'] = 1.3
df['d'] = [pd.np.nan, 10, 1.4]
df['e'] = ['adev', pd.NaT, '今天天氣']
df['f'] = pd.to_datetime(
['2018-03-09 22:29:00+08:00', '2018-03-09 22:29:00+08:00', None])
df['g'] = [True, False, True]
df['h'] = 2147483647 * 2
df['i'] = [pd.np.nan, pd.np.nan, pd.np.nan]
fit_table_schema_type(df, table)
df.to_sql('test_fit_dataframe', engine, index=False, if_exists='append')
def test_fit_table_schema_type_null():
df = pd.DataFrame()
df['a'] = list('abc')
df['b'] = 1
df['c'] = 1.3
df['d'] = [pd.np.nan, 10, 1.4]
df['e'] = ['adev', pd.NaT, '今天天氣']
df['f'] = [None, None, None]
df['g'] = [True, False, True]
df['h'] = 2147483647 * 2
df['i'] = [pd.np.nan, pd.np.nan, pd.np.nan]
fit_table_schema_type(df, table)
df.to_sql('test_fit_dataframe', engine, index=False, if_exists='append')
```
#### File: tidyframe/tests/test_gather.py
```python
import pandas as pd
from sklearn import datasets
from tidyframe import gather
iris = datasets.load_iris()
df = pd.DataFrame(iris['data'], columns=iris.feature_names)
df['target'] = iris.target
df['target2'] = list(map(lambda x: 1 if x < 1 else 0, df.target))
col_gather = df.columns[:4]
df_short = df.head()
def test_gather_basic():
gather(df_short[col_gather].reset_index().head(8), col_gather).head()
def test_gather_without_index():
tmp = gather(df_short[col_gather])
assert tmp.columns[0] == 'index'
def test_gather_assign_col():
gather(df_short, col_gather)
def test_gather_str_key():
gather(df_short, 'target')
def test_gather_with_index_name():
df_short2 = df_short[col_gather]
df_short2.index.name = 'index_with_name'
assert gather(df_short2, col_gather).columns[0] == df_short2.index.name
```
#### File: tidyframe/tests/test_Possibly.py
```python
import math
import numpy as np
from tidyframe import Possibly
@Possibly()
def log_possibly(x):
return math.log(x)
def test_Possibly_basic_success():
assert np.isclose(log_possibly(10), math.log(10)), 'Must result is True'
def test_Possibly_basic_fail():
assert np.isnan(log_possibly(-10)), 'Must result is True'
def test_Possibly_change_otherwise():
@Possibly(otherwise=-1)
def log_possibly(x):
return math.log(x)
assert np.isclose(log_possibly(-10), -1), 'Must result is True'
def test_Possibly_classmethod_basic_success():
assert np.isclose(Possibly.possibly(math.log)(10),
math.log(10)), 'Must result is True'
def test_Possibly_classmethod_basic_fail():
assert np.isnan(Possibly.possibly(math.log)(-1)), 'Must result is True'
def test_Possibly_classmethod_change_default():
Possibly.otherwise_all = 1
Possibly.quiet_all = False
assert np.isclose(Possibly.possibly(math.log)(-1),
1), 'Must result is True'
def test_Possibly_print_exception():
@Possibly(otherwise=-1, quiet=False)
def log_possibly(x):
return math.log(x)
log_possibly(-10)
```
#### File: tidyframe/tests/test_Safely.py
```python
import math
import numpy as np
from tidyframe import Safely
@Safely()
def log_safely(x):
return math.log(x)
def test_Safely_basic_success():
result_log = log_safely(10)
assert np.isclose(result_log['result'],
math.log(10)), 'Must result be True'
assert result_log['error'] is None, 'Must result be None'
def test_Safely_basic_fail():
result_log2 = log_safely(-10)
assert np.isnan(result_log2['result']), 'Must result is True'
assert result_log2['error'] is not None, 'Must result is True'
def test_Safely_classmethod_success():
result_log3 = Safely.safely(math.log)(10)
assert np.isclose(result_log3['result'],
math.log(10)), 'Must result is True'
assert result_log3['error'] is None, 'Must result is True'
def test_Safely_classmethod_fail():
result_log4 = Safely.safely(math.log)(-1)
assert np.isnan(result_log4['result']), 'Must result is True'
assert result_log4['error'] is not None, 'Must result is True'
def test_Safely_classmethod_change_default():
Safely.otherwise_all = -1
Safely.quiet_all = False
result_log5 = Safely.safely(math.log)(-1)
assert np.isclose(result_log5['result'], -1), 'Must result is True'
assert result_log5['error'] is not None, 'Must result is True'
def test_Safely_print_exception():
@Safely(otherwise=-1, quiet=False)
def log_safely(x):
return math.log(x)
log_safely(-10)
```
#### File: tidyframe/tests/test_separate.py
```python
import pandas as pd
import numpy as np
from tidyframe import separate
df = pd.DataFrame({'full_string': ['a b c d e z', 'f g h i']},
index=['row_1', 'row_2'])
series = df.full_string.str.split(' ')
def test_separate_basic():
separate(series)
def test_separate_index():
separate(series, index=[0, 4])
def test_separate_using_otherwise():
separate(series, index=[0, 4], otherwise='otherwise')
def test_separate_change_column_name():
separate(series, index=[0, 3], columns=['zero', 'three'])
def test_separate_list_to_dataframe():
separate([list('abc'), list('def')])
```
#### File: tidyframe/tests/test_strip_whitespace_include_newline.py
```python
from tidyframe.tools.string import whitespace, strip_whitespace_include_newline
def test_strip_whitespace_include_newline_base():
assert strip_whitespace_include_newline(' \n'.join(whitespace)) == ''
```
#### File: tidyframe/tests/test_try_expect_raw.py
```python
import pandas as pd
from tidyframe import try_expect_raw
def test_try_expect_raw_basic():
my_sum = try_expect_raw(lambda x, y: x + y)
assert my_sum(1, y='a') == 1
```
#### File: tidyframe/tools/database.py
```python
import concurrent.futures
from copy import deepcopy
from datetime import datetime
import pandas as pd
from sqlalchemy import (MetaData, Table, Column, BigInteger, Integer, Float,
NVARCHAR, CHAR, DATETIME, BOOLEAN)
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.schema import CreateTable
from funcy import chunks
def create_table(
df,
name,
con,
primary_key=[],
nvarchar_columns=[],
non_nullable_columns=[],
dtype=None,
create=True,
all_nvarchar=False,
base_char_type=CHAR(),
base_nchar_type=NVARCHAR(),
base_int_type=Integer(),
base_bigint_type=BigInteger(),
base_float_type=Float(),
base_boolean_type=BOOLEAN(),
):
"""
    Create a sqlalchemy Table object for creating a table in a database
Parameters
----------
df : Pandas DataFrame
con : sqlalchemy.engine.Engine or sqlite3.Connection
name : string, name of SQL table
primary_key : list, primary key columns
nvarchar_columns : list, nvarchar columns
non_nullable_columns : list, non-nullable columns
dtype: dict, optional, specifying the datatype for columns. The keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode.
    all_nvarchar : Bool, use NVARCHAR for all string columns
    create : Bool(default: True), directly create the table in the database
Returns
-------
sqlalchemy Table object or True
Example
-------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from datetime import datetime
>>> from tidyframe import create_table
>>>
>>> engine = create_engine('sqlite:///test_for_create_table.db')
>>> df = pd.DataFrame()
>>> df['a'] = list('abc')
>>> df['b'] = 1
>>> df['c'] = 1.3
>>> df['d'] = [pd.np.nan, 10, 1.4]
>>> df['e'] = ['adev', pd.NaT, '今天天氣']
>>> df['f'] = [datetime.now(), None, datetime.now()]
>>> df['g'] = [True, False, True]
>>> df['h'] = 2147483647 * 2
>>> create_table(df,
... 'test_table',
... engine,
... primary_key=['a'],
... nvarchar_columns=['e'],
... non_nullable_columns=['d'],
... create=False)
Table('test_table', MetaData(bind=Engine(sqlite:///test_for_create_table.db)), Column('a', CHAR(length=1), table=<test_table>, primary_key=True, nullable=False), Column('b', Integer(), table=<test_table>), Column('c', Float(), table=<test_table>), Column('d', Float(), table=<test_table>, nullable=False), Column('e', NVARCHAR(length=8), table=<test_table>), Column('f', DATETIME(), table=<test_table>), Column('g', BOOLEAN(), table=<test_table>), Column('h', Integer(), table=<test_table>), schema=None)
>>>
>>> create_table(df,
... 'test_table_create',
... engine,
... primary_key=['a'],
... nvarchar_columns=['e'],
... non_nullable_columns=['d'],
... create=True)
True
"""
meta = MetaData(bind=con)
column_list = []
int_info = pd.np.iinfo(pd.np.int32)
for x in df:
if x in primary_key:
is_primary_key = True
nullable = False
else:
is_primary_key = False
if x in non_nullable_columns:
nullable = False
else:
nullable = True
try:
if dtype is not None and x in dtype:
each_column = Column(x,
dtype[x],
primary_key=is_primary_key,
nullable=nullable)
elif df[x].dtype.char == 'O':
length = df[x].fillna('').apply(lambda x: len(str(x))).max()
if x in nvarchar_columns or all_nvarchar:
nchar_type = deepcopy(base_nchar_type)
nchar_type.length = length * 2
each_column = Column(x,
nchar_type,
primary_key=is_primary_key,
nullable=nullable)
else:
char_type = deepcopy(base_char_type)
char_type.length = length
each_column = Column(x,
char_type,
primary_key=is_primary_key,
nullable=nullable)
elif df[x].dtype.char == 'M':
each_column = Column(x,
DATETIME(),
primary_key=is_primary_key,
nullable=nullable)
elif df[x].dtype.char == 'l':
max_column_value = df[x].max()
min_column_value = df[x].min()
if pd.notna(max_column_value) and pd.notna(
min_column_value
) and min_column_value <= int_info.min and max_column_value >= int_info.max:
each_column = Column(x,
base_bigint_type,
primary_key=is_primary_key,
nullable=nullable)
else:
each_column = Column(x,
base_int_type,
primary_key=is_primary_key,
nullable=nullable)
elif df[x].dtype.char == 'd':
each_column = Column(x, base_float_type, nullable=nullable)
elif df[x].dtype.str == '|b1':
each_column = Column(x,
base_boolean_type,
primary_key=is_primary_key,
nullable=nullable)
else:
each_column = Column(x,
NVARCHAR(255),
primary_key=is_primary_key,
nullable=nullable)
except Exception as e:
raise Exception('Column {}: {}'.format(x, str(e)))
column_list.append(each_column)
if create:
Table(name, meta, *column_list, extend_existing=True).create()
return True
else:
return Table(name, meta, *column_list, extend_existing=True)
def copy_table_schema(source_table,
target_table,
source_con,
target_con,
omit_collation=False,
create=True,
add_columns=[]):
"""
    Copy a table schema from one database to another database
Parameters
----------
source_table : source table name in database
target_table : target table name
source_con : sqlalchemy.engine.Engine or sqlite3.Connection, source engine
target_con : sqlalchemy.engine.Engine or sqlite3.Connection, target engine
omit_collation : Bool(default: False), omit all char collation
create : Bool(default: True), direct create table in database
add_columns : list of column object
Returns
-------
sqlalchemy Table object or True
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import (create_engine, VARCHAR, Column, DateTime)
>>> from datetime import datetime
>>> from tidyframe import copy_table_schema
>>>
>>> engine = create_engine('sqlite:///source.db')
>>> engine_target = create_engine('sqlite:///target.db')
>>> df = pd.DataFrame()
>>> df['a'] = list('abc')
>>> df['b'] = 1
>>> df['c'] = 1.3
>>> df['d'] = [pd.np.nan, 10, 1.4]
>>> df['e'] = ['adev', pd.NaT, '今天天氣']
>>> df['f'] = [datetime.now(), None, datetime.now()]
>>> df['g'] = [True, False, True]
>>> df.shape
(3, 7)
>>> df.to_sql('raw_table', engine, index=False)
>>> copy_table_schema('raw_table',
... 'target_table',
... source_con=engine,
... target_con=engine_target,
... add_columns=[Column('last_maintain_date', DateTime())],
... omit_collation=True,
... create=True)
True
>>> pd.read_sql_table('target_table', engine_target).shape
(0, 8)
"""
meta_source = MetaData(bind=source_con)
meta_target = MetaData(bind=target_con)
table_object_source = Table(source_table, meta_source, autoload=True)
columns = [{'name': x.name, 'type': x.type} for x in table_object_source.c]
if omit_collation:
for x in columns:
try:
x['type'].collation = None
except:
pass
columns = [Column(x['name'], x['type']) for x in columns]
if add_columns:
columns.extend(add_columns)
table_object_target = Table(target_table,
meta_target,
*columns,
extend_existing=True)
if create:
table_object_target.create()
return True
else:
return table_object_target
def fit_table_schema_type(df, table):
"""
    Fit DataFrame dtypes to the table schema, so that DataFrame.to_sql can be used directly if the table already exists.
    Limitation: the column dtype is not transformed if python_type is str and the column dtype is object
Parameters
----------
df : Pandas DataFrame
table : Table object
Returns
-------
None
"""
try:
for x in table.columns:
if (x.type.python_type == float and df[x.name].dtype == 'float64'
) or (x.type.python_type == int and df[x.name].dtype == 'int64'
) or (x.type.python_type == int
and df[x.name].dtype == 'int32') or (
x.type.python_type == bool
and df[x.name].dtype == 'bool') or (
x.type.python_type == datetime
and df[x.name].dtype == 'datetime64[ns]'):
pass
elif x.type.python_type == str:
df[x.name] = [
None if not isinstance(x, list) and pd.isna(x) else str(x)
for x in df[x.name]
]
elif x.type.python_type == float and df[
x.name].dtype != 'float64' and df[
x.name].dtype != 'float32':
df[x.name] = df[x.name].astype(float)
elif x.type.python_type == int and df[
x.name].dtype != 'int64' and df[x.name].dtype != 'int32':
df[x.name] = df[x.name].astype(int)
elif x.type.python_type == bool and df[x.name].dtype != 'bool':
df[x.name] = df[x.name].astype(bool)
elif x.type.python_type == datetime and df[
x.name].dtype != 'datetime64[ns]':
df[x.name] = pd.DatetimeIndex(df[x.name]).tz_localize(None)
else:
raise Exception(
'Column {} not deal with python_type {} and dtype {}'.
format(x.name, str(x.type.python_type), df[x.name].dtype))
return None
except Exception as e:
raise Exception('fit Column {} error: {}'.format(x.name, str(e)))
def load_table_schema(name, con):
"""
load table schema from database
Parameters
----------
name : string, name of SQL table
con : sqlalchemy.engine.Engine or sqlite3.Connection
Returns
-------
sqlalchemy Table object
Example
-------
>>> import pandas as pd
>>> from sqlalchemy import (create_engine, Table, MetaData)
>>> from tidyframe import (load_table_schema, create_table)
>>>
>>> engine = create_engine('sqlite:///load_table_schema.db')
>>> num_row = 100000
>>> df = pd.DataFrame()
>>> df['a'] = ['a'] * num_row
>>> df['b'] = ['b'] * num_row
>>> df['c'] = ['c'] * num_row
>>> create_table(df, 'test_table', engine, create=True)
True
>>> records = df.to_dict('record')
>>> table_b = load_table_schema('test_table', engine)
>>> table_b
    Table('test_table', MetaData(bind=Engine(sqlite:///load_table_schema.db)), Column('a', CHAR(length=1), table=<test_table>), Column('b', CHAR(length=1), table=<test_table>), Column('c', CHAR(length=1), table=<test_table>), schema=None)
"""
meta = MetaData(bind=con)
return Table(name, meta, autoload=True)
def drop_table(name, con):
"""
drop table from database
Parameters
----------
name : string, name of SQL table
con : sqlalchemy.engine.Engine or sqlite3.Connection
Returns
-------
True
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from tidyframe import drop_table
>>>
>>> engine = create_engine("sqlite:///raw_table.db")
>>> df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
>>> df.to_sql("raw_table", engine)
>>> drop_table("raw_table", engine)
True
"""
table = load_table_schema(name, con)
table.drop()
return True
def get_create_table_script(table):
"""
get create table script
Parameters
----------
table : sqlalchemy Table object
Returns
-------
    string, the CREATE TABLE statement generated by sqlalchemy for the table
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from tidyframe import create_table, get_create_table_script
>>>
>>> engine = create_engine('sqlite:///testing_get_create_table_script.db')
>>> df = pd.DataFrame()
>>> df['a'] = list('abc')
>>> df['b'] = 1
>>> df['c'] = 1.3
>>> table = create_table(df,
... 'test_table',
... engine,
... primary_key=['a'],
... nvarchar_columns=['e'],
... non_nullable_columns=['d'],
... create=False)
>>> create_table_script = get_create_table_script(table)
"""
return CreateTable(table).compile().string
def _insert_chunk_records(records, table, con):
with con.connect() as connection:
with connection.begin() as transaction:
try:
connection.execute(table.insert(), records)
except:
transaction.rollback()
return False
else:
transaction.commit()
return True
def _insert_chunk_records_for_thread(records, table, con):
Session = scoped_session(sessionmaker(bind=con))
session = Session()
try:
session.execute(table.insert(), records)
except Exception as e:
session.rollback()
Session.remove()
return records
else:
session.commit()
Session.remove()
return []
def bulk_insert(records,
table,
con,
batch_size=10000,
pool_size=1,
only_insert_fail=False):
"""
    Bulk insert records (list of dicts) into a table
    Parameters
    ----------
    records : list of dict
    table : sqlalchemy Table object (you can get it from load_table_schema)
    con : sqlalchemy.engine.Engine or sqlite3.Connection
    batch_size : batch size for bulk insert
    pool_size : Int(default: 1), number of threads used to insert records
    only_insert_fail : Bool(default: False), only return records which fail to insert
    Returns
    -------
    list of records from the failed batches, or, if only_insert_fail is True, only the individual records that failed to insert
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from tidyframe import (create_table, load_table_schema, bulk_insert)
>>>
>>> engine = create_engine("mysql://root:[email protected]/test_db")
>>> df = pd.DataFrame()
>>> df["a"] = ["a"] * 10000
>>> df["b"] = [1] * 10000
>>> df["c"] = [1.3] * 10000
>>>
>>> create_table(df, "want_insert_table", engine, create=True)
True
>>> table = load_table_schema("want_insert_table", engine)
>>>
>>> df.iloc[0,0]= "abc"
>>> df.iloc[-1,0]= "abc"
>>>
>>> insert_fail_records = bulk_insert(df.to_dict("record"),
... table,
... engine,
... batch_size=100)
>>> len(insert_fail_records)
200
>>>
>>> insert_fail_records = bulk_insert(df.to_dict("record"),
... table,
... engine,
... batch_size=100,
... only_insert_fail=True)
>>> len(insert_fail_records)
2
"""
return_batch_error_record = []
if pool_size == 1:
list_error_batch = []
for each_batch_record in chunks(batch_size, records):
if not _insert_chunk_records(each_batch_record, table, con):
list_error_batch.append(each_batch_record)
for x in list_error_batch:
return_batch_error_record.extend(x)
else:
with concurrent.futures.ThreadPoolExecutor(
max_workers=pool_size) as executor:
sync_job = [
executor.submit(_insert_chunk_records_for_thread, i, table,
con) for i in chunks(batch_size, records)
]
for future in concurrent.futures.as_completed(sync_job):
return_batch_error_record.extend(future.result())
if not only_insert_fail:
return return_batch_error_record
else:
while (batch_size > 10):
batch_size = int(batch_size / 2)
list_error = bulk_insert(return_batch_error_record,
table,
con,
batch_size=batch_size)
return_list_insert_fail_record = []
for record in list_error:
insert_status = _insert_chunk_records(record, table, con)
if not insert_status:
return_list_insert_fail_record.append(record)
return return_list_insert_fail_record
```
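The database helpers above document their parameters, but `fit_table_schema_type` has no usage example. Below is a minimal sketch of the intended round-trip; the SQLite file name and table name are made up for illustration, and the package-level export of `fit_table_schema_type` is assumed (the other imports follow the docstrings above).
```python
import pandas as pd
from sqlalchemy import create_engine
# create_table and load_table_schema imports follow the docstrings above;
# the package-level export of fit_table_schema_type is assumed here.
from tidyframe import create_table, load_table_schema, fit_table_schema_type

engine = create_engine('sqlite:///fit_schema_example.db')  # hypothetical database file
df = pd.DataFrame({'a': list('abc'), 'b': [1, 2, 3], 'c': [1.1, 2.2, 3.3]})

create_table(df, 'example_table', engine, create=True)  # build the table schema first
table = load_table_schema('example_table', engine)      # reflect the created schema
fit_table_schema_type(df, table)                        # coerce df dtypes in place to match
df.to_sql('example_table', engine, if_exists='append', index=False)
```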
#### File: tidyframe/tools/select.py
```python
import re
import numpy as np
from copy import copy, deepcopy
from funcy import chunks
def select(df,
columns=None,
columns_minus=None,
columns_between=None,
pattern=None,
copy=False):
"""
Select Pandas DataFrame Columns
Parameters
----------
df : Pandas DataFrame
    columns : list of columns to select
    columns_minus : list of columns to remove
    columns_between : list with two elements, select the columns between (and including) the two named columns
    pattern : regular expression or list of regular expressions, return matching columns
    copy : whether to return a deep copy of the DataFrame
Returns
-------
Pandas DataFrame
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from tidyframe import select
>>> df = pd.DataFrame(np.array(range(10)).reshape(2, 5),
... columns=list('abcde'),
... index=['row_1', 'row_2'])
>>> select(df, columns=['b', 'd'])
b d
row_1 1 3
row_2 6 8
>>> select(df, columns_minus=['b', 'd'])
a c e
row_1 0 2 4
row_2 5 7 9
>>> select(df, pattern='[a|b]')
a b
row_1 0 1
row_2 5 6
"""
if columns:
df_return = df[columns]
if columns_minus:
raw_col = {value: i for i, value in enumerate(df.columns)}
for pop_key in columns_minus:
raw_col.pop(pop_key)
df_return = df[list(raw_col.keys())]
if columns_between:
columns_location = {column: i for i, column in enumerate(df.columns)}
assert columns_location[columns_between[0]] < columns_location[
columns_between[
1]], 'first column location must less than second column location'
df_return = df.iloc[:,
range(columns_location[columns_between[0]],
columns_location[columns_between[1]] + 1)]
if pattern and isinstance(pattern, str):
columns_want = list(filter(lambda x: re.search(pattern, x),
df.columns))
df_return = df[columns_want]
if pattern and isinstance(pattern, list):
columns_want = []
for each_pattern in pattern:
columns_want.extend(
list(filter(lambda x: re.search(each_pattern, x), df.columns)))
columns_want = list(set(columns_want))
columns_want.sort()
df_return = df[columns_want]
if copy:
return deepcopy(df_return)
else:
return df_return
def reorder_columns(df, columns=None, pattern=None, last_columns=None):
"""
reorder columns of pandas DataFrame
Parameters
----------
df : Pandas DataFrame
    columns : list of column names to move to the front (ignored if pattern is not None)
    pattern : regular expression; matching columns are moved to the front
    last_columns : list of column names to move to the end
Returns
-------
Pandas DataFrame
Examples
--------
>>> import pandas as pd
>>> from tidyframe import reorder_columns
>>> df = pd.DataFrame([{'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 2}])
>>> df_reorder = reorder_columns(df, ['b', 'c'], last_columns=['a', 'd'])
>>> df_reorder
b c e a d
0 1 1 2 1 1
"""
if pattern:
reorder_columns = list(
filter(lambda x: re.search(pattern, x), df.columns))
else:
reorder_columns = copy(list(columns))
    reorder_columns = [x for x in reorder_columns if x in df.columns]
raw_columns = df.columns.copy()
if last_columns:
center_columns = raw_columns.difference(reorder_columns).difference(
last_columns).tolist()
else:
center_columns = raw_columns.difference(reorder_columns).tolist()
reorder_columns.extend(center_columns)
if last_columns:
reorder_columns.extend(last_columns)
return df[reorder_columns]
def get_batch_dataframe(df, batch_size=100):
"""
    Split a DataFrame into sub-DataFrames, where each sub-DataFrame has at most batch_size rows
Parameters
----------
df : Pandas DataFrame
batch_size : number of records in each sub-dataframe(default: 100)
Returns
-------
DataFrame generator
Examples
--------
>>> import pandas as pd
>>> from tidyframe import get_batch_dataframe
>>> df = pd.DataFrame()
>>> df['col_1'] = list("abcde")
>>> df['col_2'] = [1, 2, 3, 4, 5]
>>> dfs = [ x for x in get_batch_dataframe(df,2)]
>>> dfs[-1]
col_1 col_2
4 e 5
>>> [ x.shape[0] for x in dfs]
[2, 2, 1]
"""
for min_batch in chunks(batch_size, range(df.shape[0])):
yield df.iloc[min_batch, :]
def select_index(x, i, otherwise=np.NaN):
"""
    Select an element by index, catching all exceptions
    Parameters
    ----------
    x : array
    i : index
    otherwise : fill value returned if an exception occurs
    Returns
    -------
    x[i] if no exception occurs, else otherwise
"""
try:
return x[i]
except:
return otherwise
```
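`select_index` above has no doctest; a small sketch of its catch-all behaviour follows. The package-level import is assumed, as for the other helpers in this module.
```python
from tidyframe import select_index  # assumed package-level export

values = [10, 20, 30]
select_index(values, 1)                  # 20
select_index(values, 10)                 # nan -- the IndexError is swallowed
select_index(values, 10, otherwise=-1)   # -1
```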
#### File: tidyframe/tidyframe/transform.py
```python
import copy as cp
from functools import partial
import pandas as pd
import numpy as np
def nest(df,
columns=[],
columns_minus=[],
columns_between=[],
key='data',
copy=False):
"""
Nest repeated values
Parameters
----------
df: DataFrameGroupBy or DataFrame
    columns: list or index, columns to nest
    columns_minus: list or index, columns which should not be nested
    (must choose exactly one of columns, columns_minus and columns_between)
    columns_between: list with length 2, nest the columns lying between the two named columns
    copy: bool (default False), return the DataFrame using copy.deepcopy
"""
assert isinstance(df,
(pd.core.frame.DataFrame,
pd.core.groupby.DataFrameGroupBy)), "Must be DataFrame"
if len(columns) > 0:
assert len(columns_minus) == 0 and len(
columns_between
) == 0, "Using Parameter columns then columns_minus and column_between must not use"
if len(columns_minus) > 0:
assert len(columns) == 0 and len(
columns_between
) == 0, "Using Parameter columns_minus then columns and columns_between must not use"
if len(columns_between) > 0:
        assert len(columns_between) == 2, "length of columns_between must be 2"
assert len(columns) == 0 and len(
columns_minus
) == 0, "Using Parameter columns_between then columns_minus and between must not use"
if isinstance(df, pd.core.frame.DataFrame):
if len(columns) > 0:
if isinstance(columns, pd.core.indexes.base.Index):
columns_nest = columns.tolist()
else:
columns_nest = columns
columns_group = df.columns.difference(columns_nest).tolist()
df_g = df.groupby(columns_group)
data = [[index, group[columns_nest]] for index, group in df_g]
elif len(columns_minus) > 0:
if isinstance(columns_minus, pd.core.indexes.base.Index):
columns_group = columns_minus.tolist()
else:
columns_group = columns_minus
columns_nest = df.columns.difference(columns_group).tolist()
df_g = df.groupby(columns_group)
data = [[index, group[columns_nest]] for index, group in df_g]
else:
index_start = np.where(df.columns == columns_between[0])[0][0]
index_end = np.where(df.columns == columns_between[1])[0][0]
assert index_start < index_end, "columns_between order error"
columns_nest = df.columns[index_start:(index_end + 1)].tolist()
columns_group = df.columns.difference(columns_nest).tolist()
df_g = df.groupby(columns_group)
data = [[index, group[columns_nest]] for index, group in df_g]
else:
columns_group = list(df.dtypes.index.names)
columns_nest = list(df.dtypes.columns)
data = [[index, group[columns_nest]] for index, group in df]
outer = list(map(lambda x: x[0], data))
df_return = pd.DataFrame(outer, columns=columns_group)
df_return[key] = list(map(lambda x: x[1][columns_nest], data))
if copy:
return cp.deepcopy(df_return)
else:
return df_return
def unnest(df, drop=[], copy=False):
"""
    Inverse of nest: expand nested DataFrames back into rows
    Parameters
    ----------
    df: DataFrame with a column of DataFrames (or a column of list-like values)
    drop: list of columns to exclude from the result
"""
df_check = df.applymap(lambda x: isinstance(x, pd.DataFrame))
columns_nest = df_check.columns[df_check.sum() ==
df_check.shape[0]].tolist()
if len(columns_nest) > 0:
if len(columns_nest) == 1:
repeat_times = list(map(lambda x: x.shape[0], df[columns_nest[0]]))
columns_group = df_check.columns.difference(columns_nest)
df_return = pd.DataFrame(df[columns_group].values.repeat(
repeat_times, axis=0),
columns=columns_group)
df_return = pd.concat([
df_return.reset_index(drop=True),
pd.concat([*df[columns_nest[0]].tolist()
]).reset_index(drop=True)
],
axis=1)
if copy:
return cp.deepcopy(
df_return[df_return.columns.difference(drop)])
else:
return df_return[df_return.columns.difference(drop)]
else:
dict_col = {v: k + 1 for k, v in enumerate(df.columns)}
columns_value = df.columns.difference(columns_nest).tolist()
list_df_tmp = []
for x in df.itertuples():
df_tmp = pd.concat([x[dict_col[col]] for col in columns_nest],
axis=1)
for col in columns_value:
df_tmp[col] = x[dict_col[col]]
list_df_tmp.append(df_tmp)
df_return = pd.concat(list_df_tmp)
return df_return[pd.Index(columns_value).append(
df_return.columns.difference(columns_value))]
else:
column_series = df.columns[df.applymap(lambda x: isinstance(
x, (pd.Series, np.ndarray, list))).sum() > 0].tolist()
        assert len(column_series) == 1, "Exactly one column of list-like values must exist"
repeat_times = df[column_series[0]].map(len)
columns_group = df.columns.difference(column_series)
df_return = pd.DataFrame(df[columns_group].values.repeat(repeat_times,
axis=0),
columns=columns_group)
df_series = pd.concat(
[*df[column_series[0]].map(lambda x: pd.DataFrame(x))], axis=0)
df_return[column_series[0]] = df_series[0].tolist()
if copy:
return cp.deepcopy(df_return[df_return.columns.difference(drop)])
else:
return df_return[df_return.columns.difference(drop)]
def apply_window(df, func, partition=None, columns=None):
""" apply window function in DataFrame
Parameters
----------
df: DataFrameGroupBy or DataFrame
func: list of function
partition: list of partition columns
columns: list of columns which need to apply func
Returns
-------
Pandas Series
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tidyframe import apply_window
>>>
>>> df = pd.DataFrame({"range":[1,2,3,4,5,6],"target":[1,1,1,2,2,2]})
>>> apply_window(df, np.mean, partition=['target'], columns=df.columns[1])
0 1
1 1
2 1
3 2
4 2
5 2
Name: target, dtype: int64
"""
if isinstance(df, pd.core.groupby.DataFrameGroupBy):
df_g = df
else:
df_g = df.groupby(partition)
if columns is not None:
if callable(func):
if isinstance(columns, str):
return df_g[columns].transform(func)
elif isinstance(columns, (list, pd.core.indexes.base.Index)):
df_return = df_g[columns].transform(func)
df_return.columns = df_return.columns + "_" + func.__name__
return df_return
if isinstance(func, list):
list_df = []
df_return = pd.DataFrame()
for v in func:
if isinstance(columns, str):
return df_g[columns].transform(v)
df_tmp = df_g[columns].transform(v)
df_tmp.columns = df_tmp.columns + "_" + v.__name__
list_df.append(df_tmp)
return pd.concat(list_df, axis=1)
if isinstance(func, dict):
df_return = pd.DataFrame()
for (k, v) in func.items():
if isinstance(v, list):
for each_fun in v:
df_return[k + '_' +
each_fun.__name__] = df_g[k].transform(each_fun)
else:
df_return[k + '_' + v.__name__] = df_g[k].transform(v)
if isinstance(df, pd.core.frame.DataFrame):
df_return.index = df.index
return df_return
def _series_to_dict(x, index_name='index'):
"""
Change Pandas Series to Dict With Index
Parameters
----------
x : pandas Series
index_name : return dict key of index name
"""
x_dict = x.to_dict()
x_dict[index_name] = x.name
return x_dict
def to_dataframe(data, index_name='index'):
"""
    Change a list of Pandas Series to a Pandas DataFrame
    Parameters
    ----------
    data : list of pandas Series
    index_name : column name in the returned DataFrame that holds each Series' name
    Returns
    -------
    Pandas DataFrame
    Examples
--------
>>> import pandas as pd
>>> from tidyframe import to_dataframe
>>> list_series = [
... pd.Series([1, 2], index=['i_1', 'i_2']),
... pd.Series([3, 4], index=['i_1', 'i_2'])
... ]
>>> to_dataframe(list_series)
i_1 i_2 index
0 1 2 None
1 3 4 None
"""
p_series_to_dict = partial(_series_to_dict, index_name=index_name)
return pd.DataFrame(list(map(p_series_to_dict, data)))
def rolling(list_object, window_size, missing=np.NaN):
"""
Rolling list of object
Parameters
----------
list_object : list of objects
window_size : rolling windows size
    missing : fill value used where the rolling window extends past the ends of the list
Returns
-------
list of list
Examples
--------
>>> import pandas as pd
>>> from tidyframe import rolling
>>> a = list(range(10))
>>> pd.DataFrame({'a': a, 'b': rolling(a, 3)})
a b
0 0 [nan, nan, 0]
1 1 [nan, 0, 1]
2 2 [0, 1, 2]
3 3 [1, 2, 3]
4 4 [2, 3, 4]
5 5 [3, 4, 5]
6 6 [4, 5, 6]
7 7 [5, 6, 7]
8 8 [6, 7, 8]
9 9 [7, 8, 9]
"""
assert isinstance(list_object,
list), "type of list_object must be equal list"
assert window_size != 0, "window_size must be not equal zero"
list_return = []
if window_size > 0:
for i, x in enumerate(list_object):
if i < (window_size - 1):
ele_list = [missing] * window_size
ele_list[-1 * (i + 1):] = list_object[0:(i + 1)]
list_return.append(ele_list.copy())
else:
list_return.append(list_object[(i - window_size + 1):i + 1])
return list_return
else:
len_object = len(list_object)
len_end = 1
for i, x in enumerate(list_object):
if i > (len_object + window_size):
ele_list = list_object[i:len_object].copy()
ele_list.extend([missing] * (len_end))
list_return.append(ele_list.copy())
len_end = len_end + 1
else:
list_return.append(list_object[i:i - window_size])
return list_return
def add_columns(df, columns, default=None, deepcopy=False):
"""
Add column if column is not exist
Parameters
----------
df : pandas DataFrame
    columns : list, names of the columns to add
    default : list (one entry per name in columns) or a single object (default: None)
    deepcopy : bool, deepcopy df or not (default: False)
    Returns
    -------
    pandas DataFrame if deepcopy is True, otherwise None (df is modified in place)
Examples
--------
>>> import pandas as pd
>>> from tidyframe import add_columns
>>> df = pd.DataFrame()
>>> df['a'] = [1, 6]
>>> df['b'] = [2, 7]
>>> df['c'] = [3, 8]
>>> df['d'] = [4, 9]
>>> df['e'] = [5, 10]
>>> add_columns(df, columns=['a', 'f'], default=[30, [10, 11]])
>>> df
a b c d e f
0 1 2 3 4 5 10
1 6 7 8 9 10 11
"""
if deepcopy:
df_cp = cp.deepcopy(df)
else:
df_cp = df
for i, x in enumerate(columns):
if x not in df.columns:
if isinstance(default, list):
df_cp[x] = default[i]
else:
df_cp[x] = default
if deepcopy:
return df_cp
else:
return None
``` |
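Neither `nest` nor `unnest` above carries a doctest; the sketch below shows the intended round-trip on a tiny frame, assuming both functions are exported at the package level like the other tidyframe helpers.
```python
import pandas as pd
from tidyframe import nest, unnest  # assumed package-level exports

df = pd.DataFrame({'group': ['a', 'a', 'b'], 'value': [1, 2, 3]})

nested = nest(df, columns=['value'], key='data')
# one row per 'group'; the 'data' column holds a DataFrame of the nested 'value' rows
flat = unnest(nested)
# back to one row per original record, with 'group' repeated alongside 'value'
```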
{
"source": "jhenin/alchemlyb",
"score": 2
} |
#### File: tests/parsing/test_namd.py
```python
from os.path import basename
from re import search
import bz2
import pytest
from alchemlyb.parsing.namd import extract_u_nk
from alchemtest.namd import load_tyr2ala
from alchemtest.namd import load_idws
from alchemtest.namd import load_restarted
from alchemtest.namd import load_restarted_reversed
# Indices of lambda values in the following line in NAMD fepout files:
# #NEW FEP WINDOW: LAMBDA SET TO 0.6 LAMBDA2 0.7 LAMBDA_IDWS 0.5
LAMBDA1_IDX_NEW = 6
LAMBDA2_IDX_NEW = 8
LAMBDA_IDWS_IDX_NEW = 10
# Indices of lambda values in the following type of line in NAMD fepout files:
# #Free energy change for lambda window [ 0.6 0.7 ] is 0.12345 ; net change until now is 0.12345
LAMBDA1_IDX_FREE = 7
LAMBDA2_IDX_FREE = 8
@pytest.fixture(scope="module")
def dataset():
return load_tyr2ala()
@pytest.mark.parametrize("direction,shape",
[('forward', (21021, 21)),
('backward', (21021, 21)),
])
def test_u_nk(dataset, direction, shape):
"""Test that u_nk has the correct form when extracted from files.
"""
for filename in dataset['data'][direction]:
u_nk = extract_u_nk(filename, T=300)
assert u_nk.index.names == ['time', 'fep-lambda']
assert u_nk.shape == shape
def test_u_nk_idws():
"""Test that u_nk has the correct form when extracted from files.
"""
filenames = load_idws()['data']['forward']
u_nk = extract_u_nk(filenames, T=300)
assert u_nk.index.names == ['time', 'fep-lambda']
assert u_nk.shape == (29252, 11)
@pytest.fixture(params=[load_restarted, load_restarted_reversed])
def restarted_dataset(request):
return request.param()
def _corrupt_fepout(fepout_in, params, tmp_path):
"""Corrupts specific lines in a fepout file according to each line's prefix,
using caller-supplied functions.
Parameters
----------
fepout_in: str
Path to fepout file to be modified. This file will not be overwritten.
params: list of tuples
For each tuple, the first element must be a str that will be passed to
startswith() to identify the line(s) to modify (e.g. "#NEW"). The
second element must be a function that accepts a list of strs which is
the output of running split() on the identified line and returns
a modified list of tokens that will be reassembled into the line to be
output.
Returns
-------
The name of a temporary file which pytest will unlink.
"""
fepout_out = tmp_path / basename(fepout_in)
with bz2.open(fepout_out, 'wt') as f_out:
with bz2.open(fepout_in, 'rt') as f_in:
for line in f_in:
for prefix, func in params:
if line.startswith(prefix):
line = ' '.join(func(line.split())) + '\n'
f_out.write(line)
return str(fepout_out)
@pytest.fixture
def restarted_dataset_inconsistent(restarted_dataset, tmp_path):
"""Returns intentionally messed up dataset where lambda1 and lambda2 at start and end of
a window are different."""
filenames = sorted(restarted_dataset['data']['both'])
changed = False
def func_free_line(l):
nonlocal changed
if float(l[7]) >= 0.7 and float(l[7]) < 0.9:
l[7] = str(float(l[7]) + 0.0001)
changed = True
return l
for i in range(len(filenames)):
restarted_dataset['data']['both'][i] = \
_corrupt_fepout(filenames[i], [('#Free', func_free_line)], tmp_path)
# Only actually modify one window so we don't trigger the wrong exception
if changed is True:
break
return restarted_dataset
@pytest.fixture
def restarted_dataset_idws_without_lambda_idws(restarted_dataset, tmp_path):
"""Returns intentionally messed up dataset where the first window has IDWS data
but no lambda_idws.
"""
# First window won't have any IDWS data so we just drop all its files and fudge the lambdas
# in the next window to include 0.0 or 1.0 (as appropriate) so we still have a nominally complete calculation
filenames = [x for x in sorted(restarted_dataset['data']['both']) if search('000[a-z]?.fepout', x) is None]
def func_new_line(l):
if float(l[LAMBDA1_IDX_NEW]) > 0.5: # 1->0 (reversed) calculation
            l[LAMBDA1_IDX_NEW] = '1.0'
else: # regular 0->1 calculation
l[LAMBDA1_IDX_NEW] = '0.0'
# Drop the lambda_idws
return l[:9]
def func_free_line(l):
if float(l[LAMBDA1_IDX_FREE]) > 0.5: # 1->0 (reversed) calculation
            l[LAMBDA1_IDX_FREE] = '1.0'
else: # regular 0->1 calculation
l[LAMBDA1_IDX_FREE] = '0.0'
return l
filenames[0] = _corrupt_fepout(filenames[0], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
restarted_dataset['data']['both'] = filenames
return restarted_dataset
@pytest.fixture
def restarted_dataset_toomany_lambda2(restarted_dataset, tmp_path):
"""Returns intentionally messed up dataset, where there are too many lambda2 values for a
given lambda1."""
filenames = sorted(restarted_dataset['data']['both'])
# For the same l1 and lidws we retain old lambda2 values thus ensuring a collision
# Also, don't make a window where lambda1 >= lambda2 because this will trigger the
# "direction changed" exception instead
def func_new_line(l):
if float(l[LAMBDA2_IDX_NEW]) <= 0.2:
return l
l[LAMBDA1_IDX_NEW] = '0.2'
if len(l) > 9 and l[9] == 'LAMBDA_IDWS':
l[LAMBDA_IDWS_IDX_NEW] = '0.1'
return l
def func_free_line(l):
if float(l[LAMBDA2_IDX_FREE]) <= 0.2:
return l
l[LAMBDA1_IDX_FREE] = '0.2'
return l
for i in range(len(filenames)):
restarted_dataset['data']['both'][i] = \
_corrupt_fepout(filenames[i], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
return restarted_dataset
@pytest.fixture
def restarted_dataset_toomany_lambda_idws(restarted_dataset, tmp_path):
"""Returns intentionally messed up dataset, where there are too many lambda2 values for a
given lambda1."""
filenames = sorted(restarted_dataset['data']['both'])
# For the same lambda1 and lambda2 we retain the first set of lambda1/lambda2 values
# and replicate them across all windows thus ensuring that there will be more than
# one lambda_idws value for a given lambda1 and lambda2
this_lambda1, this_lambda2 = None, None
def func_new_line(l):
nonlocal this_lambda1, this_lambda2
if this_lambda1 is None:
this_lambda1, this_lambda2 = l[LAMBDA1_IDX_NEW], l[LAMBDA2_IDX_NEW]
# Ensure that changing these lambda values won't cause a reversal in direction and trigger
# an exception we're not trying to test here
if len(l) > 9 and float(l[LAMBDA_IDWS_IDX_NEW]) < 0.5:
l[LAMBDA1_IDX_NEW], l[LAMBDA2_IDX_NEW] = this_lambda1, this_lambda2
return l
def func_free_line(l):
l[LAMBDA1_IDX_FREE], l[LAMBDA2_IDX_FREE] = this_lambda1, this_lambda2
return l
for i in range(len(filenames)):
restarted_dataset['data']['both'][i] = _corrupt_fepout(filenames[i], [('#NEW', func_new_line)], tmp_path)
return restarted_dataset
@pytest.fixture
def restarted_dataset_direction_changed(restarted_dataset, tmp_path):
"""Returns intentionally messed up dataset, with one window where the lambda values are reversed."""
filenames = sorted(restarted_dataset['data']['both'])
def func_new_line(l):
l[6], l[8], l[10] = l[10], l[8], l[6]
return l
def func_free_line(l):
l[7], l[8] = l[8], l[7]
return l
# Reverse the direction of lambdas for this window
idx_to_corrupt = filenames.index(sorted(filenames)[-3])
fname1 = _corrupt_fepout(filenames[idx_to_corrupt], [('#NEW', func_new_line), ('#Free', func_free_line)], tmp_path)
restarted_dataset['data']['both'][idx_to_corrupt] = fname1
return restarted_dataset
def test_u_nk_restarted():
"""Test that u_nk has the correct form when extracted from an IDWS
FEP run that includes terminations and restarts.
"""
filenames = load_restarted()['data']['both']
u_nk = extract_u_nk(filenames, T=300)
assert u_nk.index.names == ['time', 'fep-lambda']
assert u_nk.shape == (30061, 11)
def test_u_nk_restarted_reversed():
filenames = load_restarted_reversed()['data']['both']
u_nk = extract_u_nk(filenames, T=300)
assert u_nk.index.names == ['time', 'fep-lambda']
assert u_nk.shape == (30170, 11)
def test_u_nk_restarted_direction_changed(restarted_dataset_direction_changed):
"""Test that when lambda values change direction within a dataset, parsing throws an error."""
with pytest.raises(ValueError, match='Lambda values change direction'):
u_nk = extract_u_nk(restarted_dataset_direction_changed['data']['both'], T=300)
def test_u_nk_restarted_idws_without_lambda_idws(restarted_dataset_idws_without_lambda_idws):
"""Test that when the first window has IDWS data but no lambda_idws, parsing throws an error.
In this situation, the lambda_idws cannot be inferred, because there's no previous lambda
value available.
"""
with pytest.raises(ValueError, match='IDWS data present in first window but lambda_idws not included'):
u_nk = extract_u_nk(restarted_dataset_idws_without_lambda_idws['data']['both'], T=300)
def test_u_nk_restarted_inconsistent(restarted_dataset_inconsistent):
"""Test that when lambda values are inconsistent between start and end of a single window,
parsing throws an error.
"""
with pytest.raises(ValueError, match='Inconsistent lambda values within the same window'):
u_nk = extract_u_nk(restarted_dataset_inconsistent['data']['both'], T=300)
def test_u_nk_restarted_toomany_lambda_idws(restarted_dataset_toomany_lambda_idws):
"""Test that when there is more than one lambda_idws for a given lambda1, parsing throws an error."""
with pytest.raises(ValueError, match='More than one lambda_idws value for a particular lambda1'):
u_nk = extract_u_nk(restarted_dataset_toomany_lambda_idws['data']['both'], T=300)
def test_u_nk_restarted_toomany_lambda2(restarted_dataset_toomany_lambda2):
"""Test that when there is more than one lambda2 for a given lambda1, parsing throws an error."""
with pytest.raises(ValueError, match='More than one lambda2 value for a particular lambda1'):
u_nk = extract_u_nk(restarted_dataset_toomany_lambda2['data']['both'], T=300)
``` |
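The lambda index constants defined at the top of this module can be checked directly against the header lines quoted in its comments; a small self-contained sketch:
```python
# Token positions in the two fepout header formats quoted in the comments above.
new_line = "#NEW FEP WINDOW: LAMBDA SET TO 0.6 LAMBDA2 0.7 LAMBDA_IDWS 0.5"
free_line = ("#Free energy change for lambda window [ 0.6 0.7 ] is 0.12345 ; "
             "net change until now is 0.12345")

tokens = new_line.split()
assert tokens[6] == '0.6'    # LAMBDA1_IDX_NEW
assert tokens[8] == '0.7'    # LAMBDA2_IDX_NEW
assert tokens[10] == '0.5'   # LAMBDA_IDWS_IDX_NEW

tokens = free_line.split()
assert tokens[7] == '0.6'    # LAMBDA1_IDX_FREE
assert tokens[8] == '0.7'    # LAMBDA2_IDX_FREE
```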
{
"source": "jhenkens/monoprice-6ch-esphome",
"score": 2
} |
#### File: monoprice_10761/binary_sensor/__init__.py
```python
from esphome.components import binary_sensor
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import CONF_ID
from .. import monoprice_10761_ns, CONF_MONOPRICE_10761_ID, CONF_ZONE, CONF_COMMAND, Monoprice10761
DEPENDENCIES = ["monoprice_10761"]
CODEOWNERS = ["@jhenkens"]
Monoprice10761BinarySensor = monoprice_10761_ns.class_("Monoprice10761BinarySensor", binary_sensor.BinarySensor, cg.Component)
CONFIG_SCHEMA = binary_sensor.BINARY_SENSOR_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(Monoprice10761BinarySensor),
cv.GenerateID(CONF_MONOPRICE_10761_ID): cv.use_id(Monoprice10761),
cv.Required(CONF_ZONE): cv.int_range(min=11, max=36),
cv.Required(CONF_COMMAND): cv.one_of("LS", "PA", upper=True),
}
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await binary_sensor.register_binary_sensor(var, config)
paren = await cg.get_variable(config[CONF_MONOPRICE_10761_ID])
cg.add(var.set_parent(paren))
cg.add(var.set_zone(config[CONF_ZONE]))
cg.add(var.set_data_type(config[CONF_COMMAND]))
```
#### File: monoprice_10761/number/__init__.py
```python
from esphome.components import number
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import CONF_ID
from .. import monoprice_10761_ns, CONF_MONOPRICE_10761_ID, CONF_ZONE, CONF_COMMAND, Monoprice10761
DEPENDENCIES = ["monoprice_10761"]
CODEOWNERS = ["@jhenkens"]
Monoprice10761Number = monoprice_10761_ns.class_("Monoprice10761Number", number.Number, cg.Component)
CONFIG_SCHEMA = number.NUMBER_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(Monoprice10761Number),
cv.GenerateID(CONF_MONOPRICE_10761_ID): cv.use_id(Monoprice10761),
cv.Required(CONF_ZONE): cv.int_range(min=11, max=36),
cv.Required(CONF_COMMAND): cv.one_of("VO", "TR", "BS", "BL", upper=True),
}
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
    min_value = 0
    max_value = 38
    offset = 0
    command = config[CONF_COMMAND]
    if command == "BL":
        min_value = -10
        max_value = 10
        offset = -10
    elif command == "TR" or command == "BS":
        offset = -7
        min_value = -7
        max_value = 7
    await number.register_number(var, config, min_value=min_value, max_value=max_value, step=1)
paren = await cg.get_variable(config[CONF_MONOPRICE_10761_ID])
cg.add(var.set_parent(paren))
cg.add(var.set_zone(config[CONF_ZONE]))
cg.add(var.set_data_type(config[CONF_COMMAND]))
cg.add(var.set_offset(offset))
``` |
{
"source": "jhennawi/pydl",
"score": 3
} |
#### File: pydl/goddard/math.py
```python
from __future__ import absolute_import
def flegendre(x, m):
"""Compute the first `m` Legendre polynomials.
Parameters
----------
x : array-like
Compute the Legendre polynomials at these abscissa values.
m : :class:`int`
The number of Legendre polynomials to compute. For example, if
:math:`m = 3`, :math:`P_0 (x)`, :math:`P_1 (x)` and :math:`P_2 (x)`
will be computed.
Returns
-------
:class:`numpy.ndarray`
"""
import numpy as np
from scipy.special import legendre
if isinstance(x, np.ndarray):
n = x.size
else:
n = 1
if m < 1:
raise ValueError('Number of Legendre polynomials must be at least 1.')
try:
dt = x.dtype
except AttributeError:
dt = np.float64
leg = np.ones((m, n), dtype=dt)
if m >= 2:
leg[1, :] = x
if m >= 3:
for k in range(2, m):
leg[k, :] = np.polyval(legendre(k), x)
return leg
```
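A short usage check for `flegendre`, not part of the original docstring: the rows of the returned array are P0(x), P1(x), P2(x), so the third row should match (3*x**2 - 1)/2.
```python
import numpy as np
from pydl.goddard.math import flegendre

x = np.linspace(-1.0, 1.0, 5)
leg = flegendre(x, 3)   # shape (3, 5): rows are P0(x), P1(x), P2(x)
assert np.allclose(leg[0], 1.0)
assert np.allclose(leg[1], x)
assert np.allclose(leg[2], 0.5 * (3.0 * x**2 - 1.0))
```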
#### File: pydl/photoop/photoobj.py
```python
def sdss_calibv():
"""Return calibration for velocities from pix/frame to deg/day.
Returns
-------
:class:`float`
The conversion from pixels per frame to degrees per day
Notes
-----
Assumes frame time difference of 71.72 seconds and pixel scale of
0.396 arcsec, both fixed. Also note that observations of the same part of
sky from adjacent bands are separated by *two* frame numbers,
so we multiply by a factor two.
"""
pixscale = 0.396 # arcsec
ftime = 71.72 # seconds
pixframe2degday = 2.0*pixscale/(3600.0) * (3600.0)*24.0/ftime
return pixframe2degday
def unwrap_objid(objid):
"""Unwrap CAS-style objID into run, camcol, field, id, rerun.
See :func:`~pydl.pydlutils.sdss.sdss_objid` for details on how the bits
within an objID are assigned.
Parameters
----------
objid : :class:`numpy.ndarray`
An array containing 64-bit integers or strings. If strings are passed,
they will be converted to integers internally.
Returns
-------
:class:`numpy.recarray`
A record array with the same length as `objid`, with the columns
'skyversion', 'rerun', 'run', 'camcol', 'firstfield', 'frame', 'id'.
Notes
-----
For historical reasons, the inverse of this function,
:func:`~pydl.pydlutils.sdss.sdss_objid` is not in the same namespace as
this function.
'frame' is used instead of 'field' because record arrays have a method
of the same name.
Examples
--------
>>> from numpy import array
>>> from pydl.photoop.photoobj import unwrap_objid
>>> unwrap_objid(array([1237661382772195474]))
rec.array([(2, 301, 3704, 3, 0, 91, 146)],
dtype=[('skyversion', '<i4'), ('rerun', '<i4'), ('run', '<i4'), ('camcol', '<i4'), ('firstfield', '<i4'), ('frame', '<i4'), ('id', '<i4')])
"""
import numpy as np
if objid.dtype.type is np.string_ or objid.dtype.type is np.unicode_:
tempobjid = objid.astype(np.int64)
elif objid.dtype.type is np.int64:
tempobjid = objid.copy()
else:
raise ValueError('Unrecognized type for objid!')
unwrap = np.recarray(objid.shape,
dtype=[('skyversion', 'i4'), ('rerun', 'i4'),
('run', 'i4'), ('camcol', 'i4'),
('firstfield', 'i4'),
('frame', 'i4'), ('id', 'i4')])
unwrap.skyversion = np.bitwise_and(tempobjid >> 59, 2**4 - 1)
unwrap.rerun = np.bitwise_and(tempobjid >> 48, 2**11 - 1)
unwrap.run = np.bitwise_and(tempobjid >> 32, 2**16 - 1)
unwrap.camcol = np.bitwise_and(tempobjid >> 29, 2**3 - 1)
unwrap.firstfield = np.bitwise_and(tempobjid >> 28, 2**1 - 1)
unwrap.frame = np.bitwise_and(tempobjid >> 16, 2**12 - 1)
unwrap.id = np.bitwise_and(tempobjid, 2**16 - 1)
return unwrap
```
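The conversion factor returned by `sdss_calibv` can be checked by hand from its Notes: the two factors of 3600 cancel, leaving 2 * 0.396 * 24 / 71.72, roughly 0.265 deg/day per (pixel/frame). A quick sketch:
```python
from pydl.photoop.photoobj import sdss_calibv

# 2 * 0.396 arcsec/pix / 3600 arcsec/deg * 86400 s/day / 71.72 s/frame
# = 2 * 0.396 * 24 / 71.72 ~= 0.2650 deg/day per (pixel/frame)
print(round(sdss_calibv(), 4))  # 0.265
```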
#### File: pydlspec2d/tests/test_spec1d.py
```python
import numpy as np
import os
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from .. import Pydlspec2dException
from ..spec1d import (HMF, findspec, spec_append, spec_path, template_metadata,
wavevector)
class TestSpec1d(object):
"""Test the functions in pydl.pydlspec2d.spec1d.
"""
def setup(self):
self.env = {'BOSS_SPECTRO_REDUX': '/boss/spectro/redux',
'SPECTRO_REDUX': '/sdss/spectro/redux',
'RUN2D': 'v1_2_3',
'RUN1D': 'v1_2_3'}
self.original_env = dict()
for key in self.env:
if key in os.environ:
self.original_env[key] = os.environ[key]
else:
self.original_env[key] = None
os.environ[key] = self.env[key]
def teardown(self):
for key in self.original_env:
if self.original_env[key] is None:
del os.environ[key]
else:
os.environ[key] = self.original_env[key]
def test_findspec(self):
"""This is just a placeholder for now.
"""
# slist = findspec(infile='file.in', sdss=True)
assert True
def test_hmf_init(self):
"""Test initialization of HMF object
"""
spec = np.random.random((20, 100))
invvar = np.random.random((20, 100))
hmf = HMF(spec, invvar)
assert hmf.K == 4
assert hmf.log.level == 20 # INFO
hmf = HMF(spec, invvar, K=6, verbose=True)
assert hmf.K == 6
assert hmf.log.level == 10 # DEBUG
def test_spec_append(self):
spec1 = np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
spec2 = np.array([[2, 2, 2, 2],
[2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 0],
[2, 2, 2, 2, 0]])).all()
s = spec_append(spec1, spec2, 1)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 2, 2, 2, 2],
[0, 2, 2, 2, 2]])).all()
spec1 = np.array([[1, 1, 1],
[1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2, -2)
assert (s == np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
def test_spec_path(self):
bsr = self.env['BOSS_SPECTRO_REDUX']
run2d = self.env['RUN2D']
p = spec_path(123)
assert p[0] == os.path.join(bsr, run2d, '0123')
p = spec_path(1234)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(1234, topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(np.array([1234, 5678]), topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
assert p[1] == os.path.join(bsr, run2d, '5678')
p = spec_path(1234, path=bsr)
assert p[0] == bsr
def test_template_metadata(self):
with raises(Pydlspec2dException):
slist, metadata = template_metadata('/no/such/file.par')
inputfile = get_pkg_data_filename('t/test_template_metadata.par')
slist, metadata = template_metadata(inputfile)
assert metadata['object'] == 'gal'
assert not metadata['nonnegative']
def test_wavevector(self):
l = wavevector(3, 4, binsz=0.1)
ll = np.array([3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
l = wavevector(3, 4, wavemin=3, binsz=0.1)
ll = np.array([3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
```
#### File: pydl/pydlutils/coord.py
```python
import numpy as np
import astropy.units as u
import astropy.coordinates as ac
class SDSSMuNu(ac.BaseCoordinateFrame):
"""SDSS Great Circle Coordinates
Attributes
----------
stripe
SDSS `Stripe Number`_ .
node
Node of the great circle with respect to the celestial equator.
In SDSS, this is almost always RA = 95.0 degrees.
incl
Inclination of the great circle with respect to the celestial
equator.
phi
Counter-clockwise position angle w.r.t. north for an arc
in the +nu direction.
Parameters
----------
mu : :class:`~astropy.coordinates.Angle`
Angle corresponding to longitude measured along a stripe.
nu : :class:`~astropy.coordinates.Angle`
Angle corresponding to latitude measured perpendicular to a stripe.
Notes
-----
http://www.sdss.org/dr12/algorithms/surveycoords/
.. _`Stripe Number`: http://www.sdss.org/dr14/help/glossary/#stripe
"""
default_representation = ac.SphericalRepresentation
frame_specific_representation_info = {
'spherical': [
ac.RepresentationMapping(reprname='lon', framename='mu',
defaultunit=u.deg),
ac.RepresentationMapping(reprname='lat', framename='nu',
defaultunit=u.deg)
]
}
frame_specific_representation_info['unitspherical'] = (
frame_specific_representation_info['spherical'])
stripe = ac.Attribute(default=0)
node = ac.QuantityAttribute(default=ac.Angle(95.0, unit=u.deg),
unit=u.deg)
# phi = ac.QuantityFrameAttribute(default=None, unit=u.deg)
@property
def incl(self):
return ac.Angle(stripe_to_incl(self.stripe), unit=u.deg)
def current_mjd():
"""Return the current MJD.
"""
from ..goddard.astro import get_juldate
return get_juldate() - 2400000.5
@ac.frame_transform_graph.transform(ac.FunctionTransform, SDSSMuNu, ac.ICRS)
def munu_to_radec(munu, icrs_frame):
"""Convert from SDSS great circle coordinates to equatorial coordinates.
Parameters
----------
munu : :class:`~pydl.pydlutils.coord.SDSSMuNu`
SDSS great circle coordinates (mu, nu).
Returns
-------
:class:`~astropy.coordinates.ICRS`
Equatorial coordinates (RA, Dec).
"""
# from pydlutils.coord import stripe_to_eta
# from pydlutils.goddard.misc import cirrange
# if 'stripe' in kwargs:
# node = 95.0
# incl = stripe_to_incl(kwargs['stripe'])
# elif 'node' in kwargs and 'incl' in kwargs:
# node = kwargs['node']
# incl = kwargs['incl']
# else:
# raise ValueError('Must specify either STRIPE or NODE,INCL!')
# if mu.size != nu.size:
# raise ValueError('Number of elements in MU and NU must agree!')
sinnu = np.sin(munu.nu.to(u.radian).value)
cosnu = np.cos(munu.nu.to(u.radian).value)
sini = np.sin(munu.incl.to(u.radian).value)
cosi = np.cos(munu.incl.to(u.radian).value)
sinmu = np.sin((munu.mu - munu.node).to(u.radian).value)
cosmu = np.cos((munu.mu - munu.node).to(u.radian).value)
xx = cosmu * cosnu
yy = sinmu * cosnu * cosi - sinnu * sini
zz = sinmu * cosnu * sini + sinnu * cosi
ra = ac.Angle(np.arctan2(yy, xx), unit=u.radian) + munu.node
dec = ac.Angle(np.arcsin(zz), unit=u.radian)
# if 'phi' in kwargs:
# phi = np.rad2deg(np.arctan2(cosmu * sini,
# (-sinmu * sinnu * sini + cosnu * cosi)*cosnu))
# return (ra, dec, phi)
# else:
# return (ra, dec)
return ac.ICRS(ra=ra, dec=dec).transform_to(icrs_frame)
@ac.frame_transform_graph.transform(ac.FunctionTransform, ac.ICRS, SDSSMuNu)
def radec_to_munu(icrs_frame, munu):
"""Convert from equatorial coordinates to SDSS great circle coordinates.
Parameters
----------
icrs_frame : :class:`~astropy.coordinates.ICRS`
Equatorial coordinates (RA, Dec).
Returns
-------
:class:`~pydl.pydlutils.coord.SDSSMuNu`
SDSS great circle coordinates (mu, nu).
"""
# from pydlutils.coord import stripe_to_eta
# from pydlutils.goddard.misc import cirrange
# if 'stripe' in kwargs:
# node = 95.0
# incl = stripe_to_incl(kwargs['stripe'])
# elif 'node' in kwargs and 'incl' in kwargs:
# node = kwargs['node']
# incl = kwargs['incl']
# else:
# raise ValueError('Must specify either STRIPE or NODE,INCL!')
# if ra.size != dec.size:
# raise ValueError('Number of elements in RA and DEC must agree!')
sinra = np.sin((icrs_frame.ra - munu.node).to(u.radian).value)
cosra = np.cos((icrs_frame.ra - munu.node).to(u.radian).value)
sindec = np.sin(icrs_frame.dec.to(u.radian).value)
cosdec = np.cos(icrs_frame.dec.to(u.radian).value)
sini = np.sin(munu.incl.to(u.radian).value)
cosi = np.cos(munu.incl.to(u.radian).value)
x1 = cosdec * cosra
y1 = cosdec * sinra
z1 = sindec
x2 = x1
y2 = y1 * cosi + z1 * sini
z2 = -y1 * sini + z1 * cosi
mu = ac.Angle(np.arctan2(y2, x2), unit=u.radian) + munu.node
nu = ac.Angle(np.arcsin(z2), unit=u.radian)
# if 'phi' in kwargs:
# sinnu = np.sin(np.deg2rad(nu))
# cosnu = np.cos(np.deg2rad(nu))
# sinmu = np.sin(np.deg2rad(mu-node))
# cosmu = np.cos(np.deg2rad(mu-node))
# phi = np.rad2deg(np.arctan2(cosmu * sini,
# (-sinmu * sinnu * sini + cosnu * cosi)*cosnu))
# return (ra, dec, phi)
# else:
# return (ra, dec)
return SDSSMuNu(mu=mu, nu=nu, stripe=munu.stripe)
def stripe_to_eta(stripe):
"""Convert from SDSS great circle coordinates to equatorial coordinates.
Parameters
----------
stripe : :class:`int` or :class:`numpy.ndarray`
SDSS Stripe number.
Returns
-------
:class:`float` or :class:`numpy.ndarray`
The eta value in the SDSS (lambda,eta) coordinate system.
"""
stripe_sep = 2.5
eta = stripe * stripe_sep - 57.5
if stripe > 46:
eta -= 180.0
return eta
def stripe_to_incl(stripe):
"""Convert from SDSS stripe number to an inclination relative to the
equator.
Parameters
----------
stripe : :class:`int` or :class:`numpy.ndarray`
SDSS Stripe number.
Returns
-------
:class:`float` or :class:`numpy.ndarray`
Inclination of the stripe relative to the equator (Dec = 0).
"""
dec_center = 32.5
eta_center = stripe_to_eta(stripe)
incl = eta_center + dec_center
return incl
```
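Neither `stripe_to_eta` nor `stripe_to_incl` has a doctest; a worked check for stripe 82 (the SDSS equatorial stripe) follows directly from the formulas above:
```python
from pydl.pydlutils.coord import stripe_to_eta, stripe_to_incl

# eta = 82 * 2.5 - 57.5 = 147.5, then -180 because stripe > 46, giving -32.5
# incl = eta + 32.5 = 0.0, so stripe 82 lies along the celestial equator
assert stripe_to_eta(82) == -32.5
assert stripe_to_incl(82) == 0.0
```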
#### File: pydl/pydlutils/misc.py
```python
from __future__ import print_function
import numpy as np
from . import PydlutilsException
def decode_mixed(x):
"""Convert bytes in Numpy arrays into strings. Leave other stuff alone.
Parameters
----------
x : object
Input object.
Returns
-------
object
If `x` has a ``decode()`` method, ``x.decode()`` will be returned.
Otherwise `x` will be returned unchanged.
"""
try:
return x.decode()
except:
return x
def djs_laxisgen(dims, iaxis=0):
"""Returns an integer array where each element of the array is set
equal to its index number along the specified axis.
Parameters
----------
dims : :class:`list`
Dimensions of the array to return.
iaxis : :class:`int`, optional
Index along this dimension.
Returns
-------
:class:`numpy.ndarray`
An array of indexes with ``dtype=int32``.
Raises
------
ValueError
If `iaxis` is greater than or equal to the number of dimensions.
Notes
-----
For two or more dimensions, there is no difference between this routine
and :func:`~pydl.pydlutils.misc.djs_laxisnum`.
Examples
--------
>>> from pydl.pydlutils.misc import djs_laxisgen
>>> print(djs_laxisgen([4,4]))
[[0 0 0 0]
[1 1 1 1]
[2 2 2 2]
[3 3 3 3]]
"""
ndimen = len(dims)
if ndimen == 1:
return np.arange(dims[0], dtype='i4')
return djs_laxisnum(dims, iaxis)
def djs_laxisnum(dims, iaxis=0):
"""Returns an integer array where each element of the array is set equal
to its index number in the specified axis.
Parameters
----------
dims : :class:`list`
Dimensions of the array to return.
iaxis : :class:`int`, optional
Index along this dimension.
Returns
-------
:class:`numpy.ndarray`
An array of indexes with ``dtype=int32``.
Raises
------
ValueError
If `iaxis` is greater than or equal to the number of dimensions, or
if number of dimensions is greater than three.
Notes
-----
For two or more dimensions, there is no difference between this routine
and :func:`~pydl.pydlutils.misc.djs_laxisgen`.
Examples
--------
>>> from pydl.pydlutils.misc import djs_laxisnum
>>> print(djs_laxisnum([4,4]))
[[0 0 0 0]
[1 1 1 1]
[2 2 2 2]
[3 3 3 3]]
"""
ndimen = len(dims)
result = np.zeros(dims, dtype='i4')
if ndimen == 1:
pass
elif ndimen == 2:
if iaxis == 0:
for k in range(dims[0]):
result[k, :] = k
elif iaxis == 1:
for k in range(dims[1]):
result[:, k] = k
else:
raise ValueError("Bad value for iaxis: {0:d}".format(iaxis))
elif ndimen == 3:
if iaxis == 0:
for k in range(dims[0]):
result[k, :, :] = k
elif iaxis == 1:
for k in range(dims[1]):
result[:, k, :] = k
elif iaxis == 2:
for k in range(dims[2]):
result[:, :, k] = k
else:
raise ValueError("Bad value for iaxis: {0:d}".format(iaxis))
else:
raise ValueError("{0:d} dimensions not supported.".format(ndimen))
return result
def hogg_iau_name(ra, dec, prefix='SDSS', precision=1):
"""Properly format astronomical source names to the IAU convention.
Parameters
----------
ra : :class:`float` or :class:`numpy.ndarray`
        Right ascension in decimal degrees.
dec : :class:`float` or :class:`numpy.ndarray`
Declination in decimal degrees.
prefix : :class:`str`, optional
Add this prefix to the string, defaults to 'SDSS'.
precision : :class:`int`, optional
Display this many digits of precision on seconds, default 1.
Returns
-------
:class:`str` or :class:`list`
The IAU name for the coordinates.
Examples
--------
>>> from pydl.pydlutils.misc import hogg_iau_name
>>> hogg_iau_name(354.120375,-0.544777778)
'SDSS J233628.89-003241.2'
"""
import numpy as np
#
# Promote scalar values to arrays.
#
if isinstance(ra, float):
ra = np.array([ra])
if isinstance(dec, float):
dec = np.array([dec])
h = ra/15.0
rah = np.floor(h)
ram = np.floor(60.0*(h-rah))
ras = 60.0*(60.0*(h-rah) - ram)
ras = np.floor(ras*10.0**(precision+1))/10.0**(precision+1)
rasformat = "{{2:0{0:d}.{1:d}f}}".format(precision+4, precision+1)
rah = rah.astype(np.int32)
ram = ram.astype(np.int32)
desgn = np.array(list('+'*len(dec)))
desgn[dec < 0] = '-'
adec = np.absolute(dec)
ded = np.floor(adec)
dem = np.floor(60.0*(adec-ded))
des = 60.0*(60.0*(adec-ded) - dem)
des = np.floor(des*10.0**precision)/10.0**precision
desformat = "{{6:0{0:d}.{1:d}f}}".format(precision+3, precision)
if precision == 0:
desformat = "{6:02d}"
des = des.astype(np.int32)
ded = ded.astype(np.int32)
dem = dem.astype(np.int32)
adformat = "{{0:02d}}{{1:02d}}{ras}{{3:s}}{{4:02d}}{{5:02d}}{des}".format(
ras=rasformat, des=desformat)
adstr = [adformat.format(*x) for x in zip(
rah, ram, ras, desgn, ded, dem, des)]
if prefix == '':
jstr = 'J'
else:
jstr = ' J'
name = ["{0}{1}{2}".format(prefix, jstr, x) for x in adstr]
if len(ra) == 1:
return name[0]
else:
return name
def hogg_iau_name_main(): # pragma: no cover
from argparse import ArgumentParser
parser = ArgumentParser(description='Properly format astronomical ' +
'source names to the IAU convention.')
parser.add_argument('-P', '--precision', dest='precision', action='store',
metavar='N', default=1, type=int,
help='Digits of precision to add to the declination.')
parser.add_argument('-p', '--prefix', dest='prefix', action='store',
metavar='STR', default='SDSS',
help='Add this prefix to the name.')
parser.add_argument('ra', metavar='RA', type=float,
help='Right Ascension.')
parser.add_argument('dec', metavar='Dec', type=float,
help='Declination.')
options = parser.parse_args()
print(hogg_iau_name(options.ra, options.dec,
prefix=options.prefix, precision=options.precision))
return 0
def struct_print(array, filename=None, formatcodes=None, alias=None,
fdigit=5, ddigit=7, html=False, no_head=False,
silent=False):
"""Print a NumPy record array (analogous to an IDL structure) in a
nice way.
Parameters
----------
array : :class:`numpy.ndarray`
A record array to print.
filename : :class:`str` or file-like, optional
If supplied, write to this file.
formatcodes : :class:`dict`, optional
If supplied, use explicit format for certain columns.
alias : :class:`dict`, optional
If supplied, use this mapping of record array column names to printed
column names.
fdigit : :class:`int`, optional
Width of 32-bit floating point columns, default 5.
ddigit : :class:`int`, optional
Width of 64-bit floating point columns, default 7.
html : :class:`bool`, optional
If ``True``, print an html table.
no_head : :class:`bool`, optional
If ``True``, *don't* print a header line.
silent : :class:`bool`, optional
If ``True``, do not print the table, just return it.
Returns
-------
:func:`tuple`
A tuple containing a list of the lines in the table. If `html` is
``True``, also returns a list of lines of CSS for formatting the
table.
Examples
--------
>>> import numpy as np
>>> from pydl.pydlutils.misc import struct_print
>>> struct_print(np.array([(1,2.34,'five'),(2,3.456,'seven'),(3,4.5678,'nine')],dtype=[('a','i4'),('bb','f4'),('ccc','S5')]),silent=True)
(['a bb ccc ', '- ----------- -----', '1 2.34 five ', '2 3.456 seven', '3 4.5678 nine '], [])
"""
if html:
headstart = '<tr><th>'
headsep = '</th><th>'
headend = '</th></tr>'
colstart = '<tr><td>'
colsep = '</td><td>'
colend = '</td></tr>'
css = ['<style type="text/css">',
'table {',
' border-collapse: collapse;',
'}',
'th {',
' padding: 2px;',
' text-align: right;',
' border: 1px solid black;',
' font-weight: bold;',
'}',
'td {',
' padding: 2px;',
' text-align: right;',
' border: 1px solid black;',
'}',
'</style>']
else:
headstart = ''
headsep = ' '
headend = ''
colstart = ''
colsep = ' '
colend = ''
css = list()
#
# Alias should be a dictionary that maps structure names to column names
#
if alias is None:
#
# Create a dummy alias dictionary
#
alias = dict(list(zip(array.dtype.names, array.dtype.names)))
else:
#
# Fill in any missing values of the alias dictionary
#
for tag in array.dtype.names:
if tag not in alias:
alias[tag] = tag
#
# Formatcodes allows an override for certain columns.
#
if formatcodes is None:
formatcodes = dict()
#
# This dictionary will hold the number of characters in each column
#
nchar = dict()
#
# Construct format codes for each column
#
for k, tag in enumerate(array.dtype.names):
if tag in formatcodes:
thiscode = formatcodes[tag]
thisn = len(thiscode.format(array[tag][0]))
else:
d = array.dtype.fields[tag][0]
if d.kind == 'i' or d.kind == 'u':
thisn = max(max(len(str(array[tag].min())),
len(str(array[tag].max()))), len(tag))
thiscode = "{{{0:d}:{1:d}d}}".format(k, thisn)
elif d.kind == 'f':
if d.itemsize == 8:
prec = ddigit
else:
prec = fdigit
thisn = prec + 6
if array[tag].min() < 0:
thisn += 1
thiscode = "{{{0:d}:{1:d}.{2:d}g}}".format(k, thisn, prec)
elif d.kind == 'S' or d.kind == 'U':
thisn = max(d.itemsize, len(tag))
thiscode = "{{{0:d}:{1:d}s}}".format(k, thisn)
else:
raise PydlutilsException(
"Unsupported kind: {0}".format(d.kind))
formatcodes[tag] = thiscode
nchar[tag] = thisn
#
# Start building an array of lines
#
lines = list()
#
# Construct header lines
#
if html:
lines.append('<table>')
hdr1 = (headstart + headsep.join([alias[tag]
for tag in array.dtype.names]) + headend)
lines.append(hdr1)
else:
if not no_head:
hdr1 = (headstart + headsep.join([("{{0:{0:d}s}}".format(
nchar[tag])).format(alias[tag])
for tag in array.dtype.names]) + headend)
hdr2 = (headstart + headsep.join(['-' * nchar[tag]
for tag in array.dtype.names]) + headend)
lines.append(hdr1)
lines.append(hdr2)
#
# Create a format string for the data from the individual format codes
#
rowformat = (colstart + colsep.join([formatcodes[tag]
for tag in array.dtype.names]) + colend)
for k in range(array.size):
lines.append(rowformat.format(
*([decode_mixed(l) for l in array[k].tolist()])))
if html:
lines.append('</table>')
f = None # This variable will store a file handle
close_file = False
if filename is not None:
if hasattr(filename, 'write'):
f = filename
else:
f = open(filename, 'w+b')
close_file = True
if f is None:
if not silent: # pragma: no cover
print("\n".join(lines)+"\n")
else:
f.write(("\n".join(lines)+"\n").encode('utf-8'))
if close_file:
f.close()
return (lines, css)
```
#### File: pydl/pydlutils/spheregroup.py
```python
import numpy as np
from astropy.extern.six import string_types
from . import PydlutilsException, PydlutilsUserWarning
class chunks(object):
"""chunks class
Functions for creating and manipulating spherical chunks are implemented
as methods on this class.
"""
def __init__(self, ra, dec, minSize):
"""Init creates an object whose attributes are similar those created
by the setchunks() function in the spheregroup library.
"""
#
# Save the value of minSize
#
self.minSize = minSize
#
# Find maximum and minimum dec (in degrees)
#
decMin = dec.min()
decMax = dec.max()
decRange = decMax - decMin
#
# Find the declination boundaries; make them an integer multiple of
# minSize, with extra room (one cell) on the edges.
#
self.nDec = 3 + int(np.floor(decRange/minSize))
decRange = minSize*float(self.nDec)
decMin = decMin - 0.5*(decRange - decMax + decMin)
decMax = decMin + decRange
if decMin < -90.0 + 3.0*minSize:
decMin = -90.0
if decMax > 90.0 - 3.0*minSize:
decMax = 90.0
self.decBounds = decMin + ((decMax - decMin) * np.arange(self.nDec + 1,
dtype='d'))/float(self.nDec)
#
# Find ra offset which minimizes the range in ra (this should take care
        # of the case that ra crosses zero in some parts).
#
if abs(self.decBounds[self.nDec]) > abs(self.decBounds[0]):
cosDecMin = np.cos(np.deg2rad(self.decBounds[self.nDec]))
else:
cosDecMin = np.cos(np.deg2rad(self.decBounds[0]))
if cosDecMin <= 0.0:
raise PydlutilsException("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
self.raRange, self.raOffset = self.rarange(ra, minSize/cosDecMin)
self.raMin, self.raMax = self.getraminmax(ra, self.raOffset)
#
# Isn't this redundant?
#
self.raRange = self.raMax - self.raMin
#
# For each declination slice, find the number of ra divisions
# necessary and set them
#
self.raBounds = list()
self.nRa = list()
for i in range(self.nDec):
#
# Get maximum declination and its cosine
#
if abs(self.decBounds[i]) > abs(self.decBounds[i+1]):
cosDecMin = np.cos(np.deg2rad(self.decBounds[i]))
else:
cosDecMin = np.cos(np.deg2rad(self.decBounds[i+1]))
if cosDecMin <= 0.0:
raise PydlutilsException("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
#
# Get raBounds array for this declination array, leave an extra
# cell on each end
#
self.nRa.append(3 + int(np.floor(cosDecMin*self.raRange/minSize)))
raRangeTmp = minSize*float(self.nRa[i])/cosDecMin
raMinTmp = self.raMin - 0.5*(raRangeTmp-self.raMax+self.raMin)
raMaxTmp = raMinTmp + raRangeTmp
#
# If we cannot avoid the 0/360 point, embrace it
#
if (raRangeTmp >= 360.0 or
raMinTmp <= minSize/cosDecMin or
raMaxTmp >= 360.0 - minSize/cosDecMin or
abs(self.decBounds[i]) == 90.0):
raMinTmp = 0.0
raMaxTmp = 360.0
raRangeTmp = 360.0
if self.decBounds[i] == -90.0 or self.decBounds[i+1] == 90.0:
self.nRa[i] = 1
self.raBounds.append(raMinTmp +
(raMaxTmp - raMinTmp) * np.arange(self.nRa[i] + 1, dtype='d') /
float(self.nRa[i]))
#
# Create an empty set of lists to hold the output of self.assign()
#
self.chunkList = [[list() for j in range(self.nRa[i])] for i in range(self.nDec)]
#
# nChunkMax will be the length of the largest list in chunkList
# it is computed by chunks.assign()
#
self.nChunkMax = 0
return
def rarange(self, ra, minSize):
"""Finds the offset which yields the smallest raRange & returns both.
Notes
-----
.. warning:: This is not (yet) well-defined for the case of only one point.
"""
NRA = 6
raRangeMin = 361.0
raOffset = 0.0
EPS = 1.0e-5
for j in range(NRA):
raMin, raMax = self.getraminmax(ra, 360.0*float(j)/float(NRA))
raRange = raMax-raMin
if (2.0*(raRange-raRangeMin)/(raRange+raRangeMin) < -EPS and
raMin > minSize and raMax < 360.0 - minSize):
raRangeMin = raRange
raOffset = 360.0*float(j)/float(NRA)
return (raRangeMin, raOffset)
def getraminmax(self, ra, raOffset):
"""Utility function used by rarange.
"""
currRa = np.fmod(ra + raOffset, 360.0)
return (currRa.min(), currRa.max())
def cosDecMin(self, i):
"""Frequently used utility function.
"""
if abs(self.decBounds[i]) > abs(self.decBounds[i+1]):
return np.cos(np.deg2rad(self.decBounds[i]))
else:
return np.cos(np.deg2rad(self.decBounds[i+1]))
def assign(self, ra, dec, marginSize):
"""Take the objects and the chunks (already defined in the constructor)
and assign the objects to the appropriate chunks, with some leeway
given by the parameter marginSize. Basically, at the end, each
chunk should be associated with a list of the objects that belong
to it.
"""
if marginSize >= self.minSize:
raise PydlutilsException("marginSize>=minSize ({0:f}={1:f}) in chunks.assign().".format(marginSize, self.minSize))
chunkDone = [[False for j in range(self.nRa[i])] for i in range(self.nDec)]
for i in range(ra.size):
currRa = np.fmod(ra[i] + self.raOffset, 360.0)
try:
raChunkMin, raChunkMax, decChunkMin, decChunkMax = self.getbounds(currRa, dec[i], marginSize)
except PydlutilsException:
continue
#
# Reset chunkDone. This is silly, but is necessary to
# reproduce the logic.
#
for decChunk in range(decChunkMin, decChunkMax+1):
for raChunk in range(raChunkMin[decChunk-decChunkMin]-1, raChunkMax[decChunk-decChunkMin]+2):
if raChunk < 0:
currRaChunk = (raChunk+self.nRa[decChunk]) % self.nRa[decChunk]
elif raChunk > self.nRa[decChunk]-1:
currRaChunk = (raChunk-self.nRa[decChunk]) % self.nRa[decChunk]
else:
currRaChunk = raChunk
if currRaChunk >= 0 and currRaChunk <= self.nRa[decChunk]-1:
chunkDone[decChunk][currRaChunk] = False
for decChunk in range(decChunkMin, decChunkMax+1):
for raChunk in range(raChunkMin[decChunk-decChunkMin], raChunkMax[decChunk-decChunkMin]+1):
if raChunk < 0:
currRaChunk = (raChunk+self.nRa[decChunk]) % self.nRa[decChunk]
elif raChunk > self.nRa[decChunk]-1:
currRaChunk = (raChunk-self.nRa[decChunk]) % self.nRa[decChunk]
else:
currRaChunk = raChunk
if currRaChunk >= 0 and currRaChunk <= self.nRa[decChunk]-1:
if not chunkDone[decChunk][currRaChunk]:
self.chunkList[decChunk][currRaChunk].append(i)
#
# Update nChunkMax
#
if len(self.chunkList[decChunk][currRaChunk]) > self.nChunkMax:
self.nChunkMax = len(self.chunkList[decChunk][currRaChunk])
chunkDone[decChunk][currRaChunk] = True
return
def getbounds(self, ra, dec, marginSize):
"""Find the set of chunks a point (with margin) belongs to.
"""
#
# Find the declination slice without regard to marginSize
#
decChunkMin = int(np.floor((dec - self.decBounds[0]) *
float(self.nDec) /
(self.decBounds[self.nDec]-self.decBounds[0])))
decChunkMax = decChunkMin
if decChunkMin < 0 or decChunkMin > self.nDec - 1:
raise PydlutilsException("decChunkMin out of range in chunks.getbounds().")
#
# Set minimum and maximum bounds of dec
#
while dec - self.decBounds[decChunkMin] < marginSize and decChunkMin > 0:
decChunkMin -= 1
while self.decBounds[decChunkMax+1] - dec < marginSize and decChunkMax < self.nDec - 1:
decChunkMax += 1
#
# Find ra chunk bounds for each dec chunk
#
raChunkMin = np.zeros(decChunkMax-decChunkMin+1, dtype='i4')
raChunkMax = np.zeros(decChunkMax-decChunkMin+1, dtype='i4')
for i in range(decChunkMin, decChunkMax+1):
cosDecMin = self.cosDecMin(i)
raChunkMin[i-decChunkMin] = int(np.floor((ra - self.raBounds[i][0]) *
float(self.nRa[i]) /
(self.raBounds[i][self.nRa[i]] - self.raBounds[i][0])))
raChunkMax[i-decChunkMin] = raChunkMin[i-decChunkMin]
if raChunkMin[i-decChunkMin] < 0 or raChunkMin[i-decChunkMin] > self.nRa[i]-1:
raise PydlutilsException("raChunkMin out of range in chunks.getbounds().")
#
# Set minimum and maximum bounds of ra
#
raCheck = raChunkMin[i-decChunkMin]
keepGoing = True
while keepGoing and raCheck > -1:
if raCheck >= 0 and raCheck < self.nRa[i]:
keepGoing = (ra - self.raBounds[i][raCheck])*cosDecMin < marginSize
else:
keepGoing = False
if keepGoing:
raCheck -= 1
raChunkMin[i-decChunkMin] = raCheck
raCheck = raChunkMax[i-decChunkMin]
keepGoing = True
while keepGoing and raCheck < self.nRa[i]:
if raCheck >= 0 and raCheck < self.nRa[i]:
keepGoing = (self.raBounds[i][raCheck+1]-ra)*cosDecMin < marginSize
else:
keepGoing = False
if keepGoing:
raCheck += 1
raChunkMax[i-decChunkMin] = raCheck
return (raChunkMin, raChunkMax, decChunkMin, decChunkMax)
def get(self, ra, dec):
"""Find the chunk to which a given point belongs.
"""
#
# Find dec chunk
#
decChunk = int(np.floor((dec - self.decBounds[0]) *
float(self.nDec) /
(self.decBounds[self.nDec]-self.decBounds[0])))
#
# Find ra chunk
#
if decChunk < self.nDec and decChunk >= 0:
raChunk = int(np.floor((ra - self.raBounds[decChunk][0]) *
float(self.nRa[decChunk]) /
(self.raBounds[decChunk][self.nRa[decChunk]] - self.raBounds[decChunk][0])))
if raChunk < 0 or raChunk > self.nRa[decChunk]-1:
raise PydlutilsException("raChunk out of range in chunks.get()")
else:
raChunk = -1
return (raChunk, decChunk)
def friendsoffriends(self, ra, dec, linkSep):
"""Friends-of-friends using chunked data.
"""
nPoints = ra.size
inGroup = np.zeros(nPoints, dtype='i4') - 1
#
        # mapGroups contains an equivalency mapping of groups. mapGroups[i]=j
# means i and j are actually the same group. j<=i always, by design.
# The largest number of groups you can get
# (assuming linkSep < marginSize < minSize) is 9 times the number of
# targets
#
mapGroups = np.zeros(9*nPoints, dtype='i4') - 1
nMapGroups = 0
for i in range(self.nDec):
for j in range(self.nRa[i]):
if len(self.chunkList[i][j]) > 0:
chunkGroup = self.chunkfriendsoffriends(ra, dec, self.chunkList[i][j], linkSep)
for k in range(chunkGroup.nGroups):
minEarly = 9*nPoints
l = chunkGroup.firstGroup[k]
while l != -1:
if inGroup[self.chunkList[i][j][l]] != -1:
checkEarly = inGroup[self.chunkList[i][j][l]]
while mapGroups[checkEarly] != checkEarly:
checkEarly = mapGroups[checkEarly]
minEarly = min(minEarly, checkEarly)
else:
inGroup[self.chunkList[i][j][l]] = nMapGroups
l = chunkGroup.nextGroup[l]
if minEarly == 9*nPoints:
mapGroups[nMapGroups] = nMapGroups
else:
mapGroups[nMapGroups] = minEarly
l = chunkGroup.firstGroup[k]
while l != -1:
checkEarly = inGroup[self.chunkList[i][j][l]]
while mapGroups[checkEarly] != checkEarly:
tmpEarly = mapGroups[checkEarly]
mapGroups[checkEarly] = minEarly
checkEarly = tmpEarly
mapGroups[checkEarly] = minEarly
l = chunkGroup.nextGroup[l]
nMapGroups += 1
#
# Now all groups which are mapped to themselves are the real groups
# Make sure the mappings are set up to go all the way down.
#
nGroups = 0
for i in range(nMapGroups):
if mapGroups[i] != -1:
if mapGroups[i] == i:
mapGroups[i] = nGroups
nGroups += 1
else:
mapGroups[i] = mapGroups[mapGroups[i]]
else:
raise PydlutilsException("MapGroups[{0:d}]={1:d} in chunks.friendsoffriends().".format(i, mapGroups[i]))
for i in range(nPoints):
inGroup[i] = mapGroups[inGroup[i]]
firstGroup = np.zeros(nPoints, dtype='i4') - 1
nextGroup = np.zeros(nPoints, dtype='i4') - 1
multGroup = np.zeros(nPoints, dtype='i4')
for i in range(nPoints-1, -1, -1):
nextGroup[i] = firstGroup[inGroup[i]]
firstGroup[inGroup[i]] = i
for i in range(nGroups):
j = firstGroup[i]
while j != -1:
multGroup[i] += 1
j = nextGroup[j]
return (inGroup, multGroup, firstGroup, nextGroup, nGroups)
def chunkfriendsoffriends(self, ra, dec, chunkList, linkSep):
"""Does friends-of-friends on the ra, dec that are defined by
chunkList.
"""
#
# Convert ra, dec into something that can be digested by the
# groups object.
#
x = np.deg2rad(np.vstack((ra[chunkList], dec[chunkList])))
radLinkSep = np.deg2rad(linkSep)
group = groups(x, radLinkSep, 'sphereradec')
return group
class groups(object):
"""Group a set of objects (a list of coordinates in some space) based on
a friends-of-friends algorithm
"""
@staticmethod
def euclid(x1, x2):
"""Pythagorean theorem in Euclidean space with arbitrary number
of dimensions.
"""
return np.sqrt(((x1-x2)**2).sum())
@staticmethod
def sphereradec(x1, x2):
"""Separation of two points on a 2D-sphere, assuming they are in
longitude-latitude or right ascension-declination form. Assumes
everything is already in radians.
"""
from ..goddard.astro import gcirc
return gcirc(x1[0], x1[1], x2[0], x2[1], units=0)
def __init__(self, coordinates, distance, separation='euclid'):
"""Init creates an object and performs the friends-of-friends
algorithm. The coordinates can have arbitrary dimensions, with each
column representing one of the dimensions. Each row defines an object.
If separation is not defined it defaults to Euclidean space.
"""
#
# Find a separation function
#
if callable(separation):
self.separation = separation
elif isinstance(separation, string_types):
if separation == 'euclid':
self.separation = self.euclid
elif separation == 'sphereradec':
self.separation = self.sphereradec
else:
raise PydlutilsException("Unknown separation function: {0}.".format(separation))
else:
raise PydlutilsException("Improper type for separation!")
#
# Save information about the coordinates.
#
nGroups = 0
nTargets = coordinates.shape[1]
multGroup = np.zeros(nTargets, dtype='i4')
firstGroup = np.zeros(nTargets, dtype='i4') - 1
nextGroup = np.zeros(nTargets, dtype='i4') - 1
inGroup = np.arange(nTargets, dtype='i4')
#
# Find all the other targets associated with each target
#
for i in range(nTargets):
nTmp = 0
minGroup = nGroups
for j in range(nTargets):
sep = self.separation(coordinates[:, i], coordinates[:, j])
if sep <= distance:
multGroup[nTmp] = j
minGroup = min(minGroup, inGroup[j])
nTmp += 1
#
# Use this minimum for all
#
for j in range(nTmp):
if inGroup[multGroup[j]] < nTargets:
k = firstGroup[inGroup[multGroup[j]]]
while k != -1:
inGroup[k] = minGroup
k = nextGroup[k]
inGroup[multGroup[j]] = minGroup
#
# If it is a new group (no earlier groups), increment nGroups
#
if minGroup == nGroups:
nGroups += 1
for j in range(i+1):
firstGroup[j] = -1
for j in range(i, -1, -1):
nextGroup[j] = firstGroup[inGroup[j]]
firstGroup[inGroup[j]] = j
#
# Renumber to get rid of the numbers which were skipped
#
renumbered = np.zeros(nTargets, dtype='bool')
nTmp = nGroups
nGroups = 0
for i in range(nTargets):
if not renumbered[i]:
j = firstGroup[inGroup[i]]
while j != -1:
inGroup[j] = nGroups
renumbered[j] = True
j = nextGroup[j]
nGroups += 1
#
# Reset the values of firstGroup and inGroup
#
firstGroup[:] = -1
for i in range(nTargets-1, -1, -1):
nextGroup[i] = firstGroup[inGroup[i]]
firstGroup[inGroup[i]] = i
#
# Get the multiplicity
#
for i in range(nGroups):
multGroup[i] = 0
j = firstGroup[i]
while j != -1:
multGroup[i] += 1
j = nextGroup[j]
#
# Set attributes
#
self.nGroups = nGroups
self.nTargets = nTargets
self.inGroup = inGroup
self.multGroup = multGroup
self.firstGroup = firstGroup
self.nextGroup = nextGroup
return
def spheregroup(ra, dec, linklength, chunksize=None):
"""Perform friends-of-friends grouping given ra/dec coordinates.
Parameters
----------
ra, dec : :class:`numpy.ndarray`
Arrays of coordinates to group in decimal degrees.
linklength : :class:`float`
Linking length for the groups in decimal degrees.
chunksize : :class:`float`, optional
Break up the sphere into chunks of this size in decimal degrees.
Returns
-------
:func:`tuple`
A tuple containing the group number of each object, the multiplicity
of each group, the first member of each group, and the next
member of the group for each object.
Raises
------
PydlutilsException
If the array of coordinates only contains one point.
Notes
-----
It is important that `chunksize` >= 4 * `linklength`. This is enforced.
.. warning:: Behavior at the poles is not well tested.
"""
from warnings import warn
npoints = ra.size
if npoints == 1:
raise PydlutilsException("Cannot group only one point!")
#
# Define the chunksize
#
if chunksize is not None:
if chunksize < 4.0*linklength:
chunksize = 4.0*linklength
warn("chunksize changed to {0:.2f}.".format(chunksize), PydlutilsUserWarning)
else:
chunksize = max(4.0*linklength, 0.1)
#
# Initialize chunks
#
chunk = chunks(ra, dec, chunksize)
chunk.assign(ra, dec, linklength)
#
# Run friends-of-friends
#
ingroup, multgroup, firstgroup, nextgroup, ngroups = chunk.friendsoffriends(ra, dec, linklength)
#
# Renumber the groups in order of appearance
#
renumbered = np.zeros(npoints, dtype='bool')
iclump = 0
for i in range(npoints):
if not renumbered[i]:
j = firstgroup[ingroup[i]]
while j != -1:
ingroup[j] = iclump
renumbered[j] = True
j = nextgroup[j]
iclump += 1
#
# Reset the index lists
#
firstgroup[:] = -1
for i in range(npoints-1, -1, -1):
nextgroup[i] = firstgroup[ingroup[i]]
firstgroup[ingroup[i]] = i
#
# Reset the multiplicities
#
multgroup[:] = 0
for i in range(ngroups):
j = firstgroup[i]
while j != -1:
multgroup[i] += 1
j = nextgroup[j]
return (ingroup, multgroup, firstgroup, nextgroup)
def spherematch(ra1, dec1, ra2, dec2, matchlength, chunksize=None,
maxmatch=1):
"""Match points on a sphere.
Parameters
----------
ra1, dec1, ra2, dec2 : :class:`numpy.ndarray`
        The sets of coordinates to match. Assumed to be in decimal degrees.
matchlength : :class:`float`
Two points closer than this separation are matched. Assumed to be in decimal degrees.
chunksize : :class:`float`, optional
Value to pass to chunk assignment.
maxmatch : :class:`int`, optional
Allow up to `maxmatch` matches per coordinate. Default 1. If set to zero,
        all possible matches will be returned.
Returns
-------
:func:`tuple`
A tuple containing the indices into the first set of points, the
indices into the second set of points and the match distance in
decimal degrees.
Notes
-----
If you have sets of coordinates that differ in size, call this function
with the larger list first. This exploits the inherent asymmetry in the
underlying code to reduce memory use.
.. warning:: Behavior at the poles is not well tested.
"""
from ..goddard.astro import gcirc
#
# Set default values
#
if chunksize is None:
chunksize = max(4.0*matchlength, 0.1)
#
# Check input size
#
if ra1.size == 1:
raise PydlutilsException("Change the order of the sets of coordinates!")
#
# Initialize chunks
#
chunk = chunks(ra1, dec1, chunksize)
chunk.assign(ra2, dec2, matchlength)
#
# Create return arrays
#
match1 = list()
match2 = list()
distance12 = list()
for i in range(ra1.size):
currra = np.fmod(ra1[i]+chunk.raOffset, 360.0)
rachunk, decchunk = chunk.get(currra, dec1[i])
jmax = len(chunk.chunkList[decchunk][rachunk])
if jmax > 0:
for j in range(jmax):
k = chunk.chunkList[decchunk][rachunk][j]
sep = gcirc(ra1[i], dec1[i], ra2[k], dec2[k], units=2)/3600.0
if sep < matchlength:
match1.append(i)
match2.append(k)
distance12.append(sep)
#
# Sort distances
#
omatch1 = np.array(match1)
omatch2 = np.array(match2)
odistance12 = np.array(distance12)
s = odistance12.argsort()
#
# Retain only desired matches
#
if maxmatch > 0:
gotten1 = np.zeros(ra1.size, dtype='i4')
gotten2 = np.zeros(ra2.size, dtype='i4')
nmatch = 0
for i in range(omatch1.size):
if (gotten1[omatch1[s[i]]] < maxmatch and
gotten2[omatch2[s[i]]] < maxmatch):
gotten1[omatch1[s[i]]] += 1
gotten2[omatch2[s[i]]] += 1
nmatch += 1
match1 = np.zeros(nmatch, dtype='i4')
match2 = np.zeros(nmatch, dtype='i4')
distance12 = np.zeros(nmatch, dtype='d')
gotten1[:] = 0
gotten2[:] = 0
nmatch = 0
for i in range(omatch1.size):
if (gotten1[omatch1[s[i]]] < maxmatch and
gotten2[omatch2[s[i]]] < maxmatch):
gotten1[omatch1[s[i]]] += 1
gotten2[omatch2[s[i]]] += 1
match1[nmatch] = omatch1[s[i]]
match2[nmatch] = omatch2[s[i]]
distance12[nmatch] = odistance12[s[i]]
nmatch += 1
else:
match1 = omatch1[s]
match2 = omatch2[s]
distance12 = odistance12[s]
return (match1, match2, distance12)
```
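The two public entry points in this module, `spheregroup` and `spherematch`, are easiest to grasp from a small worked call. The sketch below is illustrative only: the coordinate values are made up, and it assumes `pydl` is installed so that `pydl.pydlutils.spheregroup` is importable.
```python
import numpy as np
from pydl.pydlutils.spheregroup import spheregroup, spherematch

# Three sources; the first two are well under an arcsecond apart.
ra = np.array([210.0, 210.0001, 250.0])
dec = np.array([12.0, 12.00005, -5.0])

# Group with a 2 arcsec linking length (passed in decimal degrees).
ingroup, multgroup, firstgroup, nextgroup = spheregroup(ra, dec, 2.0/3600.0)
# ingroup gives a group id per object (e.g. [0, 0, 1]); multgroup gives the
# multiplicity of each group.

# Match a smaller catalog against the first one; pass the larger set first,
# as the docstring recommends.
ra2 = np.array([210.00005])
dec2 = np.array([12.00002])
m1, m2, d12 = spherematch(ra, dec, ra2, dec2, 2.0/3600.0)
# m1/m2 index into (ra, dec) and (ra2, dec2); d12 is the separation in degrees.
# With the default maxmatch=1 only the closest pair per coordinate is kept.
```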
#### File: pydlutils/tests/test_misc.py
```python
from os import remove
import numpy as np
import tempfile
from astropy.tests.helper import raises
from .. import PydlutilsException
from ..misc import djs_laxisgen, djs_laxisnum, hogg_iau_name, struct_print
class TestMisc(object):
"""Test the functions in pydl.pydlutils.misc.
"""
def setup(self):
pass
def teardown(self):
pass
def test_djs_laxisgen(self):
#
# 1d
#
assert (np.arange(4, dtype='i4') == djs_laxisgen((4,))).all()
#
# 2d
#
l = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]],
dtype='i4')
assert (l == djs_laxisgen((4, 4))).all()
assert (l.T == djs_laxisgen((4, 4), iaxis=1)).all()
with raises(ValueError):
foo = djs_laxisgen((4, 4), iaxis=2)
#
# 3d
#
l = np.zeros((4, 4, 4), dtype='i4')
l[1, :, :] = 1
l[2, :, :] = 2
l[3, :, :] = 3
assert (l == djs_laxisgen((4, 4, 4))).all()
assert (l.swapaxes(0, 1) == djs_laxisgen((4, 4, 4), iaxis=1)).all()
assert (l.swapaxes(0, 2) == djs_laxisgen((4, 4, 4), iaxis=2)).all()
with raises(ValueError):
foo = djs_laxisgen((4, 4, 4), iaxis=3)
#
# More d
#
with raises(ValueError):
foo = djs_laxisgen((4, 4, 4, 4))
def test_djs_laxisnum(self):
#
# 1d
#
assert (np.zeros((4,), dtype='i4') == djs_laxisnum((4,))).all()
#
# 2d
#
l = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]],
dtype='i4')
assert (l == djs_laxisnum((4, 4))).all()
assert (l.T == djs_laxisnum((4, 4), iaxis=1)).all()
with raises(ValueError):
foo = djs_laxisnum((4, 4), iaxis=2)
#
# 3d
#
l = np.zeros((4, 4, 4), dtype='i4')
l[1, :, :] = 1
l[2, :, :] = 2
l[3, :, :] = 3
assert (l == djs_laxisnum((4, 4, 4))).all()
assert (l.swapaxes(0, 1) == djs_laxisnum((4, 4, 4), iaxis=1)).all()
assert (l.swapaxes(0, 2) == djs_laxisnum((4, 4, 4), iaxis=2)).all()
with raises(ValueError):
foo = djs_laxisnum((4, 4, 4), iaxis=3)
#
# More d
#
with raises(ValueError):
foo = djs_laxisnum((4, 4, 4, 4))
def test_hogg_iau_name(self):
assert (hogg_iau_name(354.120375, -0.544777778) ==
'SDSS J233628.89-003241.2')
assert (hogg_iau_name(354.120375, -0.544777778, prefix='2MASS') ==
'2MASS J233628.89-003241.2')
assert (hogg_iau_name(354.120375, -0.544777778, prefix='') ==
'J233628.89-003241.2')
assert (hogg_iau_name(354.120375, -0.544777778, precision=0) ==
'SDSS J233628.8-003241')
assert (hogg_iau_name(354.120375, -0.544777778, precision=2) ==
'SDSS J233628.890-003241.20')
ra = np.array([354.120375, 7.89439, 36.31915, 110.44730])
dec = np.array([-0.544777778, -0.35157, 0.47505, 39.35352])
names = hogg_iau_name(ra, dec)
assert tuple(names) == ('SDSS J233628.89-003241.2',
'SDSS J003134.65-002105.6',
'SDSS J022516.59+002830.1',
'SDSS J072147.35+392112.6')
def test_struct_print(self):
slist = np.zeros((5,), dtype=[('a', 'c16'), ('b', np.bool)])
with raises(PydlutilsException):
lines, css = struct_print(slist, silent=True)
slist = np.array([(1, 2.34, 'five'),
(2, 3.456, 'seven'),
(3, -4.5678, 'nine')],
dtype=[('a', 'i4'), ('bb', 'f4'), ('ccc', 'S5')])
lines, css = struct_print(slist, silent=True)
assert lines[0] == 'a bb ccc '
assert lines[1] == '- ------------ -----'
assert lines[2] == '1 2.34 five '
assert lines[3] == '2 3.456 seven'
assert lines[4] == '3 -4.5678 nine '
assert len(css) == 0
lines, css = struct_print(slist, silent=True, alias={'ccc': 'c'})
assert lines[0] == 'a bb c '
assert lines[1] == '- ------------ -----'
assert lines[2] == '1 2.34 five '
assert lines[3] == '2 3.456 seven'
assert lines[4] == '3 -4.5678 nine '
assert len(css) == 0
lines, css = struct_print(slist, silent=True,
formatcodes={'a': '{0:02d}'})
assert lines[0] == 'a bb ccc '
assert lines[1] == '-- ------------ -----'
assert lines[2] == '01 2.34 five '
assert lines[3] == '02 3.456 seven'
assert lines[4] == '03 -4.5678 nine '
assert len(css) == 0
lines, css = struct_print(slist, silent=True, fdigit=3)
assert lines[0] == 'a bb ccc '
assert lines[1] == '- ---------- -----'
assert lines[2] == '1 2.34 five '
assert lines[3] == '2 3.46 seven'
assert lines[4] == '3 -4.57 nine '
assert len(css) == 0
lines, css = struct_print(slist, silent=True, html=True)
assert lines[0] == '<table>'
assert lines[1] == '<tr><th>a</th><th>bb</th><th>ccc</th></tr>'
assert lines[2] == '<tr><td>1</td><td> 2.34</td><td>five </td></tr>'
assert lines[3] == '<tr><td>2</td><td> 3.456</td><td>seven</td></tr>'
assert lines[4] == '<tr><td>3</td><td> -4.5678</td><td>nine </td></tr>'
assert lines[5] == '</table>'
assert css[0] == '<style type="text/css">'
assert css[1] == 'table {'
assert css[2] == ' border-collapse: collapse;'
assert css[3] == '}'
assert css[4] == 'th {'
assert css[5] == ' padding: 2px;'
assert css[6] == ' text-align: right;'
assert css[7] == ' border: 1px solid black;'
assert css[8] == ' font-weight: bold;'
assert css[9] == '}'
assert css[10] == 'td {'
assert css[11] == ' padding: 2px;'
assert css[12] == ' text-align: right;'
assert css[13] == ' border: 1px solid black;'
assert css[14] == '}'
assert css[15] == '</style>'
slist = np.array([(1, 2.34, 'five'),
(2, 3.456, 'seven'),
(3, -4.5678, 'nine')],
dtype=[('a', 'i4'), ('bb', 'f8'), ('ccc', 'S5')])
lines, css = struct_print(slist, silent=True, ddigit=3)
assert lines[0] == 'a bb ccc '
assert lines[1] == '- ---------- -----'
assert lines[2] == '1 2.34 five '
assert lines[3] == '2 3.46 seven'
assert lines[4] == '3 -4.57 nine '
assert len(css) == 0
with tempfile.NamedTemporaryFile(delete=False) as spf1:
spf1_name = spf1.name
lines, css = struct_print(slist, silent=True,
filename=spf1_name)
with open(spf1_name, 'rb') as f:
data = f.read().decode('utf-8')
assert "\n".join(lines)+"\n" == data
remove(spf1_name)
with tempfile.TemporaryFile() as spf2:
lines, css = struct_print(slist, silent=True, filename=spf2)
spf2.seek(0)
data = spf2.read().decode('utf-8')
assert "\n".join(lines)+"\n" == data
```
#### File: pydl/tests/test_pydl.py
```python
import numpy as np
import glob
try:
from astropy.tests.compat import assert_allclose
except ImportError:
from numpy.testing.utils import assert_allclose
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from os.path import basename, dirname, join
from ..file_lines import file_lines
from ..median import median
from ..pcomp import pcomp
from ..rebin import rebin
from ..smooth import smooth
from ..uniq import uniq
class TestPydl(object):
"""Test the top-level pydl functions.
"""
def setup(self):
pass
def teardown(self):
pass
def test_file_lines(self):
#
# Find the test files
#
line_numbers = (1, 42, 137)
plainfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt'.format(l)) for l in line_numbers]
gzfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt.gz'.format(l)) for l in line_numbers]
for i, p in enumerate(plainfiles):
n = file_lines(p)
assert n == line_numbers[i]
for i, p in enumerate(gzfiles):
n = file_lines(p, compress=True)
assert n == line_numbers[i]
#
# Test list passing
#
n = file_lines(plainfiles)
assert tuple(n) == line_numbers
n = file_lines(gzfiles, compress=True)
assert tuple(n) == line_numbers
#
# Make sure empty files work
#
n = file_lines(get_pkg_data_filename('t/this-file-is-empty.txt'))
assert n == 0
def test_median(self):
odd_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
dtype=np.float32)
even_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=np.float32)
assert median(odd_data) == 7
assert median(odd_data, even=True) == 7
assert median(even_data) == 7
assert median(even_data, even=True) == 6.5
assert (median(odd_data, 3) == odd_data).all()
with raises(ValueError):
foo = median(np.ones((9, 9, 9)), 3)
odd_data2 = np.vstack((odd_data, odd_data, odd_data, odd_data, odd_data))
assert (median(odd_data2, 3) == odd_data2).all()
assert (median(odd_data2, axis=0) == odd_data).all()
assert (median(odd_data2, axis=1) ==
7*np.ones((odd_data2.shape[0],), dtype=odd_data2.dtype)).all()
def test_pcomp(self):
test_data_file = get_pkg_data_filename('t/pcomp_data.txt')
test_data = np.loadtxt(test_data_file, dtype='d', delimiter=',')
with raises(ValueError):
foo = pcomp(np.arange(10))
pcomp_data = test_data[0:20, :]
m = 4
n = 20
means = np.tile(pcomp_data.mean(0), n).reshape(pcomp_data.shape)
newarray = pcomp_data - means
foo = pcomp(newarray, covariance=True)
#
# This array is obtained from the IDL version of PCOMP.
# It is only accurate up to an overall sign on each column.
#
derived = test_data[20:40, :]
for k in range(m):
assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
coefficients = test_data[40:44, :]
coefficientsT = coefficients.T
for k in range(m):
assert_allclose(abs(foo.coefficients[:, k]),
abs(coefficientsT[:, k]),
1e-4)
eigenvalues = test_data[44, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[45, :]
assert_allclose(foo.variance, variance, 1e-4)
#
# Test the standardization.
#
foo = pcomp(pcomp_data, standardize=True, covariance=True)
# for k in range(m):
# assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
# for k in range(m):
# assert_allclose(abs(foo.coefficients[:, k]),
# abs(coefficientsT[:, k]),
# 1e-4)
eigenvalues = test_data[46, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[47, :]
assert_allclose(foo.variance, variance, 1e-4)
# assert_allclose(foo.derived[0, :], np.array([-1.64153312,
# -9.12322038,
# 1.41790708,
# -8.29359322]))
#
# Make sure correlation is working at least.
#
foo = pcomp(pcomp_data, standardize=True)
assert_allclose(foo.eigenvalues, np.array([2.84968632e+00,
1.00127640e+00,
1.48380121e-01,
6.57156222e-04]))
assert_allclose(foo.variance, np.array([7.12421581e-01,
2.50319100e-01,
3.70950302e-02,
1.64289056e-04]))
def test_rebin(self):
x = np.arange(40)
with raises(ValueError):
r = rebin(x, d=(10, 10))
with raises(ValueError):
r = rebin(x, d=(70,))
with raises(ValueError):
r = rebin(x, d=(30,))
x = np.array([[1.0, 2.0], [2.0, 3.0]])
rexpect = np.array([[1.0, 2.0], [1.5, 2.5], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 1.5, 2.0, 2.0], [2.0, 2.5, 3.0, 3.0]])
r = rebin(x, d=(2, 4))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 2.0], [1.0, 2.0], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2), sample=True)
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 1.0, 2.0, 2.0], [2.0, 2.0, 3.0, 3.0]])
r = rebin(x, d=(2, 4), sample=True)
assert np.allclose(r, rexpect)
x = np.arange(10)
rexpect = np.array([0.0, 2.0, 4.0, 6.0, 8.0])
r = rebin(x, d=(5,), sample=True)
assert np.allclose(r, rexpect)
x = np.array([[1.0, 2.0, 3.0, 4.0],
[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[4.0, 5.0, 6.0, 7.0]])
rexpect = np.array([[2.0, 4.0], [4.0, 6.0]])
r = rebin(x, d=(2, 2))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 4.5],
[3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 6.5]])
r = rebin(x, d=(2, 8))
assert np.allclose(r, rexpect)
def test_smooth(self):
test_data_file = get_pkg_data_filename('t/smooth_data.txt')
noise = np.loadtxt(test_data_file, dtype='d')
#
# Test smooth function
#
x = 8.0*np.arange(100)/100.0 - 4.0
y = np.sin(x) + 0.1*noise
s = smooth(y, 5)
assert s.shape == (100,)
s_edge = smooth(y, 5, True)
assert s_edge.shape == (100,)
s_w = smooth(y, 1)
assert (s_w == y).all()
def test_uniq(self):
items = np.array([1, 2, 3, 1, 5, 6, 1, 7, 3, 2, 5, 9, 11, 1])
items_sorted = np.sort(items)
items_argsorted = np.argsort(items)
#
# Test pre-sorted array.
#
u1 = uniq(items_sorted)
assert (u1 == np.array([3, 5, 7, 9, 10, 11, 12, 13])).all()
#
# Test arg-sorted array.
#
u2 = uniq(items, items_argsorted)
assert (u2 == np.array([13, 9, 8, 10, 5, 7, 11, 12])).all()
assert (items_sorted[u1] == items[u2]).all()
#
# Test degenerate case of all identical items.
#
identical_items = np.ones((10,), dtype=items.dtype)
u = uniq(identical_items)
assert (u == np.array([9])).all()
u = uniq(identical_items, np.arange(10, dtype=items.dtype))
assert (u == np.array([9])).all()
``` |
{
"source": "jhennies/pybdv",
"score": 2
} |
#### File: pybdv/pybdv/converter.py
```python
import os
import numpy as np
from warnings import warn
from concurrent import futures
from tqdm import tqdm
from .util import (blocking, open_file, get_key,
HDF5_EXTENSIONS, N5_EXTENSIONS, XML_EXTENSIONS)
from .metadata import (get_setup_ids, get_timeponts,
validate_affine, validate_attributes,
write_h5_metadata, write_xml_metadata, write_n5_metadata)
from .downsample import downsample
from .dtypes import convert_to_bdv_dtype, get_new_dtype
OVERWRITE_OPTIONS = ('skip', 'data', 'metadata', 'all')
def handle_setup_id(setup_id, xml_path, timepoint, overwrite, is_h5):
# check if we have this setup_id and timepoint already
have_timepoint = False
if os.path.exists(xml_path):
setup_ids = get_setup_ids(xml_path)
if setup_id is None:
setup_id = max(setup_ids) + 1
timepoints = []
else:
timepoints = get_timeponts(xml_path, setup_id)
else:
setup_ids = []
timepoints = []
if setup_id is None:
setup_id = 0
# note: have_timepoint implies have_setup
have_setup = setup_id in setup_ids
have_timepoint = timepoint in timepoints
if overwrite == 'skip':
overwrite_data_set, overwrite_meta_set = False, False
elif overwrite == 'data':
overwrite_data_set, overwrite_meta_set = True, False
elif overwrite == 'metadata':
overwrite_data_set, overwrite_meta_set = False, True
else:
overwrite_data_set, overwrite_meta_set = True, True
overwrite_data, overwrite_metadata, skip = False, False, False
# we have different scenarios for over-writing:
# 0: the setup id is not present, we can just write data and metadata
# 1: setup id and time-point are present and over-write mode is 'skip' -> skip this setup id
# 2: setup id and time-point are present and over-write mode is 'all' -> over-write data and metadata
# 3: setup id and time-point are present and over-write mode is 'data' -> over-write data, don't over-write metadata
# 4: setup id and time-point are present and over-write mode is 'metadata' -> don't over-write data,
# over-write metadata
# 5: setup id is present, timepoint is not present, over-write mode is 'skip' or 'data' -> write data,
# don't over-write metadata
# 6: setup id is present, timepoint is not present, over-write mode is 'metadata' or 'all' -> write data,
# over-write metadata
msg = None
# we have both the setup-id and the data for this timepoint
if have_timepoint:
msg = "Setup %i and timepoint %i are present;" % (setup_id, timepoint)
if (not overwrite_data_set) and (not overwrite_meta_set):
msg += " no action will be taken."
skip = True
if overwrite_data_set:
overwrite_data = True
msg += " will over-write data;"
if overwrite_meta_set:
overwrite_metadata = True
msg += " will over-write metadata;"
# we have this setup id already, but not yet the timepoint data
elif have_setup and not have_timepoint:
msg = "Setup %i is present;" % setup_id
if overwrite_meta_set:
overwrite_metadata = True
msg += " will over-write metadata"
else:
msg += "will not over-write metadata"
# otherwise, we don't need to change the defaults
# raise warning if data or metadata was found
if msg is not None:
warn(msg)
if is_h5 and setup_id >= 100:
raise ValueError("Only up to 100 set-ups are supported")
return setup_id, overwrite_data, overwrite_metadata, skip
def copy_dataset(input_path, input_key, output_path, output_key, is_h5,
convert_dtype=False, chunks=None, n_threads=1, overwrite=False):
with open_file(input_path, 'r') as f_in, open_file(output_path, 'a') as f_out:
ds_in = f_in[input_key]
shape = ds_in.shape
have_data = output_key in f_out
if have_data and not overwrite:
return True
# validate chunks
if chunks is None:
chunks_ = True if is_h5 else None
else:
chunks_ = tuple(min(ch, sh) for sh, ch in zip(shape, chunks))
if convert_dtype:
out_dtype = get_new_dtype(ds_in.dtype)
else:
out_dtype = ds_in.dtype
if overwrite:
del f_out[output_key]
# create the output dataset and get the effective chunks
ds_out = f_out.create_dataset(output_key, shape=shape, chunks=chunks_,
compression='gzip', dtype=out_dtype)
ds_chunks = ds_out.chunks
def copy_chunk(bb):
data = ds_in[bb]
# skip empty chunks
if data.sum() == 0:
return
if convert_dtype:
data = convert_to_bdv_dtype(data)
ds_out[bb] = data
print("Copy initial dataset from: %s:%s to %s:%s" % (input_path, input_key,
output_path, output_key))
blocks = list(blocking(shape, ds_chunks))
if n_threads > 1:
with futures.ThreadPoolExecutor(n_threads) as tp:
list(tqdm(tp.map(copy_chunk, blocks), total=len(blocks)))
else:
for bb in tqdm(blocks, total=len(blocks)):
copy_chunk(bb)
return False
def normalize_output_path(output_path):
# construct hdf5 output path and xml output path from output path
base_path, ext = os.path.splitext(output_path)
is_h5 = True
if ext == '':
data_path = output_path + '.h5'
xml_path = output_path + '.xml'
elif ext.lower() in HDF5_EXTENSIONS:
data_path = output_path
xml_path = base_path + '.xml'
elif ext.lower() in XML_EXTENSIONS:
data_path = base_path + '.h5'
xml_path = output_path
elif ext.lower() in N5_EXTENSIONS:
data_path = output_path
xml_path = base_path + '.xml'
is_h5 = False
else:
raise ValueError("File extension %s not supported" % ext)
return data_path, xml_path, is_h5
def make_scales(data_path, downscale_factors, downscale_mode,
ndim, setup_id, is_h5,
chunks=None, n_threads=1, timepoint=0, overwrite=False):
ds_modes = ('nearest', 'mean', 'max', 'min', 'interpolate')
if downscale_mode not in ds_modes:
raise ValueError("Invalid downscale mode %s, choose one of %s" % downscale_mode, str(ds_modes))
if not all(isinstance(factor, (int, tuple, list)) for factor in downscale_factors):
raise ValueError("Invalid downscale factor")
if not all(len(factor) == 3 for factor in downscale_factors
if isinstance(factor, (tuple, list))):
raise ValueError("Invalid downscale factor")
# normalize all factors to be tuple or list
factors = [ndim*[factor] if isinstance(factor, int) else factor
for factor in downscale_factors]
# run single downsampling stages
for scale, factor in enumerate(factors):
in_key = get_key(is_h5, timepoint=timepoint, setup_id=setup_id, scale=scale)
out_key = get_key(is_h5, timepoint=timepoint, setup_id=setup_id, scale=scale + 1)
print("Downsample scale %i / %i" % (scale + 1, len(factors)))
downsample(data_path, in_key, out_key, factor, downscale_mode, n_threads,
overwrite=overwrite)
# add first level to factors
factors = [[1, 1, 1]] + factors
return factors
def convert_to_bdv(input_path, input_key, output_path,
downscale_factors=None, downscale_mode='nearest',
resolution=[1., 1., 1.], unit='pixel',
setup_id=None, timepoint=0,
setup_name=None, affine=None, attributes={'channel': {'id': None}},
overwrite='skip', convert_dtype=None, chunks=None, n_threads=1):
""" Convert hdf5 volume to BigDatViewer format.
Optionally downscale the input volume and write it to BigDataViewer scale pyramid.
Note that the default axis conventions of numpy and the native BDV implementation are
different. Numpy uses C-axis order, BDV uses F-axis order. Hence the shape of the
    input data (Z,Y,X) will be stored as (X,Y,Z) in the metadata. This also applies
to the values for the parameters resolution and downscale_factors: they need
to be passed as (Z,Y,X) and will be stored as (X,Y,Z).
Arguments:
input_path (str): path to hdf5 input volume
input_key (str): path in hdf5 input file
output_path (str): output path to bdv file
        downscale_factors (tuple or list): factors used to create the multi-scale pyramid.
The factors need to be specified per dimension and are interpreted relative to the previous factor.
If no argument is passed, pybdv does not create a multi-scale pyramid. (default: None)
downscale_mode (str): mode used for downscaling.
            Can be 'mean', 'max', 'min', 'nearest' or 'interpolate' (default: 'nearest').
        resolution (list or tuple): resolution of the data
unit (str): unit of measurement
setup_id (int): id of this view set-up. By default, the next free id is chosen (default: None).
timepoint (int): time point id to write (default: 0)
setup_name (str): name of this view set-up (default: None)
affine (list[float] or dict[str, list[float]]): affine view transformation(s) for this setup.
Can either be a list for a single transformation or a dictionary for multiple transformations.
Each transformation needs to be given in the bdv convention, i.e. using XYZ axis convention
unlike the other parameters of pybdv, that expect ZYX axis convention. (default: None)
attributes (dict[str, dict]): attributes associated with the view setups. Expects a dictionary
            which maps the attribute names to their settings (also dict).
            The setting dictionaries must contain the entry 'id'.
If this entry's value is None, it will be set to the current highest id + 1.
(default: {'channel': {'id': None}})
        overwrite (str): whether to over-write or skip existing data and/or metadata. Can be one of
- 'skip': don't over-write data or metadata
- 'data': over-write data, don't over-write metadata
- 'metadata': don't over-write data, over-write metadata
            - 'all': over-write both data and metadata
(default: 'skip')
        convert_dtype (bool): convert the datatype to a value range that is compatible with BigDataViewer.
This will map unsigned types to signed and fail if the value range is too large. (default: None)
chunks (tuple): chunks for the output dataset.
By default the h5py auto chunks are used (default: None)
        n_threads (int): number of threads used for copying and downscaling (default: 1)
"""
# validate input data arguments
if not os.path.exists(input_path):
raise ValueError("Input file %s does not exist" % input_path)
with open_file(input_path, 'r') as f:
if input_key not in f:
raise ValueError("%s not in %s" % (input_key, input_path))
shape = f[input_key].shape
ndim = len(shape)
if ndim != 3 or len(resolution) != ndim:
raise ValueError("Invalid input dimensionality")
if affine is not None:
validate_affine(affine)
# validate over-write
if overwrite not in OVERWRITE_OPTIONS:
raise ValueError("Invalid overwrite mode %s, expected one of %s" % (overwrite,
', '.join(OVERWRITE_OPTIONS)))
data_path, xml_path, is_h5 = normalize_output_path(output_path)
setup_id, overwrite_data, overwrite_metadata, skip = handle_setup_id(setup_id,
xml_path,
timepoint,
overwrite,
is_h5)
if skip:
return
# validate the attributes
# if overwrite_data or overwrite_metadata was set, we do not enforce consistency of the attributes
enforce_consistency = not (overwrite_data or overwrite_metadata)
attributes_ = validate_attributes(xml_path, attributes, setup_id, enforce_consistency)
# we need to convert the dtype only for the hdf5 based storage
if convert_dtype is None:
convert_dtype = is_h5
# copy the initial dataset
base_key = get_key(is_h5, timepoint=timepoint, setup_id=setup_id, scale=0)
skip_downscaling = copy_dataset(input_path, input_key,
data_path, base_key, is_h5, convert_dtype=convert_dtype,
chunks=chunks, n_threads=n_threads, overwrite=overwrite_data)
# downsample if needed
if downscale_factors is None:
# set single level downscale factor
factors = [[1, 1, 1]]
elif skip_downscaling:
factors = [[1, 1, 1]] + list(downscale_factors)
else:
factors = make_scales(data_path, downscale_factors, downscale_mode,
ndim, setup_id, is_h5,
n_threads=n_threads, chunks=chunks, timepoint=timepoint,
overwrite=overwrite_data)
# write the format specific metadata in the output container
if is_h5:
write_h5_metadata(data_path, factors, setup_id, timepoint,
overwrite=overwrite_data)
else:
write_n5_metadata(data_path, factors, resolution, setup_id, timepoint,
overwrite=overwrite_data)
# write bdv xml metadata
write_xml_metadata(xml_path, data_path, unit,
resolution, is_h5,
setup_id=setup_id,
timepoint=timepoint,
setup_name=setup_name,
affine=affine,
attributes=attributes_,
overwrite=overwrite_metadata,
overwrite_data=overwrite_data,
enforce_consistency=enforce_consistency)
def write_initial_dataset(data_path, setup_id, timepoint, data, chunks,
is_h5, overwrite, n_threads):
base_key = get_key(is_h5, timepoint=timepoint, setup_id=setup_id, scale=0)
with open_file(data_path, 'a') as f:
have_data = base_key in f
if have_data and not overwrite:
return True
# need to remove the previous data-set if we over-write
if have_data and overwrite:
del f[base_key]
ds = f.create_dataset(base_key, shape=data.shape, compression='gzip',
chunks=chunks, dtype=data.dtype)
# if we have z5py, this will trigger multi-threaded write (otherwise no effect)
ds.n_threads = n_threads
ds[:] = data
return False
def make_bdv(data, output_path,
downscale_factors=None, downscale_mode='nearest',
resolution=[1., 1., 1.], unit='pixel',
setup_id=None, timepoint=0, setup_name=None,
affine=None, attributes={'channel': {'id': None}},
overwrite='skip', convert_dtype=None, chunks=None, n_threads=1):
""" Write data in BigDatViewer file format for one view setup and timepoint.
Optionally downscale the input volume and write it to BigDataViewer scale pyramid.
Note that the default axis conventions of numpy and the native BDV implementation are
different. Numpy uses C-axis order, BDV uses F-axis order. Hence the shape of the
    input data (Z,Y,X) will be stored as (X,Y,Z) in the metadata. This also applies
to the values for the parameters resolution and downscale_factors: they need
to be passed as (Z,Y,X) and will be stored as (X,Y,Z).
Arguments:
data (np.ndarray): input data
output_path (str): output path to bdv file
        downscale_factors (tuple or list): factors used to create the multi-scale pyramid.
The factors need to be specified per dimension and are interpreted relative to the previous factor.
If no argument is passed, pybdv does not create a multi-scale pyramid. (default: None)
downscale_mode (str): mode used for downscaling.
            Can be 'mean', 'max', 'min', 'nearest' or 'interpolate' (default: 'nearest').
        resolution (list or tuple): resolution of the data
unit (str): unit of measurement
setup_id (int): id of this view set-up. By default, the next free id is chosen (default: None).
timepoint (int): time point id to write (default: 0)
setup_name (str): name of this view set-up (default: None)
affine (list[float] or dict[str, list[float]]): affine view transformation(s) for this setup.
Can either be a list for a single transformation or a dictionary for multiple transformations.
Each transformation needs to be given in the bdv convention, i.e. using XYZ axis convention
unlike the other parameters of pybdv, that expect ZYX axis convention. (default: None)
attributes (dict[str, dict]): attributes associated with the view setups. Expects a dictionary
            which maps the attribute names to their settings (also dict).
            The setting dictionaries must contain the entry 'id'.
If this entry's value is None, it will be set to the current highest id + 1.
(default: {'channel': {'id': None}})
        overwrite (str): whether to over-write or skip existing data and/or metadata. Can be one of
- 'skip': don't over-write data or metadata
- 'data': over-write data, don't over-write metadata
- 'metadata': don't over-write data, over-write metadata
            - 'all': over-write both data and metadata
(default: 'skip')
        convert_dtype (bool): convert the datatype to a value range that is compatible with BigDataViewer.
This will map unsigned types to signed and fail if the value range is too large. (default: None)
chunks (tuple): chunks for the output dataset.
By default the h5py auto chunks are used (default: None)
        n_threads (int): number of threads used for writing and downscaling (default: 1)
"""
# validate input arguments
if not isinstance(data, np.ndarray):
raise ValueError("Input needs to be numpy array, got %s" % type(data))
ndim = data.ndim
if ndim != 3 or len(resolution) != ndim:
raise ValueError("Invalid input dimensionality")
if affine is not None:
validate_affine(affine)
data_path, xml_path, is_h5 = normalize_output_path(output_path)
setup_id, overwrite_data, overwrite_metadata, skip = handle_setup_id(setup_id,
xml_path,
timepoint,
overwrite,
is_h5)
if skip:
return
# validate the attributes
# if overwrite_data or overwrite_metadata was set, we do not enforce consistency of the attributes
enforce_consistency = not (overwrite_data or overwrite_metadata)
attributes_ = validate_attributes(xml_path, attributes, setup_id, enforce_consistency)
# we need to convert the dtype only for the hdf5 based storage
if convert_dtype is None:
convert_dtype = is_h5
if convert_dtype:
data = convert_to_bdv_dtype(data)
# set proper chunks
if chunks is None:
chunks_ = True if is_h5 else None
else:
chunks_ = tuple(min(ch, sh) for sh, ch in zip(data.shape, chunks))
# write initial dataset
skip_downscaling = write_initial_dataset(data_path, setup_id, timepoint, data, chunks_,
is_h5, overwrite_data, n_threads)
# downsample if needed
if downscale_factors is None:
# set single level downscale factor
factors = [[1, 1, 1]]
elif skip_downscaling:
factors = [[1, 1, 1]] + list(downscale_factors)
else:
factors = make_scales(data_path, downscale_factors, downscale_mode,
ndim, setup_id, is_h5,
n_threads=n_threads, chunks=chunks, timepoint=timepoint,
overwrite=overwrite_data)
# write the format specific metadata in the output container
if is_h5:
write_h5_metadata(data_path, factors, setup_id, timepoint,
overwrite=overwrite_data)
else:
write_n5_metadata(data_path, factors, resolution, setup_id, timepoint,
overwrite=overwrite_data)
# write bdv xml metadata
write_xml_metadata(xml_path, data_path, unit,
resolution, is_h5,
setup_id=setup_id,
timepoint=timepoint,
setup_name=setup_name,
affine=affine,
attributes=attributes_,
overwrite=overwrite_metadata,
overwrite_data=overwrite_data,
enforce_consistency=enforce_consistency)
``` |
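To make the docstrings above concrete, here is a minimal, hedged usage sketch for `make_bdv`. It assumes `make_bdv` and `convert_to_bdv` are re-exported at the package level (otherwise import them from `pybdv.converter`); the output paths, factors and resolution are arbitrary example values, given in ZYX order as the docstrings require.
```python
import numpy as np
from pybdv import make_bdv

# A small test volume in ZYX order.
data = np.random.rand(64, 128, 128).astype('float32')

# Each factor is relative to the previous scale level (ZYX order).
scale_factors = [[1, 2, 2], [2, 2, 2]]

# Writes out.h5 plus the BigDataViewer metadata file out.xml next to it.
make_bdv(data, 'out.h5',
         downscale_factors=scale_factors,
         downscale_mode='mean',
         resolution=[0.5, 0.25, 0.25],  # ZYX, in the unit below
         unit='micrometer',
         attributes={'channel': {'id': 0}})

# convert_to_bdv works the same way, but reads the volume from an existing
# container instead of taking an in-memory array (paths are hypothetical):
# convert_to_bdv('input.h5', 'raw', 'converted.h5',
#                resolution=[0.5, 0.25, 0.25], unit='micrometer')
```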
{
"source": "jhenriquetdg/devablog",
"score": 4
} |
#### File: site-packages/guess_language/__main__.py
```python
import argparse
import locale
import os
import sys
import guess_language.console_mode #@UnusedImport
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__.strip(),
prog="{} -m {}".format(os.path.basename(sys.executable),
"guess_language")
)
parser.add_argument("file",
help="plain text file or “-” for stdin")
parser.add_argument("-c", "--encoding",
help="input encoding")
parser.add_argument("--disable-enchant", dest="use_enchant",
action="store_false",
help="disable enchant")
return parser.parse_args()
def main():
args = parse_args()
if args.file == "-":
file = sys.stdin.fileno()
encoding = args.encoding or (
sys.stdin.encoding if sys.stdin.isatty()
else locale.getpreferredencoding()
)
else:
file = args.file
encoding = args.encoding or "utf-8"
with open(file, encoding=encoding) as f:
text = "".join(f.readlines())
if not args.use_enchant:
guess_language.use_enchant(False)
tag = guess_language.guess_language(text)
print(tag)
return 0 if tag else 1
if __name__ == "__main__":
sys.exit(main())
``` |
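The command-line wrapper above is a thin layer over `guess_language.guess_language`. A programmatic equivalent (the sample text is made up) could look like this:
```python
import guess_language

text = "Das ist ein kurzer deutscher Beispieltext."
tag = guess_language.guess_language(text)
print(tag)  # expected to print a language tag such as 'de'

# Mirrors the --disable-enchant flag of the CLI.
guess_language.use_enchant(False)
print(guess_language.guess_language(text))
```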
{
"source": "jhenriquezs/django-cqrs",
"score": 2
} |
#### File: django-cqrs/dj_cqrs/mixins.py
```python
import logging
from dj_cqrs.constants import (
ALL_BASIC_FIELDS,
FIELDS_TRACKER_FIELD_NAME,
TRACKED_FIELDS_ATTR_NAME,
)
from dj_cqrs.managers import MasterManager, ReplicaManager
from dj_cqrs.metas import MasterMeta, ReplicaMeta
from dj_cqrs.signals import MasterSignals, post_bulk_create, post_update
from django.conf import settings
from django.db import router, transaction
from django.db.models import DateField, DateTimeField, F, IntegerField, Manager, Model
from django.db.models.expressions import CombinedExpression
from django.utils.module_loading import import_string
logger = logging.getLogger('django-cqrs')
class RawMasterMixin(Model):
"""Base class for MasterMixin. **Users shouldn't use this
class directly.**"""
CQRS_ID = None
"""Unique CQRS identifier for all microservices."""
CQRS_PRODUCE = True
"""If false, no cqrs data is sent through the transport."""
CQRS_FIELDS = ALL_BASIC_FIELDS
"""
List of fields to include in the CQRS payload.
You can also set the fields attribute to the special value '__all__'
to indicate that all fields in the model should be used.
"""
CQRS_SERIALIZER = None
"""
Optional serializer used to create the instance representation.
Must be expressed as a module dotted path string like
`mymodule.serializers.MasterModelSerializer`.
"""
CQRS_TRACKED_FIELDS = None
"""
List of fields of the main model for which you want to track the changes
and send the previous values via transport. You can also set the field
attribute to the special value "__all__" to indicate that all fields in
the model must be used.
"""
objects = Manager()
cqrs = MasterManager()
"""Manager that adds needed CQRS queryset methods."""
cqrs_revision = IntegerField(
default=0, help_text="This field must be incremented on any model update. "
"It's used to for CQRS sync.",
)
cqrs_updated = DateTimeField(
        auto_now=True, help_text="This field is updated automatically on every model update. "
                                 "It's used for CQRS sync.",
)
class Meta:
abstract = True
@property
def cqrs_saves_count(self):
"""Shows how many times this instance has been saved within the transaction."""
return getattr(self, '_cqrs_saves_count', 0)
@property
def is_initial_cqrs_save(self):
"""This flag is used to check if instance has already been registered for CQRS update."""
return self.cqrs_saves_count < 2
def reset_cqrs_saves_count(self):
"""This method is used to automatically reset instance CQRS counters on transaction commit.
        But this can also be used to control custom behaviour within a transaction
or in case of rollback,
when several sequential transactions are used to change the same instance.
"""
if hasattr(self, '_cqrs_saves_count'):
self._cqrs_saves_count = 0
def save(self, *args, **kwargs):
update_fields = kwargs.pop('update_fields', None)
update_cqrs_fields = kwargs.pop('update_cqrs_fields', self._update_cqrs_fields_default)
using = kwargs.get('using') or router.db_for_write(self.__class__, instance=self)
connection = transaction.get_connection(using)
if connection.in_atomic_block:
_cqrs_saves_count = self.cqrs_saves_count
self._cqrs_saves_count = _cqrs_saves_count + 1
else:
self.reset_cqrs_saves_count()
if (not update_fields) and self.is_initial_cqrs_save and (not self._state.adding):
self.cqrs_revision = F('cqrs_revision') + 1
elif update_fields and update_cqrs_fields:
self.cqrs_revision = F('cqrs_revision') + 1
update_fields = set(update_fields)
update_fields.update({'cqrs_revision', 'cqrs_updated'})
kwargs['update_fields'] = update_fields
self.save_tracked_fields()
return super(RawMasterMixin, self).save(*args, **kwargs)
def save_tracked_fields(self):
if hasattr(self, FIELDS_TRACKER_FIELD_NAME):
tracker = getattr(self, FIELDS_TRACKER_FIELD_NAME)
if self.is_initial_cqrs_save:
if self._state.adding:
data = tracker.changed_initial()
else:
data = tracker.changed()
setattr(self, TRACKED_FIELDS_ATTR_NAME, data)
@property
def _update_cqrs_fields_default(self):
return settings.CQRS['master']['CQRS_AUTO_UPDATE_FIELDS']
def to_cqrs_dict(self, using=None, sync=False):
"""CQRS serialization for transport payload.
        :param using: Database alias to use for serialization, defaults to None
        :type using: str, optional
        :param sync: Sync package flag, defaults to False
        :type sync: bool, optional
:return: The serialized instance data.
:rtype: dict
"""
if self.CQRS_SERIALIZER:
data = self._class_serialization(using, sync=sync)
else:
self._refresh_f_expr_values(using)
data = self._common_serialization(using)
return data
def get_tracked_fields_data(self):
"""CQRS serialization for tracked fields to include
in the transport payload.
:return: Previous values for tracked fields.
:rtype: dict
"""
return getattr(self, TRACKED_FIELDS_ATTR_NAME, None)
def cqrs_sync(self, using=None, queue=None):
"""Manual instance synchronization.
        :param using: Database alias to use for the sync, defaults to None
:type using: str, optional
:param queue: Syncing can be executed just for a single queue, defaults to None
(all queues)
:type queue: str, optional
:return: True if instance can be synced, False otherwise.
:rtype: bool
"""
if self._state.adding:
return False
if not self.CQRS_SERIALIZER:
try:
self.refresh_from_db()
except self._meta.model.DoesNotExist:
return False
MasterSignals.post_save(
self._meta.model, instance=self, using=using, queue=queue, sync=True,
)
return True
def is_sync_instance(self):
"""
        This method can be overridden to sync only instances that match certain rules,
        for example, only objects with a special status or created after some date.
:return: True if this instance needs to be synced, False otherwise
:rtype: bool
"""
return True
@classmethod
def relate_cqrs_serialization(cls, queryset):
"""
        This method should be overridden to optimize database access,
        for example using `select_related` and `prefetch_related`,
        when related models must be included in the master model
representation.
:param queryset: The initial queryset.
:type queryset: django.db.models.QuerySet
:return: The optimized queryset.
:rtype: django.db.models.QuerySet
"""
return queryset
def get_custom_cqrs_delete_data(self):
""" This method should be overridden when additional data is needed in DELETE payload. """
pass
@classmethod
def call_post_bulk_create(cls, instances, using=None):
""" Post bulk create signal caller (django doesn't support it by default).
.. code-block:: python
# Used automatically by cqrs.bulk_create()
instances = model.cqrs.bulk_create(instances)
"""
post_bulk_create.send(cls, instances=instances, using=using)
@classmethod
def call_post_update(cls, instances, using=None):
""" Post bulk update signal caller (django doesn't support it by default).
.. code-block:: python
# Used automatically by cqrs.bulk_update()
qs = model.objects.filter(k1=v1)
model.cqrs.bulk_update(qs, k2=v2)
"""
post_update.send(cls, instances=instances, using=using)
def _common_serialization(self, using):
opts = self._meta
if isinstance(self.CQRS_FIELDS, str) and self.CQRS_FIELDS == ALL_BASIC_FIELDS:
included_fields = None
else:
included_fields = self.CQRS_FIELDS
data = {}
for f in opts.fields:
if included_fields and (f.name not in included_fields):
continue
value = f.value_from_object(self)
if value is not None and isinstance(f, (DateField, DateTimeField)):
value = str(value)
data[f.name] = value
        # We need to include additional fields for synchronisation, e.g. to prevent de-duplication
data['cqrs_revision'] = self.cqrs_revision
data['cqrs_updated'] = str(self.cqrs_updated)
return data
def _class_serialization(self, using, sync=False):
if sync:
instance = self
else:
db = using if using is not None else self._state.db
qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
instance = self.relate_cqrs_serialization(qs).first()
if not instance:
raise RuntimeError("Couldn't serialize CQRS class ({0}).".format(self.CQRS_ID))
data = self._cqrs_serializer_cls(instance).data
data['cqrs_revision'] = instance.cqrs_revision
data['cqrs_updated'] = str(instance.cqrs_updated)
return data
def _refresh_f_expr_values(self, using):
opts = self._meta
fields_to_refresh = []
if isinstance(self.cqrs_revision, CombinedExpression):
fields_to_refresh.append('cqrs_revision')
if isinstance(self.CQRS_FIELDS, str) and self.CQRS_FIELDS == ALL_BASIC_FIELDS:
included_fields = None
else:
included_fields = self.CQRS_FIELDS
for f in opts.fields:
if included_fields and (f.name not in included_fields):
continue
value = f.value_from_object(self)
if value is not None and isinstance(value, CombinedExpression):
fields_to_refresh.append(f.name)
if fields_to_refresh:
self.refresh_from_db(fields=fields_to_refresh)
@property
def _cqrs_serializer_cls(self):
""" Serialization class loader. """
if hasattr(self.__class__, '_cqrs_serializer_class'):
return self.__class__._cqrs_serializer_class
try:
serializer = import_string(self.CQRS_SERIALIZER)
self.__class__._cqrs_serializer_class = serializer
return serializer
except ImportError:
raise ImportError(
"Model {0}: CQRS_SERIALIZER can't be imported.".format(self.__class__),
)
class MasterMixin(RawMasterMixin, metaclass=MasterMeta):
"""
    Mixin for the master CQRS model that sends data updates to its replicas.
"""
class Meta:
abstract = True
class ReplicaMixin(Model, metaclass=ReplicaMeta):
"""
    Mixin for the replica CQRS model that receives data updates from the master. Models using
    this mixin should be read-only, but this is not enforced (e.g. for the admin).
"""
CQRS_ID = None
"""Unique CQRS identifier for all microservices."""
CQRS_MAPPING = None
"""Mapping of master data field name to replica model field name."""
CQRS_CUSTOM_SERIALIZATION = False
"""Set it to True to skip default data check."""
CQRS_SELECT_FOR_UPDATE = False
"""Set it to True to acquire lock on instance creation/update."""
CQRS_NO_DB_OPERATIONS = False
"""Set it to True to disable any default DB operations for this model."""
objects = Manager()
cqrs = ReplicaManager()
"""Manager that adds needed CQRS queryset methods."""
cqrs_revision = IntegerField()
cqrs_updated = DateTimeField()
class Meta:
abstract = True
@classmethod
def cqrs_save(cls, master_data, previous_data=None, sync=False):
""" This method saves (creates or updates) model instance from CQRS master instance data.
This method must not be overridden. Otherwise, sync checks need to be implemented manually.
:param dict master_data: CQRS master instance data.
:param dict previous_data: Previous values for tracked fields.
:param bool sync: Sync package flag.
:return: Model instance.
:rtype: django.db.models.Model
"""
if cls.CQRS_NO_DB_OPERATIONS:
raise NotImplementedError
return cls.cqrs.save_instance(master_data, previous_data, sync)
@classmethod
def cqrs_create(cls, sync, mapped_data, previous_data=None):
""" This method creates model instance from CQRS mapped instance data. It must be overridden
by replicas of master models with custom serialization.
:param bool sync: Sync package flag.
:param dict mapped_data: CQRS mapped instance data.
:param dict previous_data: Previous mapped values for tracked fields.
:return: Model instance.
:rtype: django.db.models.Model
"""
return cls._default_manager.create(**mapped_data)
def cqrs_update(self, sync, mapped_data, previous_data=None):
""" This method updates model instance from CQRS mapped instance data. It must be overridden
by replicas of master models with custom serialization.
:param bool sync: Sync package flag.
:param dict mapped_data: CQRS mapped instance data.
:param dict previous_data: Previous mapped values for tracked fields.
:return: Model instance.
:rtype: django.db.models.Model
"""
for key, value in mapped_data.items():
setattr(self, key, value)
self.save()
return self
@classmethod
def cqrs_delete(cls, master_data):
""" This method deletes model instance from mapped CQRS master instance data.
:param dict master_data: CQRS master instance data.
:return: Flag, if delete operation is successful (even if nothing was deleted).
:rtype: bool
"""
if cls.CQRS_NO_DB_OPERATIONS:
raise NotImplementedError
return cls.cqrs.delete_instance(master_data)
@staticmethod
def should_retry_cqrs(current_retry, exception=None):
"""Checks if we should retry the message after current attempt.
:param current_retry: Current number of message retries.
:type current_retry: int
:param exception: Exception instance raised during message consume.
:type exception: Exception, optional
:return: True if message should be retried, False otherwise.
:rtype: bool
"""
max_retries = settings.CQRS['replica']['CQRS_MAX_RETRIES']
if max_retries is None:
# Infinite
return True
return current_retry < max_retries
@staticmethod
def get_cqrs_retry_delay(current_retry):
"""Returns number of seconds to wait before requeuing the message.
:param current_retry: Current number of message retries.
:type current_retry: int
:return: Delay in seconds.
:rtype: int
"""
return settings.CQRS['replica']['CQRS_RETRY_DELAY']
``` |
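To make the mixin attributes above concrete, here is a minimal sketch of a master/replica model pair. The model names and fields are hypothetical, and a real project also needs django-cqrs transport and queue settings that are not shown here.
```python
# Hypothetical models illustrating the attributes documented in RawMasterMixin
# and ReplicaMixin above; names and fields are made up for the example.
from django.db import models
from dj_cqrs.mixins import MasterMixin, ReplicaMixin


class Product(MasterMixin):
    CQRS_ID = 'product'                      # shared identifier across services
    CQRS_FIELDS = ('id', 'name', 'price')    # fields included in the payload
    CQRS_TRACKED_FIELDS = ('price',)         # previous values sent on change

    name = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=10, decimal_places=2)


class ProductReplica(ReplicaMixin):
    CQRS_ID = 'product'                      # must match the master's CQRS_ID

    name = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=10, decimal_places=2)
```
On the master side `to_cqrs_dict()` builds the payload described above; on the replica side `cqrs_save()` persists it.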
{
"source": "Jhenry019/cit-blockchain",
"score": 3
} |
#### File: cit-blockchain/student_records/models.py
```python
from django.db import models
class StudentRecord(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
address = models.CharField(max_length=100)
dob = models.CharField(max_length=50)
phone = models.CharField(max_length=50)
email = models.CharField(max_length=100)
time_pref = models.CharField(max_length=50, default="Day class")
empl_status = models.TextField(default="None")
commitments = models.TextField(default="None")
problem_solving = models.TextField(default="None")
data_bg = models.TextField(default="None")
mean_med_mode = models.IntegerField(default=1)
programming_exp = models.IntegerField(default=1)
sql_exp = models.IntegerField(default=1)
job_goal = models.CharField(max_length=100, default="Other")
events = models.TextField(default="None")
other = models.TextField(default="None")
how_find_codeit = models.TextField(default="None")
has_laptop = models.CharField(max_length=20, default="Yes")
operating_sys = models.CharField(max_length=100, default="Windows")
bio = models.TextField(default=f"Enter a bio")
def __str__(self):
return f"{self.first_name} {self.last_name}"
``` |
{
"source": "jhenry82/salt",
"score": 3
} |
#### File: salt/utils/network.py
```python
import socket
import subprocess
import re
import logging
import os
from string import ascii_letters, digits
# Attempt to import wmi
try:
import wmi
import salt.utils.winapi
except ImportError:
pass
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
# pylint: disable=C0103
def sanitize_host(host):
'''
Sanitize host string.
'''
return ''.join([
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
])
def isportopen(host, port):
'''
Return status of a port
CLI Example:
.. code-block:: bash
salt '*' network.isportopen 127.0.0.1 22
'''
if not 1 <= int(port) <= 65535:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
out = sock.connect_ex((sanitize_host(host), int(port)))
return out
def host_to_ip(host):
'''
Returns the IP address of a given hostname
CLI Example:
.. code-block:: bash
salt '*' network.host_to_ip example.com
'''
try:
family, socktype, proto, canonname, sockaddr = socket.getaddrinfo(
host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
if family == socket.AF_INET:
ip, port = sockaddr
elif family == socket.AF_INET6:
ip, port, flow_info, scope_id = sockaddr
except Exception:
ip = None
return ip
def _filter_localhost_names(name_list):
'''
Returns list without local hostnames and ip addresses.
'''
h = []
re_filters = [
'localhost.*',
'ip6-.*',
'127.*',
r'0\.0\.0\.0',
'::1.*',
'fe00::.*',
'fe02::.*',
]
for name in name_list:
filtered = False
for f in re_filters:
if re.match(f, name):
filtered = True
break
if not filtered:
h.append(name)
return h
def _sort_hostnames(hostname_list):
'''
sort minion ids favoring in order of:
- FQDN
- public ipaddress
- localhost alias
- private ipaddress
'''
# punish matches in order of preference
punish = [
'localhost.localdomain',
'localhost.my.domain',
'localhost4.localdomain4',
'localhost',
'ip6-localhost',
'ip6-loopback',
'127.0.2.1',
'127.0.1.1',
'127.0.0.1',
'0.0.0.0',
'::1',
'fe00::',
'fe02::',
]
def _cmp_hostname(a, b):
# should never have a space in hostname
if ' ' in a:
return 1
if ' ' in b:
return -1
# punish localhost list
if a in punish:
if b in punish:
return punish.index(a) - punish.index(b)
return 1
if b in punish:
return -1
# punish ipv6
if ':' in a or ':' in b:
return a.count(':') - b.count(':')
# punish ipv4
a_is_ipv4 = a.count('.') == 3 and not any(c.isalpha() for c in a)
b_is_ipv4 = b.count('.') == 3 and not any(c.isalpha() for c in b)
if a_is_ipv4 and a.startswith('127.'):
return 1
if b_is_ipv4 and b.startswith('127.'):
return -1
if a_is_ipv4 and not b_is_ipv4:
return 1
if a_is_ipv4 and b_is_ipv4:
return 0
if not a_is_ipv4 and b_is_ipv4:
return -1
# favor hosts with more dots
diff = b.count('.') - a.count('.')
if diff != 0:
return diff
# favor longest fqdn
return len(b) - len(a)
return sorted(hostname_list, cmp=_cmp_hostname)
def get_hostnames():
'''
Get list of hostnames using multiple strategies
'''
h = []
h.append(socket.gethostname())
h.append(socket.getfqdn())
# try socket.getaddrinfo
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
# info struct [family, socktype, proto, canonname, sockaddr]
if len(info) >= 4:
h.append(info[3])
except socket.gaierror:
pass
# try /etc/hostname
try:
name = ''
with salt.utils.fopen('/etc/hostname') as hfl:
name = hfl.read()
h.append(name)
except (IOError, OSError):
pass
# try /etc/hosts
try:
with salt.utils.fopen('/etc/hosts') as hfl:
for line in hfl:
names = line.split()
try:
ip = names.pop(0)
except IndexError:
continue
if ip.startswith('127.') or ip == '::1':
for name in names:
h.append(name)
except (IOError, OSError):
pass
# try windows hosts
if salt.utils.is_windows():
try:
windir = os.getenv('WINDIR')
with salt.utils.fopen(windir + r'\system32\drivers\etc\hosts') as hfl:
for line in hfl:
# skip commented or blank lines
if line[0] == '#' or len(line) <= 1:
continue
# process lines looking for '127.' in first column
try:
entry = line.split()
if entry[0].startswith('127.'):
for name in entry[1:]: # try each name in the row
h.append(name)
except IndexError:
pass # could not split line (malformed entry?)
except (IOError, OSError):
pass
# strip spaces and ignore empty strings
hosts = []
for name in h:
name = name.strip()
if len(name) > 0:
hosts.append(name)
# remove duplicates
hosts = list(set(hosts))
return hosts
def generate_minion_id():
'''
Returns a minion id after checking multiple sources for a FQDN.
If no FQDN is found you may get an ip address
CLI Example:
.. code-block:: bash
salt '*' network.generate_minion_id
'''
possible_ids = get_hostnames()
ip_addresses = [IPv4Address(addr) for addr
in salt.utils.network.ip_addrs(include_loopback=True)
if not addr.startswith('127.')]
# include public and private ipaddresses
for addr in ip_addresses:
possible_ids.append(str(addr))
possible_ids = _filter_localhost_names(possible_ids)
# if no minion id
if len(possible_ids) == 0:
return 'noname'
hosts = _sort_hostnames(possible_ids)
return hosts[0]
def get_fqhostname():
'''
Returns the fully qualified hostname
CLI Example:
.. code-block:: bash
salt '*' network.get_fqhostname
'''
l = []
l.append(socket.getfqdn())
# try socket.getaddrinfo
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
# info struct [family, socktype, proto, canonname, sockaddr]
if len(info) >= 4:
l.append(info[3])
except socket.gaierror:
pass
l = _sort_hostnames(l)
if len(l) > 0:
return l[0]
return None
def ip_to_host(ip):
'''
Returns the hostname of a given IP
CLI Example:
.. code-block:: bash
salt '*' network.ip_to_host 8.8.8.8
'''
try:
hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip)
except Exception:
hostname = None
return hostname
# pylint: enable=C0103
def cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
try:
cidr_bits = int(cidr_bits)
if not 1 <= cidr_bits <= 32:
return ''
except ValueError:
return ''
netmask = ''
for idx in range(4):
if idx:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits)))
cidr_bits = 0
return netmask
def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103
'''
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
'''
return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
# pylint: disable=C0103
def _number_of_set_bits(x):
'''
Returns the number of bits that are set in a 32bit int
'''
#Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
# pylint: enable=C0103
def _interfaces_ip(out):
'''
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
'''
ret = dict()
def parse_network(value, cols):
'''
Return a tuple of ip, netmask, broadcast
based on the current set of cols
'''
brd = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/') # pylint: disable=C0103
else:
ip = value # pylint: disable=C0103
cidr = 32
if type_ == 'inet':
mask = cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd') + 1]
elif type_ == 'inet6':
mask = cidr
return (ip, mask, brd)
groups = re.compile('\r?\n\\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if ' ' not in line:
continue
match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
if match:
iface, parent, attrs = match.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type_, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type_ in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast = parse_network(value, cols)
if type_ == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type_ == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip_, mask, brd = parse_network(value, cols)
data['secondary'].append({
'type': type_,
'address': ip_,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip_, mask, brd
elif type_.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
'''
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
'''
ret = dict()
piface = re.compile(r'^([^\s:]+)')
pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
pupdown = re.compile('UP')
pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
def linux_interfaces():
'''
Obtain interface information for *NIX/BSD variants
'''
ifaces = dict()
ip_path = salt.utils.which('ip')
ifconfig_path = None if ip_path else salt.utils.which('ifconfig')
if ip_path:
cmd1 = subprocess.Popen(
'{0} link show'.format(ip_path),
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
cmd2 = subprocess.Popen(
'{0} addr show'.format(ip_path),
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
ifaces = _interfaces_ip(cmd1 + '\n' + cmd2)
elif ifconfig_path:
cmd = subprocess.Popen(
'{0} -a'.format(ifconfig_path),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0]
ifaces = _interfaces_ifconfig(cmd)
return ifaces
def _interfaces_ipconfig(out):
'''
Returns a dictionary of interfaces with various information about each
(up/down state, ip address, netmask, and hwaddr)
    NOTE: This is not used by any function and may be removed in the
    future.
'''
ifaces = dict()
iface = None
adapter_iface_regex = re.compile(r'adapter (\S.+):$')
for line in out.splitlines():
if not line:
continue
# TODO what does Windows call Infiniband and 10/40gige adapters
if line.startswith('Ethernet'):
iface = ifaces[adapter_iface_regex.search(line).group(1)]
iface['up'] = True
addr = None
continue
if iface:
key, val = line.split(',', 1)
key = key.strip(' .')
val = val.strip()
if addr and key == 'Subnet Mask':
addr['netmask'] = val
elif key in ('IP Address', 'IPv4 Address'):
if 'inet' not in iface:
iface['inet'] = list()
addr = {'address': val.rstrip('(Preferred)'),
'netmask': None,
'broadcast': None} # TODO find the broadcast
iface['inet'].append(addr)
elif 'IPv6 Address' in key:
if 'inet6' not in iface:
                    iface['inet6'] = list()
# XXX What is the prefixlen!?
addr = {'address': val.rstrip('(Preferred)'),
'prefixlen': None}
iface['inet6'].append(addr)
elif key == 'Physical Address':
iface['hwaddr'] = val
elif key == 'Media State':
# XXX seen used for tunnel adaptors
# might be useful
                iface['up'] = (val != 'Media disconnected')
    return ifaces
def win_interfaces():
'''
Obtain interface information for Windows systems
'''
with salt.utils.winapi.Com():
c = wmi.WMI()
ifaces = {}
for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
ifaces[iface.Description] = dict()
if iface.MACAddress:
ifaces[iface.Description]['hwaddr'] = iface.MACAddress
if iface.IPEnabled:
ifaces[iface.Description]['up'] = True
for ip in iface.IPAddress:
if '.' in ip:
if 'inet' not in ifaces[iface.Description]:
ifaces[iface.Description]['inet'] = []
item = {'address': ip,
'label': iface.Description}
if iface.DefaultIPGateway:
broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '')
if broadcast:
item['broadcast'] = broadcast
if iface.IPSubnet:
netmask = next((i for i in iface.IPSubnet if '.' in i), '')
if netmask:
item['netmask'] = netmask
ifaces[iface.Description]['inet'].append(item)
if ':' in ip:
if 'inet6' not in ifaces[iface.Description]:
ifaces[iface.Description]['inet6'] = []
item = {'address': ip}
if iface.DefaultIPGateway:
broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '')
if broadcast:
item['broadcast'] = broadcast
if iface.IPSubnet:
netmask = next((i for i in iface.IPSubnet if ':' in i), '')
if netmask:
item['netmask'] = netmask
ifaces[iface.Description]['inet6'].append(item)
else:
ifaces[iface.Description]['up'] = False
return ifaces
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
'''
if salt.utils.is_windows():
return win_interfaces()
else:
return linux_interfaces()
def get_net_start(ipaddr, netmask):
ipaddr_octets = ipaddr.split('.')
netmask_octets = netmask.split('.')
net_start_octets = [str(int(ipaddr_octets[x]) & int(netmask_octets[x]))
for x in range(0, 4)]
return '.'.join(net_start_octets)
def get_net_size(mask):
binary_str = ''
for octet in mask.split('.'):
binary_str += bin(int(octet))[2:].zfill(8)
return len(binary_str.rstrip('0'))
def calculate_subnet(ipaddr, netmask):
return '{0}/{1}'.format(get_net_start(ipaddr, netmask),
get_net_size(netmask))
def _ipv4_to_bits(ipaddr):
'''
Accepts an IPv4 dotted quad and returns a string representing its binary
counterpart
'''
return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')])
def hw_addr(iface):
'''
Return the hardware address (a.k.a. MAC address) for a given interface
'''
return interfaces().get(iface, {}).get('hwaddr', '')
def interface(iface):
'''
Return the interface details
'''
return interfaces().get(iface, {}).get('inet', '')
def interface_ip(iface):
'''
Return the interface details
'''
return interfaces().get(iface, {}).get('inet', {})[0].get('address', {})
def subnets():
'''
Returns a list of subnets to which the host belongs
'''
ifaces = interfaces()
subnetworks = []
for ipv4_info in ifaces.values():
for ipv4 in ipv4_info.get('inet', []):
if ipv4['address'] == '127.0.0.1':
continue
network = calculate_subnet(ipv4['address'], ipv4['netmask'])
subnetworks.append(network)
return subnetworks
def in_subnet(cidr, addrs=None):
'''
Returns True if host is within specified subnet, otherwise False
'''
try:
netstart, netsize = cidr.split('/')
netsize = int(netsize)
except Exception:
log.error('Invalid CIDR \'{0}\''.format(cidr))
return False
netstart_bin = _ipv4_to_bits(netstart)
if netsize < 32 and len(netstart_bin.rstrip('0')) > netsize:
log.error('Invalid network starting IP \'{0}\' in CIDR '
'\'{1}\''.format(netstart, cidr))
return False
netstart_leftbits = netstart_bin[0:netsize]
if addrs is None:
addrs = ip_addrs()
for ip_addr in addrs:
if netsize == 32:
if netstart == ip_addr:
return True
else:
ip_leftbits = _ipv4_to_bits(ip_addr)[0:netsize]
if netstart_leftbits == ip_leftbits:
return True
return False
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
'''
ret = set()
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k, v) for k, v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv4_info in target_ifaces.values():
for ipv4 in ipv4_info.get('inet', []):
loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo'
if not loopback or include_loopback:
ret.add(ipv4['address'])
for secondary in ipv4_info.get('secondary', []):
addr = secondary.get('address')
if addr and secondary.get('type') == 'inet':
if include_loopback or (not include_loopback and not in_subnet('127.0.0.0/8', [addr])):
ret.add(addr)
return sorted(list(ret))
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
'''
ret = set()
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k, v) for k, v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv6_info in target_ifaces.values():
for ipv6 in ipv6_info.get('inet6', []):
if include_loopback or ipv6['address'] != '::1':
ret.add(ipv6['address'])
for secondary in ipv6_info.get('secondary', []):
addr = secondary.get('address')
if addr and secondary.get('type') == 'inet6':
if include_loopback or addr != '::1':
ret.add(addr)
return sorted(list(ret))
def hex2ip(hex_ip, invert=False):
'''
    Convert a hex string to an IP address; if the conversion fails, the original
    hex string is returned
'''
try:
hip = int(hex_ip, 16)
except ValueError:
return hex_ip
if invert:
return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255,
hip >> 16 & 255,
hip >> 8 & 255,
hip & 255)
return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255,
hip >> 16 & 255,
hip >> 8 & 255,
hip & 255)
def active_tcp():
'''
Return a dict describing all active tcp connections as quickly as possible
'''
ret = {}
if os.path.isfile('/proc/net/tcp'):
with open('/proc/net/tcp', 'rb') as fp_:
for line in fp_:
if line.strip().startswith('sl'):
continue
ret.update(_parse_tcp_line(line))
return ret
return ret
def local_port_tcp(port):
'''
Return a set of remote ip addrs attached to the specified local port
'''
ret = set()
if os.path.isfile('/proc/net/tcp'):
with open('/proc/net/tcp', 'rb') as fp_:
for line in fp_:
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = iter(iret).next()
if iret[sl]['local_port'] == port:
ret.add(iret[sl]['remote_addr'])
return ret
else: # Fallback to use 'lsof' if /proc not available
ret = remotes_on_local_tcp_port(port)
return ret
def remote_port_tcp(port):
'''
Return a set of ip addrs the current host is connected to on given port
'''
ret = set()
if os.path.isfile('/proc/net/tcp'):
with open('/proc/net/tcp', 'rb') as fp_:
for line in fp_:
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = iter(iret).next()
if iret[sl]['remote_port'] == port:
ret.add(iret[sl]['remote_addr'])
return ret
else: # Fallback to use 'lsof' if /proc not available
ret = remotes_on_remote_tcp_port(port)
return ret
def _parse_tcp_line(line):
'''
Parse a single line from the contents of /proc/net/tcp
'''
ret = {}
comps = line.strip().split()
sl = comps[0].rstrip(':')
ret[sl] = {}
l_addr, l_port = comps[1].split(':')
r_addr, r_port = comps[2].split(':')
ret[sl]['local_addr'] = hex2ip(l_addr, True)
ret[sl]['local_port'] = int(l_port, 16)
ret[sl]['remote_addr'] = hex2ip(r_addr, True)
ret[sl]['remote_port'] = int(r_port, 16)
return ret
def remotes_on_local_tcp_port(port):
'''
Returns set of ipv4 host addresses of remote established connections
on local tcp port port.
Parses output of shell 'lsof' to get connections
$ sudo lsof -i4TCP:4505 -n
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN)
Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED)
Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED)
'''
port = int(port)
remotes = set()
try:
data = subprocess.check_output(['lsof', '-i4TCP:{0:d}'.format(port), '-n'])
except subprocess.CalledProcessError as ex:
log.error('Failed "lsof" with returncode = {0}'.format(ex.returncode))
raise
lines = data.split('\n')
for line in lines:
chunks = line.split()
if not chunks:
continue
# ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0',
# 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)']
#print chunks
if 'COMMAND' in chunks[0]:
continue # ignore header
if 'ESTABLISHED' not in chunks[-1]:
continue # ignore if not ESTABLISHED
# '127.0.0.1:4505->127.0.0.1:55703'
local, remote = chunks[8].split('->')
lhost, lport = local.split(':')
if int(lport) != port: # ignore if local port not port
continue
rhost, rport = remote.split(':')
remotes.add(rhost)
return remotes
def remotes_on_remote_tcp_port(port):
'''
Returns set of ipv4 host addresses which the current host is connected
to on given port
Parses output of shell 'lsof' to get connections
$ sudo lsof -i4TCP:4505 -n
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN)
Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED)
Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED)
'''
port = int(port)
remotes = set()
try:
data = subprocess.check_output(['lsof', '-i4TCP:{0:d}'.format(port), '-n'])
except subprocess.CalledProcessError as ex:
log.error('Failed "lsof" with returncode = {0}'.format(ex.returncode))
raise
lines = data.split('\n')
for line in lines:
chunks = line.split()
if not chunks:
continue
# ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0',
# 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)']
#print chunks
if 'COMMAND' in chunks[0]:
continue # ignore header
if 'ESTABLISHED' not in chunks[-1]:
continue # ignore if not ESTABLISHED
# '127.0.0.1:4505->127.0.0.1:55703'
local, remote = chunks[8].split('->')
rhost, rport = remote.split(':')
        if int(rport) != port:  # ignore if the remote port does not match
            continue
remotes.add(rhost)
return remotes
class IPv4Address(object):
'''
A very minimal subset of the IPv4Address object in the ip_address module.
'''
def __init__(self, address_str):
self.address_str = address_str
octets = self.address_str.split('.')
if len(octets) != 4:
raise ValueError(
'IPv4 addresses must be in dotted-quad form.'
)
try:
self.dotted_quad = [int(octet) for octet in octets]
except ValueError as err:
raise ValueError(
'IPv4 addresses must be in dotted-quad form. {0}'.format(err)
)
def __str__(self):
return self.address_str
def __repr__(self):
return 'IPv4Address("{0}")'.format(str(self))
def __cmp__(self, other):
return cmp(self.dotted_quad, other.dotted_quad)
@property
def is_private(self):
'''
:return: Returns True if the address is a non-routable IPv4 address.
Otherwise False.
'''
if 10 == self.dotted_quad[0]:
return True
if 172 == self.dotted_quad[0]:
return 16 <= self.dotted_quad[1] <= 31
if 192 == self.dotted_quad[0]:
return 168 == self.dotted_quad[1]
return False
@property
def is_loopback(self):
'''
:return: True if the address is a loopback address. Otherwise False.
'''
return 127 == self.dotted_quad[0]
``` |
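The pure helpers in this module are easy to exercise on their own. A small sketch, assuming this (Python 2-era) salt tree is importable; the addresses are example values.
```python
# Sketch exercising the standalone helpers defined above.
from salt.utils import network

print(network.cidr_to_ipv4_netmask(24))                           # 255.255.255.0
print(network.get_net_size('255.255.255.0'))                      # 24
print(network.calculate_subnet('192.168.1.42', '255.255.255.0'))  # 192.168.1.0/24
print(network.hex2ip('7F000001'))                                 # 127.0.0.1
print(network.IPv4Address('10.1.2.3').is_private)                 # True
```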
{
"source": "JHenrysson/dice_game",
"score": 3
} |
#### File: dice_game/dice/die_test.py
```python
import unittest
import die
class MyTestCase(unittest.TestCase):
"""Tests for die class."""
def test_not_less_than_one(self):
"""Check roll value is greater than or equal to 1."""
self.assertGreaterEqual(die.Die().roll(), 1)
def test_not_greater_than_six(self):
"""Check roll value is less than or equal to six."""
self.assertLessEqual(die.Die().roll(), 6)
if __name__ == '__main__':
unittest.main()
```
#### File: dice_game/dice/player.py
```python
class Player:
"""Player class."""
    name = None
    highscore = None  # Winning the game in the least number of rolls
    scores = []
    def __init__(self, name):
        """Initialise the object."""
        self.name = name
        # Give each player its own list; a shared class-level list would mix
        # scores between Player instances.
        self.scores = []
    def get_name(self):
        """Return the player's name."""
        return self.name
    def set_name(self, name):
        """Update the player's name."""
        self.name = name
    def get_score(self):
        """Return the player's score for the current game."""
        return sum(self.scores)
    def get_turns(self):
        """Return the total number of turns the player has taken."""
        return len(self.scores)
    def set_score(self, score):
        """Update the player's score for the current game."""
        self.scores.append(score)
def reset_score(self):
"""Reset object score to empty list."""
self.scores = []
def update_highscore(self, score):
"""Check if new highscore and update if it is."""
if self.highscore is None:
self.highscore = score
elif self.highscore < score:
self.highscore = score
```
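A short usage sketch of the class above, mirroring how the tests drive it; the name and roll values are arbitrary.
```python
# Sketch of driving the Player class; values are arbitrary.
import player

p = player.Player('Paul')
p.set_score(4)
p.set_score(6)
print(p.get_name())   # Paul
print(p.get_score())  # 10
print(p.get_turns())  # 2
p.update_highscore(p.get_score())
print(p.highscore)    # 10
p.reset_score()
print(p.get_score())  # 0
```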
#### File: dice_game/dice/player_test.py
```python
import unittest
import player
class PlayerTest(unittest.TestCase):
"""Test player class."""
def test_init_default_object(self):
"""Instantiate object and check properties."""
res = player.Player('Paul')
exp = player.Player
self.assertIsInstance(res, exp)
def test_get_name(self):
"""Get players name and check it is correct."""
test_player = player.Player('Paul')
res = test_player.get_name()
exp = 'Paul'
self.assertEqual(res, exp)
def test_set_name(self):
"""Update players name and check its value."""
test_player = player.Player('Paul')
test_player.set_name('Joselyn')
res = test_player.get_name()
exp = 'Joselyn'
self.assertEqual(res, exp)
def test_set_score(self):
"""Check the scores list is updated correctly."""
test_player = player.Player('Paul')
test_player.set_score(0)
res = test_player.scores[-1]
exp = 0
self.assertEqual(res, exp)
def test_update_highscore(self):
"""Check the players highscore is updated correctly."""
a_player = player.Player('test')
a_player.update_highscore(100)
res = a_player.highscore
exp = 100
self.assertEqual(res, exp)
a_player.update_highscore(110)
res = a_player.highscore
exp = 110
self.assertEqual(res, exp)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhensley707/request-headers",
"score": 2
} |
#### File: request-headers/app/views.py
```python
from datetime import datetime
from flask import Flask, render_template, request
from . import app
@app.route("/")
def home():
remote_addr = dict(remote_addr=request.remote_addr)
headers = {**remote_addr, **request.headers}
return render_template("home.html", data=headers)
``` |
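The view can be exercised without a running server through Flask's test client. A minimal sketch, assuming the package is importable as `app` (as the relative import above implies) and that a `home.html` template exists.
```python
# Sketch using Flask's built-in test client; the header value is arbitrary.
from app import app

with app.test_client() as client:
    response = client.get("/", headers={"X-Forwarded-For": "203.0.113.7"})
    print(response.status_code)   # 200 when home.html renders
    print(response.data[:200])    # start of the rendered header listing
```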
{
"source": "jheo4/ffmpeg_streaming",
"score": 2
} |
#### File: python/pyas/variant_generator.py
```python
import sys
import os
# ffmpeg
import ffmpeg
# pyas
from pyas.error import raise_exception
from pyas.prober import Prober
class VariantGenerator:
__instance = None
prober = Prober.get_instance()
@staticmethod
def get_instance():
if VariantGenerator.__instance is None:
VariantGenerator.__instance = VariantGenerator()
return VariantGenerator.__instance
def __init__(self):
if VariantGenerator.__instance is not None:
raise Exception("singleton violation, use get_instance")
def calculate_bitrate_variants(self, original_bitrate, base_bitrates):
if(original_bitrate > base_bitrates):
offset = int(base_bitrates/4)
max_bitrate = base_bitrates
else:
offset = int(original_bitrate/4)
max_bitrate = original_bitrate
half_offset = int(offset/2)
results = []
for i in range (3, -1, -1):
br = max_bitrate - (offset * i)
results.append(str(br))
return results
def get_variant_list(self, video_name):
width, height, framerate, bitrate = self.prober.get_video_info(video_name)
resolutions = []
zipped_bitrates = []
base_bitrates = {
'480': 1000000,
'720': 2000000,
'1080':4000000,
'2160':8000000
}
if(height >= 480): # 480
resolutions.append("720x480")
bitrates = self.calculate_bitrate_variants(bitrate, base_bitrates['480'])
zipped_bitrates.append(bitrates)
if(height >= 720): # 720
resolutions.append("1280x720")
bitrates = self.calculate_bitrate_variants(bitrate, base_bitrates['720'])
zipped_bitrates.append(bitrates)
if(height >= 1080): # 1080
resolutions.append("1920x1080")
bitrates = \
self.calculate_bitrate_variants(bitrate, base_bitrates['1080'])
zipped_bitrates.append(bitrates)
if(height >= 2160): # 4K
resolutions.append("3840x2160")
bitrates = \
self.calculate_bitrate_variants(bitrate, base_bitrates['2160'])
zipped_bitrates.append(bitrates)
return resolutions, zipped_bitrates
def get_dash_variant_list(self, input):
width, height, framerate, bitrate = self.prober.get_video_info(input)
resolutions = []
bitrates = []
if framerate > 35:
base_bitrates = {
'480': 4000000,
'720': 7500000,
'1080': 12000000,
'1440': 24000000,
'2160': 50000000,
}
else:
base_bitrates = {
'480': 2500000,
'720': 5000000,
'1080': 8000000,
'1440': 16000000,
'2160': 32000000,
}
if(height >= 480): # 480
resolutions.append("720x480")
br = bitrate if bitrate < base_bitrates['480'] else base_bitrates['480']
bitrates.append(str(br))
if(height >= 720): # 720
resolutions.append("1280x720")
br = bitrate if bitrate < base_bitrates['720'] else base_bitrates['720']
bitrates.append(str(br))
if(height >= 1080): # 1080
resolutions.append("1920x1080")
br = bitrate if bitrate < base_bitrates['1080'] else base_bitrates['1080']
bitrates.append(str(br))
if(height >= 1440): # 1440
resolutions.append("2560x1440")
br = bitrate if bitrate < base_bitrates['1440'] else base_bitrates['1440']
bitrates.append(str(br))
if(height >= 2160): # 2160
resolutions.append("3840x2160")
br = bitrate if bitrate < base_bitrates['2160'] else base_bitrates['2160']
bitrates.append(str(br))
return resolutions, bitrates
if __name__ == "__main__":
import time
repo_home = os.environ['REPO_HOME']
input_path = os.path.join(repo_home, "input/5sec.mp4")
variant_generator = VariantGenerator.get_instance()
start_time = time.time()
resolutions, zipped_bitrates = variant_generator.get_variant_list(input_path)
execution_time = (time.time() - start_time) * 1000
print("* * * * * * * * * * * *")
print("variant_generator execution time: {0:.3f}ms".format(execution_time))
print("\t", resolutions)
print("\t", zipped_bitrates)
print("* * * * * * * * * * * *")
start_time = time.time()
resolutions, zipped_bitrates = \
variant_generator.get_dash_variant_list(input_path)
execution_time = (time.time() - start_time) * 1000
print("* * * * * * * * * * * *")
print("dash variants texecution time: {0:.3f}ms".format(execution_time))
print("\t", resolutions)
print("\t", zipped_bitrates)
print("* * * * * * * * * * * *")
``` |
{
"source": "jheo4/incubator-tvm",
"score": 2
} |
#### File: apps/benchmark/mobile_gpu_imagenet_bench.py
```python
import argparse
import numpy as np
import tvm
from tvm.contrib.util import tempdir
import tvm.contrib.graph_runtime as runtime
from tvm import relay
from util import get_network, print_progress
def evaluate_network(network, target, target_host, dtype, repeat):
# connect to remote device
tracker = tvm.rpc.connect_tracker(args.host, args.port)
remote = tracker.request(args.rpc_key)
print_progress(network)
net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)
print_progress("%-20s building..." % network)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(
net, target=target, target_host=target_host, params=params)
tmp = tempdir()
if 'android' in str(target) or 'android' in str(target_host):
from tvm.contrib import ndk
filename = "%s.so" % network
lib.export_library(tmp.relpath(filename), ndk.create_shared)
else:
filename = "%s.tar" % network
lib.export_library(tmp.relpath(filename))
# upload library and params
print_progress("%-20s uploading..." % network)
ctx = remote.context(str(target), 0)
remote.upload(tmp.relpath(filename))
rlib = remote.load_module(filename)
module = runtime.create(graph, rlib, ctx)
data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))
module.set_input('data', data_tvm)
module.set_input(**params)
# evaluate
print_progress("%-20s evaluating..." % network)
ftimer = module.module.time_evaluator("run", ctx, number=1, repeat=repeat)
prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond
print("%-20s %-19s (%s)" % (network, "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--network", type=str, choices=
['resnet-18', 'resnet-34', 'resnet-50',
'vgg-16', 'vgg-19', 'densenet-121', 'inception_v3',
'mobilenet', 'squeezenet_v1.0', 'squeezenet_v1.1'],
help='The name of neural network')
parser.add_argument("--model", type=str, choices=
['rk3399'], default='rk3399',
help="The model of the test device. If your device is not listed in "
"the choices list, pick the most similar one as argument.")
parser.add_argument("--host", type=str, default='localhost')
parser.add_argument("--port", type=int, default=9190)
parser.add_argument("--rpc-key", type=str, required=True)
parser.add_argument("--repeat", type=int, default=30)
parser.add_argument("--dtype", type=str, default='float32')
args = parser.parse_args()
if args.network is None:
networks = ['squeezenet_v1.1', 'mobilenet', 'resnet-18', 'vgg-16']
else:
networks = [args.network]
target = tvm.target.mali(model=args.model)
target_host = tvm.target.arm_cpu(model=args.model)
print("--------------------------------------------------")
print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)"))
print("--------------------------------------------------")
for network in networks:
evaluate_network(network, target, target_host, args.dtype, args.repeat)
```
#### File: tvm/exec/rpc_server.py
```python
from __future__ import absolute_import
import argparse
import ast
import json
import multiprocessing
import sys
import logging
import tvm
from tvm import micro
from .. import rpc
def main(args):
"""Main function
Parameters
----------
args : argparse.Namespace
parsed args from command-line invocation
"""
if args.tracker:
url, port = args.tracker.rsplit(":", 1)
port = int(port)
tracker_addr = (url, port)
if not args.key:
raise RuntimeError(
"Need key to present type of resource when tracker is available")
else:
tracker_addr = None
if args.utvm_dev_config or args.utvm_dev_id:
init_utvm(args)
server = rpc.Server(args.host,
args.port,
args.port_end,
key=args.key,
tracker_addr=tracker_addr,
load_library=args.load_library,
custom_addr=args.custom_addr,
silent=args.silent)
server.proc.join()
def init_utvm(args):
"""MicroTVM-specific RPC initialization
Parameters
----------
args : argparse.Namespace
parsed args from command-line invocation
"""
if args.utvm_dev_config and args.utvm_dev_id:
raise RuntimeError('only one of --utvm-dev-config and --utvm-dev-id allowed')
if args.utvm_dev_config:
with open(args.utvm_dev_config, 'r') as dev_conf_file:
dev_config = json.load(dev_conf_file)
else:
dev_config_args = ast.literal_eval(args.utvm_dev_config_args)
default_config_func = micro.device.get_device_funcs(args.utvm_dev_id)['default_config']
dev_config = default_config_func(*dev_config_args)
if args.utvm_dev_config or args.utvm_dev_id:
# add MicroTVM overrides
@tvm.register_func('tvm.rpc.server.start', override=True)
def server_start():
# pylint: disable=unused-variable
session = micro.Session(dev_config)
session._enter()
@tvm.register_func('tvm.rpc.server.shutdown', override=True)
def server_shutdown():
session._exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, default="0.0.0.0",
help='the hostname of the server')
parser.add_argument('--port', type=int, default=9090,
help='The port of the RPC')
parser.add_argument('--port-end', type=int, default=9199,
help='The end search port of the RPC')
parser.add_argument('--tracker', type=str,
help="The address of RPC tracker in host:port format. "
"e.g. (10.77.1.234:9190)")
parser.add_argument('--key', type=str, default="",
help="The key used to identify the device type in tracker.")
parser.add_argument('--silent', action='store_true',
help="Whether run in silent mode.")
parser.add_argument('--load-library', type=str,
help="Additional library to load")
parser.add_argument('--no-fork', dest='fork', action='store_false',
help="Use spawn mode to avoid fork. This option \
is able to avoid potential fork problems with Metal, OpenCL \
and ROCM compilers.")
parser.add_argument('--custom-addr', type=str,
help="Custom IP Address to Report to RPC Tracker")
parser.add_argument('--utvm-dev-config', type=str,
help='JSON config file for the target device (if using MicroTVM)')
parser.add_argument('--utvm-dev-id', type=str,
help='Unique ID for the target device (if using MicroTVM)')
parser.add_argument('--utvm-dev-config-args', type=str,
help=('Python list of literals required to generate a default'
' MicroTVM config (if --utvm-dev-id is specified)'))
parser.set_defaults(fork=True)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.fork is False:
if sys.version_info[0] < 3:
raise RuntimeError(
"Python3 is required for spawn mode."
)
multiprocessing.set_start_method('spawn')
else:
if not args.silent:
logging.info("If you are running ROCM/Metal, fork will cause "
"compiler internal error. Try to launch with arg ```--no-fork```")
main(args)
```
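The same server can be started from Python instead of the CLI entry point above. A minimal sketch mirroring the `rpc.Server(...)` call in `main()`; the key and tracker address are placeholder values.
```python
# Sketch of starting the RPC server programmatically; key/tracker are placeholders.
from tvm import rpc

server = rpc.Server('0.0.0.0', 9090, 9199,
                    key='rk3399',
                    tracker_addr=('10.77.1.234', 9190))
server.proc.join()
```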
#### File: python/unittest/test_autotvm_feature.py
```python
import numpy as np
import tvm
from tvm.autotvm import feature
def test_iter_feature_gemm():
N = 128
k = tvm.reduce_axis((0, N), 'k')
A = tvm.placeholder((N, N), name='A')
B = tvm.placeholder((N, N), name='B')
C = tvm.compute(
A.shape,
lambda y, x: tvm.sum(A[y, k] * B[k, x], axis=k),
name='C')
s = tvm.create_schedule(C.op)
feas = feature.get_itervar_feature(s, [A, B, C], take_log=False)
expected = [
{
'_attr_': [128, 1, 128, 2097152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
'A_0': [128, -1, 16384, 128, 0, 0], 'B_0': [0, -1, 16384, 128, 0, 0],
'C_0': [128, -1, 16384, 128, 0, 0], 'C_1': [128, -1, 16384, 128, 0, 0],
},
{
'_attr_': [128, 2, 16384, 16384, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
'A_0': [0, -1, 128, 128, 0, 0], 'B_0': [1, -1, 16384, 1, 0, 0],
'C_0': [1, -1, 128, 128, 0, 0], 'C_1': [1, -1, 128, 128, 0, 0],
},
{
'_attr_': [128, 3, 2097152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
'A_0': [1, -1, 128, 1, 0, 0], 'B_0': [128, -1, 128, 1, 0, 0],
'C_1': [0, -1, 1, 128, 0, 0], 'C_2': [0, -1, 1, 128, 0, 0],
}
]
for ans, row in zip(expected, feas):
for pair in row:
if pair[0] not in ans:
continue
assert ans[pair[0]] == pair[1:], "%s: %s vs %s" % (pair[0], ans[pair[0]], pair[1:])
def test_curve_feature_gemm():
N = 128
k = tvm.reduce_axis((0, N), 'k')
A = tvm.placeholder((N, N), name='A')
B = tvm.placeholder((N, N), name='B')
C = tvm.compute(
A.shape,
lambda y, x: tvm.sum(A[y, k] * B[k, x], axis=k),
name='C')
s = tvm.create_schedule(C.op)
feas = feature.get_buffer_curve_sample_flatten(s, [A, B, C], sample_n=30)
# sample_n * #buffers * #curves * 2 numbers per curve
assert len(feas) == 30 * 3 * 4 * 2
def test_feature_shape():
"""test the dimensions of flatten feature are the same"""
N = 1024
n_sample = 100
def get_gemm_feature(target):
k = tvm.reduce_axis((0, N), 'k')
A = tvm.placeholder((N, N), name='A')
B = tvm.placeholder((N, N), name='B')
C = tvm.compute(A.shape, lambda y, x: tvm.sum(A[y, k] * B[k, x], axis=k),
name='C')
s = tvm.create_schedule(C.op)
y, x = s[C].op.axis
axes = list(s[C].tile(y, x, 8, 8)) + [k]
perm = np.random.permutation(5)
axes = [axes[x] for x in perm]
s[C].reorder(*axes)
if "gpu" in target.keys:
pick = []
# filter out reduction axis
for i in range(len(perm)):
if perm[i] != 4:
pick.append(axes[i])
s[C].bind(pick[0], tvm.thread_axis("blockIdx.x"))
s[C].bind(pick[1], tvm.thread_axis("vthread"))
s[C].bind(pick[2], tvm.thread_axis("threadIdx.y"))
with target:
feas = feature.get_itervar_feature(s, [A, B, C])
feas = feature.flatten_itervar_feature(feas)
return feas
targets = [
tvm.target.cuda(),
tvm.target.mali(),
tvm.target.arm_cpu(),
]
for target in targets:
dim = len(get_gemm_feature(target))
for i in range(n_sample):
assert dim == len(get_gemm_feature(target)), "dimensions of feature do not match" \
" for different configurations"
if __name__ == "__main__":
test_iter_feature_gemm()
test_curve_feature_gemm()
test_feature_shape()
```
#### File: python/unittest/test_schedule_graph.py
```python
import tvm
def test_scan():
m = tvm.var("m")
n = tvm.var("n")
x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x")
s_state = tvm.placeholder((m, n))
s_init = tvm.compute((1, n), lambda _, i: x[0, i], name="s_init")
x_trans = tvm.compute((m, n), lambda i, j: x[i, j] + 1, name="x_trans")
s_up1 = tvm.compute((m, n), lambda t, i: s_state[t - 1, i] + 1, name="up1")
s_update = tvm.compute((m, n), lambda t, i: s_up1[t, i] + x_trans[t, i], name="update")
s_scan = tvm.scan(s_init, s_update, s_state)
def test_getbody():
body = tvm.schedule.ScanGetBody(s_scan.op)
assert set(body) == set([s_scan.op, s_update.op, s_up1.op])
def test_attach_path():
s = tvm.create_schedule(s_scan.op)
s[x_trans].compute_at(s[s_update], s_update.op.axis[0])
apath = tvm.schedule.CreateAttachPath(s)
assert(tuple(apath[s_update.op]) == tuple([s_scan.op.scan_axis]))
assert(tuple(apath[x_trans.op]) == tuple([s_update.op.axis[0], s_scan.op.scan_axis]))
def test_fix_pt():
body = tvm.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.schedule.ScanFixPointAnalysis(s_scan.op, body)
assert(fxpt[s_scan.spatial_axis_[0]].value != 0)
def test_scan_fix_point():
m = tvm.var("m")
n = tvm.var("n")
l = tvm.var("l")
x = tvm.compute((l, m, n), lambda *i: tvm.const(1, "float32"), name="x")
s_state = tvm.placeholder((l, m, n))
s_init = tvm.compute((1, m, n), lambda _, i, j: x[0, i, j], name="s_init")
def test_scan0():
s_update = tvm.compute((l, m, n),
lambda t, i, j: x[t, j, i] + s_state[t-1, i, j], name="update")
s_scan = tvm.scan(s_init, s_update, s_state)
body = tvm.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.schedule.ScanFixPointAnalysis(s_scan.op, body)
assert(fxpt[s_scan.op.spatial_axis_[0]].value == 1)
assert(fxpt[s_scan.op.spatial_axis_[1]].value == 1)
def test_scan1():
s_update = tvm.compute((l, m, n),
lambda t, i, j: x[t, j, i] + s_state[t-1, j, i], name="update")
s_scan = tvm.scan(s_init, s_update, s_state)
body = tvm.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.schedule.ScanFixPointAnalysis(s_scan.op, body)
assert(fxpt[s_scan.op.spatial_axis_[0]].value == 0)
assert(fxpt[s_scan.op.spatial_axis_[1]].value == 0)
def test_scan3_not_exact_reach():
s_h1 = tvm.compute((l, n, m), lambda t, j, i: s_state[t-1, i, j], name="h1")
s_h2 = tvm.compute((l, m, n), lambda t, i, j: s_state[t-1, i, 10] * 2, name="h1")
s_update = tvm.compute((l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update")
s_scan = tvm.scan(s_init, s_update, s_state)
body = tvm.schedule.ScanGetBody(s_scan.op)
fxpt = tvm.schedule.ScanFixPointAnalysis(s_scan.op)
assert(fxpt[s_scan.op.spatial_axis_[0]].value == 1)
assert(fxpt[s_scan.op.spatial_axis_[1]].value == 0)
def test_scan4_reach_other():
s_h1 = tvm.compute((l, n, m), lambda t, j, i: s_state[t-1, j, j], name="h1")
s_h2 = tvm.compute((l, m, n), lambda t, i, j: s_state[t-1, i, j] * 2, name="h1")
s_update = tvm.compute((l, m, n),
lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update")
s_scan = tvm.scan(s_init, s_update, s_state)
fxpt = tvm.schedule.ScanFixPointAnalysis(s_scan.op)
assert(fxpt[s_scan.op.spatial_axis_[0]].value == 0)
assert(fxpt[s_scan.op.spatial_axis_[1]].value == 0)
def test_scan5_multi_output():
m = tvm.var("m")
n = tvm.var("n")
x1 = tvm.placeholder((m, n))
s1 = tvm.placeholder((m, n))
x2 = tvm.placeholder((m, n))
s2 = tvm.placeholder((m, n))
s1_init = tvm.compute((1, n), lambda _, i: x1[0, i])
s2_init = tvm.compute((1, n), lambda _, i: x2[0, i])
s1_update = tvm.compute((m, n), lambda t, i: s1[t-1, i] + x1[t, i])
s2_update = tvm.compute((m, n), lambda t, i: x2[t, i] + s2[t-1,i])
r0, r1 = tvm.scan([s1_init, s2_init],
[s1_update, s2_update],
[s1, s2])
body = tvm.schedule.ScanGetBody(r0.op)
fxpt = tvm.schedule.ScanFixPointAnalysis(r0.op)
assert(fxpt[r1.op.spatial_axis_[0]].value == 1)
test_scan0()
test_scan1()
test_scan3_not_exact_reach()
test_scan4_reach_other()
test_scan5_multi_output()
def test_create_read_graph():
m = tvm.var('m')
l = tvm.var('l')
A = tvm.placeholder((m, l), name='A')
A1 = tvm.compute((m, l), lambda i, j: A[i, j])
A2 = tvm.compute((m, l), lambda i, j: A1[i, j] + 3)
g = tvm.schedule.CreateReadGraph([A2.op])
assert g[A2.op][0] == A1
assert g[A1.op][0] == A
post_order = tvm.schedule.PostDFSOrder([A2.op], g)
assert(post_order[0] == A.op)
assert(post_order[1] == A1.op)
if __name__ == "__main__":
test_scan()
test_create_read_graph()
test_scan_fix_point()
```
#### File: topi/sparse/dense.py
```python
from __future__ import absolute_import
import tvm
from .. import tag
from ..util import simplify
def dense_si(data, indices, indptr, weight, bias=None):
# pylint: disable=invalid-name
"""The implementation of dense in topi, assuming sparse input.
Parameters
----------
data : tvm.Tensor
1-D with shape [num_nonzeros]
indices : tvm.Tensor
1-D with shape [num_nonzeros]
indptr : tvm.Tensor
1-D with shape [m+1]
weight : tvm.Tensor
2-D with shape [k, n]
bias : tvm.Tensor, optional
1-D with shape [m]
Returns
-------
output : tvm.Tensor
2-D with shape [m, n]
"""
assert len(data.shape) == 1 and len(indices.shape) == 1 and len(indptr.shape) == 1 \
and len(weight.shape) == 2, "only support 2-dim dense"
assert isinstance(weight, tvm.tensor.Tensor), \
"weight matrix is assumed to be tvm.Tensor, but weight is `%s`" % (type(weight))
if bias is not None:
assert len(bias.shape) == 1
dtype = data.dtype
M = simplify(indptr.shape[0]-1)
N, _ = weight.shape
def dense_default_ir(data, indices, indptr, weight, out):
"""Define IR for Dense"""
dtype = data.dtype
irb = tvm.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
indices_ptr = irb.buffer_ptr(indices)
indptr_ptr = irb.buffer_ptr(indptr)
weight_ptr = irb.buffer_ptr(weight)
out_ptr = irb.buffer_ptr(out)
M = simplify(indptr.shape[0]-1)
N, K = weight.shape
with irb.for_range(0, N, for_type="vectorize", name='n') as n:
with irb.for_range(0, M, for_type="parallel", name='m') as m:
dot = irb.allocate(dtype, (1,), name='dot', scope='local')
out_ptr[m*N+n] = tvm.const(0, dtype)
dot[0] = tvm.const(0, dtype)
row_start = indptr_ptr[m]
row_elems = indptr_ptr[m+1]-row_start
with irb.for_range(0, row_elems, name='k') as k:
elem = row_start+k
dot[0] += data_ptr[elem] * weight_ptr[indices_ptr[elem]+n*K]
out_ptr[m*N+n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = tvm.extern(oshape, [data, indices, indptr, weight],
lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="dense", dtype=dtype, name='out')
if bias is not None:
matmul = tvm.compute(oshape, lambda i, j: matmul[i, j] + bias[j], \
tag=tag.BROADCAST)
return matmul
def dense_sw(data, w_data, w_indices, w_indptr, bias=None):
# pylint: disable=invalid-name
"""The implementation of dense in topi, assuming sparse weight.
Parameters
----------
data : tvm.Tensor
2-D with shape [m, k]
w_data : tvm.Tensor
1-D with shape [nonzeros]
w_indices : tvm.Tensor
1-D with shape [nonzeros]
w_indptr : tvm.Tensor
1-D with shape [n+1]
bias : tvm.Tensor, optional
1-D with shape [n]
Returns
-------
output : tvm.Tensor
2-D with shape [m, n]
"""
assert len(w_data.shape) == 1 and len(w_indices.shape) == 1 and len(w_indptr.shape) == 1 \
and len(data.shape) == 2, "only support 2-dim dense"
assert isinstance(data, tvm.tensor.Tensor), \
"data matrix is assumed to be tvm.Tensor, but data is `%s`" % (type(data))
if bias is not None:
assert len(bias.shape) == 1
dtype = data.dtype
M, _ = data.shape
N = simplify(w_indptr.shape[0]-1)
def dense_default_ir(data, w_data, w_indices, w_indptr, out):
"""Define IR for Dense"""
dtype = data.dtype
irb = tvm.ir_builder.create()
data_ptr = irb.buffer_ptr(data)
w_data_ptr = irb.buffer_ptr(w_data)
w_indices_ptr = irb.buffer_ptr(w_indices)
w_indptr_ptr = irb.buffer_ptr(w_indptr)
out_ptr = irb.buffer_ptr(out)
M, K = data.shape
N = simplify(w_indptr.shape[0]-1)
with irb.for_range(0, M, for_type="vectorize", name='m') as m:
with irb.for_range(0, N, for_type="parallel", name='n') as n:
dot = irb.allocate(dtype, (1,), name='dot', scope='local')
out_ptr[m*N+n] = tvm.const(0, dtype)
dot[0] = tvm.const(0, dtype)
row_start = w_indptr_ptr[n]
row_elems = w_indptr_ptr[n+1]-row_start
with irb.for_range(0, row_elems, name='k') as k:
elem = row_start+k
dot[0] += w_data_ptr[elem] * data_ptr[w_indices_ptr[elem]+m*K]
out_ptr[m*N+n] += dot[0]
return irb.get()
oshape = (M, N)
matmul = tvm.extern(oshape, [data, w_data, w_indices, w_indptr],
lambda ins, outs: dense_default_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
tag="dense", dtype=dtype, name='out')
if bias is not None:
matmul = tvm.compute(oshape, lambda i, j: matmul[i, j] + bias[j], \
tag=tag.BROADCAST)
return matmul
def dense(data, weight, bias=None):
"""Applies a linear transformation: :math:`Y = XW^T + b`.
Either data or weight should be tvm.contrib.sparse.CSRNDArray.
Parameters
----------
data : tvm.contrib.sparse.CSRNDArray or tvm.tensor.Tensor
2-D with shape [batch, in_dim]
weight : tvm.tensor.Tensor or tvm.contrib.sparse.CSRNDArray
2-D with shape [out_dim, in_dim]
bias : tvm.tensor.Tensor, optional
1-D with shape [out_dim]
Returns
-------
output : tvm.Tensor
2-D with shape [batch, out_dim]
"""
ret = None
if isinstance(data, tvm.contrib.sparse.CSRPlaceholderOp) and \
isinstance(weight, tvm.tensor.Tensor):
ret = dense_si(data.data, data.indices, data.indptr, weight, bias)
elif isinstance(data, tvm.tensor.Tensor) and \
isinstance(weight, tvm.contrib.sparse.CSRPlaceholderOp):
ret = dense_sw(data, weight.data, weight.indices, weight.indptr, bias)
else:
raise NotImplementedError("implementation for %s as data and %s as weights "
"is not supported yet." % (type(data), type(weight)))
return ret
```
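A hedged usage sketch of `dense()` with a CSR-encoded input matrix. The `tvm.contrib.sparse.placeholder` helper and the `topi.sparse.dense` import path are assumptions inferred from the `.data`/`.indices`/`.indptr` attributes used above and the module's location; shapes follow the `dense()` docstring ([batch, in_dim] data, [out_dim, in_dim] weight).

```python
import tvm
import tvm.contrib.sparse as tvmsp  # assumed helper module
import topi

batch, in_dim, out_dim = 32, 64, 16
nnz = tvm.var("nnz")

# Sparse data matrix; the CSR placeholder exposes .data/.indices/.indptr.
A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nnz,
                      dtype="float32", name="A")
W = tvm.placeholder((out_dim, in_dim), dtype="float32", name="W")
B = tvm.placeholder((out_dim,), dtype="float32", name="B")

Y = topi.sparse.dense(A, W, B)  # dispatches to dense_si() above
s = tvm.create_schedule(Y.op)
```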
#### File: tests/python/test_topi_matmul.py
```python
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
def with_tvm(lam, *args):
""" Take numpy arrays as args, convert them to TVM tensors and call `lam`.
Result of lambda is converted back to numpy array and returned.
"""
ctx = tvm.cpu(0)
pls = [] # placeholders
vals_nd = [] # initial values
for i,arg in enumerate(args):
pls.append(tvm.placeholder(arg.shape, name='pl'+str(i)))
vals_nd.append(tvm.nd.array(arg, ctx))
out = lam(*pls)
out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), ctx)
s = tvm.create_schedule([out.op])
m = tvm.build(s, pls + [out], "llvm")
m(*(vals_nd+[out_nd]))
return out_nd.asnumpy()
def verify_matmul(sa, sb, transp_a, transp_b):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.matmul(np.transpose(a) if transp_a else a,
np.transpose(b) if transp_b else b)
c2 = with_tvm(lambda A,B: topi.matmul(A,B,transp_a,transp_b), a,b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_matmul():
verify_matmul((1,1),(1,1),False,False)
verify_matmul((1,1),(1,1),True,True)
verify_matmul((2,2),(2,2),False,False)
verify_matmul((2,2),(2,2),True,True)
verify_matmul((2,3),(3,5),False,False)
verify_matmul((5,3),(3,2),False,False)
verify_matmul((3,5),(3,2),True,False)
verify_matmul((3,5),(2,3),True,True)
def verify_tensordot(sa, sb, axes):
a = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
b = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
c1 = np.tensordot(a, b, axes)
c2 = with_tvm(lambda A, B: topi.tensordot(A, B, axes), a, b)
tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
def test_tensordot():
verify_tensordot((3), (3), 0)
verify_tensordot((2, 3), (3, 5), 1)
verify_tensordot((2, 2, 3), (2, 3, 5), 2)
verify_tensordot((2, 2, 3, 4), (2, 3, 4, 5), 3)
verify_tensordot((3, 2, 2), (2, 3, 5), (1, 0))
verify_tensordot((3, 2, 2), (2, 3, 5), ((1, 0), (0, 1)))
verify_tensordot((4, 3, 2, 2), (2, 4, 3, 5), ((1, 2, 0), (2, 0, 1)))
if __name__ == "__main__":
test_matmul()
test_tensordot()
```
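The `with_tvm` helper above is a general compile-and-run wrapper, not specific to matmul. A hedged sketch of reusing it for another topi operator (here `topi.add`, assumed to be available in the same topi version):

```python
import numpy as np

# Illustrative reuse of with_tvm() for an elementwise op.
a = np.random.rand(4, 4).astype(np.float32)
b = np.random.rand(4, 4).astype(np.float32)
c = with_tvm(lambda A, B: topi.add(A, B), a, b)
np.testing.assert_allclose(c, a + b, rtol=1e-5)
```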
#### File: tests/python/test_topi_sort.py
```python
from __future__ import print_function
import numpy as np
import tvm
import topi
import topi.testing
def verify_argsort(axis, is_ascend):
dshape = (20, 100)
data_dtype = "float32"
data = tvm.placeholder(dshape, name="data", dtype=data_dtype)
perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
np.random.shuffle(perm)
np_data = perm.reshape(dshape)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
if axis == 0:
np_indices = np_indices[:dshape[axis], :]
else:
np_indices = np_indices[:, :dshape[axis]]
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
out = topi.argsort(data, axis=axis, is_ascend=is_ascend)
s = topi.generic.schedule_argsort(out)
tvm_data = tvm.nd.array(np_data, ctx)
tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), ctx)
f = tvm.build(s, [data, out], device)
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.asnumpy(), np_indices.astype(data_dtype), rtol=1e0)
for device in ['llvm', 'cuda', 'opencl']:
check_device(device)
def verify_topk(k, axis, ret_type, is_ascend, dtype):
shape = (20, 100)
data_dtype = "float32"
data = tvm.placeholder(shape, name="data", dtype=data_dtype)
np_data = np.random.uniform(size=shape).astype(data_dtype)
if is_ascend:
np_indices = np.argsort(np_data, axis=axis)
else:
np_indices = np.argsort(-np_data, axis=axis)
kk = k if k >= 1 else shape[axis]
if axis == 0:
np_indices = np_indices[:kk, :]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[1]):
np_values[:, i] = np_data[np_indices[:, i], i]
else:
np_indices = np_indices[:, :kk]
np_values = np.zeros(np_indices.shape).astype(data_dtype)
for i in range(shape[0]):
np_values[i, :] = np_data[i, np_indices[i, :]]
np_indices = np_indices.astype(dtype)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
outs = topi.topk(data, k, axis, ret_type, is_ascend, dtype)
outs = outs if isinstance(outs, list) else [outs]
s = topi.generic.schedule_topk(outs)
tvm_data = tvm.nd.array(np_data, ctx)
tvm_res = []
for t in outs:
tvm_res.append(tvm.nd.empty(t.shape, dtype=t.dtype, ctx=ctx))
f = tvm.build(s, [data] + outs, device)
f(tvm_data, *tvm_res)
if ret_type == "both":
tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_values)
tvm.testing.assert_allclose(tvm_res[1].asnumpy(), np_indices)
elif ret_type == "values":
tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_values)
else:
tvm.testing.assert_allclose(tvm_res[0].asnumpy(), np_indices)
for device in ['llvm', 'cuda', 'opencl']:
check_device(device)
def test_argsort():
np.random.seed(0)
for axis in [0, -1, 1]:
verify_argsort(axis, True)
verify_argsort(axis, False)
def test_topk():
np.random.seed(0)
for k in [0, 1, 5]:
for axis in [0, -1, 1]:
for ret_type in ["both", "values", "indices"]:
verify_topk(k, axis, ret_type, True, "int64")
verify_topk(k, axis, ret_type, False, "float32")
if __name__ == "__main__":
test_argsort()
test_topk()
``` |
{
"source": "jheo4/zpipe",
"score": 3
} |
#### File: zpipe/stages/stage_class.py
```python
from abc import ABCMeta, abstractmethod
class StageClass(metaclass=ABCMeta):
@abstractmethod
def init_class(self, cls_args):
pass
@abstractmethod
def run(self, args):
pass
``` |
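A minimal sketch of a concrete stage built on the `StageClass` interface above; the class name, the shape of `cls_args`, and the payload passed to `run()` are illustrative assumptions, since the pipeline's calling conventions are defined elsewhere in zpipe.

```python
class UppercaseStage(StageClass):
    """Illustrative stage: upper-cases whatever payload it receives."""

    def init_class(self, cls_args):
        # cls_args is assumed to be an optional configuration dict.
        self.prefix = (cls_args or {}).get("prefix", "")

    def run(self, args):
        # args is assumed to be the payload handed in by the pipeline.
        return self.prefix + str(args).upper()


stage = UppercaseStage()
stage.init_class({"prefix": ">> "})
print(stage.run("hello"))  # ">> HELLO"
```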
{
"source": "jherberg462/smf-mod-bot",
"score": 3
} |
#### File: jherberg462/smf-mod-bot/threads_classes.py
```python
from datetime import datetime, timedelta, timezone
from splinter import Browser
from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
import time
class Thread:
def __init__(self, thread_id, last_post_time, OP, lastPoster, post_id, browser=None):
self.thread_id = thread_id
self.last_post_time = last_post_time
self.OP = OP
self.last_poster = lastPoster
self.last_bump = None
self.double_post_count = 0
self.rule_violations = dict()
self.last_post_id = post_id
self._browser = browser
self.last_bump_archive = None
def update_thread(self, last_post_time, lastPoster_, post_id, browser):
self._browser = None #browser
if post_id > self.last_post_id:
if self.last_poster == self.OP and lastPoster_ == self.OP:
current_bump_archive = 'na' #self.get_archive(post_id)
if self.last_bump:
current_bump_archive = 'na'#self.get_archive(post_id)
#check if last bump is w/i 24 hours of last_post_time (give 1.5 hours leeway)
if (self.last_bump + timedelta(hours=22, minutes=30)) > last_post_time:
if (self.last_bump + timedelta(hours=1)) < last_post_time:
self.rule_violations['excessive_bump'] = {
'post_id':post_id,
'last_bump_post_id':self.last_post_id,
'last_bump':self.last_bump,
'current_bump':last_post_time,
'current_bump_archive':current_bump_archive,
'last_bump_archive':self.last_bump_archive
}
self.last_bump_archive = current_bump_archive
self.last_bump = last_post_time
elif self.last_poster == lastPoster_: #consider if I want to update this to if
if self.last_post_time + timedelta(days=1) > last_post_time:
self.rule_violations['double_post_{}'.format(self.double_post_count)] = {
'post_id': post_id,
'previous_post_id': self.last_post_id,
'current_poster': lastPoster_,
'last_poster':self.last_poster
}
self.double_post_count += 1
self.last_post_time = last_post_time
self.last_poster = lastPoster_
self.last_post_id = post_id
return self.rule_violations
def reset_rule_violations(self):
self.double_post_count = 0
self.rule_violations = dict()
def get_rule_violations(self):
return self.rule_violations
def get_archive(self, post_id, domain='https://bitcointalk.org/'):
url_to_archive = '{}index.php?topic={}.msg{}'.format(domain, self.thread_id, post_id)
archive_url = 'http://archive.md'
self._browser.visit(archive_url)
time.sleep(0.5)
self._browser.fill('url', url_to_archive)
active_web_element = self._browser.driver.switch_to.active_element
active_web_element.send_keys(Keys.ENTER)
time.sleep(1)
try:
archive_code = self._browser.driver.current_url.split('/')[-1]
archived_url = '{}/{}'.format(archive_url, archive_code)
except IndexError:
archived_url = 'unsuccessfully attempted to archive post'
return archived_url
class All_threads:
def __init__(self, threads_to_ignore=None, browser=None):
self.threads = set()
self.violations = dict()
self.thread = dict()
# self._browser = browser
self.executable_path = {'executable_path': ChromeDriverManager().install()}
self._browser = Browser('chrome', **self.executable_path, headless=True)
for thread in threads_to_ignore:
self.add_thread(thread, None, None, None, 99999999999)
self.threads.add(thread)
def add_thread(self, thread_id, last_post_time, OP, last_poster, post_id):
self.thread[thread_id] = Thread(thread_id, last_post_time, OP, last_poster, post_id, self._browser)
def update_thread(self, last_post_time, last_poster, post_id, thread_id):
violation_ = self.thread[thread_id].update_thread(last_post_time, last_poster, post_id, self._browser)
if len(violation_) > 0:
return violation_
def process_post(self, thread_id, last_post_time, OP, last_poster, post_id):
try:
pass #self._browser.visit('about:blank')
except:
self.reset_browser()
if thread_id in self.threads:
violation_ = self.update_thread(last_post_time, last_poster, post_id, thread_id)
if violation_:
self.violations[thread_id] = violation_
else:
self.add_thread(thread_id, last_post_time, OP, last_poster, post_id)
self.threads.add(thread_id)
def reset_violations(self):
self.violations = dict()
def reset_thread_violations(self):
for threadID in self.violations.keys():
self.thread[threadID].reset_rule_violations()
def get_rule_violations(self):
return self.violations
def reset_browser(self):
self._browser.quit()
self._browser = Browser('chrome', **self.executable_path, headless=True)
``` |
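A hedged sketch of how a moderation loop might drive `All_threads`. Note that constructing `All_threads` starts a headless Chrome session via `webdriver_manager`, so Chrome must be installed; the thread IDs, usernames, and timestamps below are illustrative.

```python
from datetime import datetime, timezone

tracker = All_threads(threads_to_ignore=[123456])

# First post creates the thread entry (OP "alice").
tracker.process_post(555, datetime(2021, 1, 1, 12, 0, tzinfo=timezone.utc),
                     "alice", "alice", 1000)
# "bob" replies once...
tracker.process_post(555, datetime(2021, 1, 1, 13, 0, tzinfo=timezone.utc),
                     "alice", "bob", 1001)
# ...and again within 24 hours, which is recorded as a double post.
tracker.process_post(555, datetime(2021, 1, 1, 13, 30, tzinfo=timezone.utc),
                     "alice", "bob", 1002)

violations = tracker.get_rule_violations()  # {555: {'double_post_0': {...}}}
tracker.reset_thread_violations()
tracker.reset_violations()
```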
{
"source": "jherby2k/twcmanager_ha",
"score": 2
} |
#### File: custom_components/twcmanager/config_flow.py
```python
from __future__ import annotations
import logging
from typing import Any
from aiohttp import ClientConnectorError
from twcmanager_client.client import TWCManagerClient
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
STEP_USER_DATA_SCHEMA = vol.Schema(
{
vol.Required("host"): str,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for TWCManager."""
VERSION = 1
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
)
errors = {}
try:
api = TWCManagerClient(user_input["host"])
uuid = await api.async_get_uuid()
except ClientConnectorError:
errors["base"] = "cannot_connect"
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception: " + type(exception).__name__)
errors["base"] = "unknown"
else:
await self.async_set_unique_id(uuid)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=user_input["host"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
)
```
#### File: custom_components/twcmanager/__init__.py
```python
from __future__ import annotations
import logging
from aiohttp.web import HTTPError
import async_timeout
from twcmanager_client.client import TWCManagerClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN, SCAN_INTERVAL
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up TWCManager from a config entry."""
hass.data.setdefault(DOMAIN, {})
api = TWCManagerClient(entry.data["host"])
async def async_update_data():
"""Fetch data from API endpoint."""
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
return await api.async_get_slave_twcs()
except HTTPError as err:
raise UpdateFailed("Error communicating with API") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="sensor",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=SCAN_INTERVAL,
)
hass.data[DOMAIN][entry.entry_id] = {"coordinator": coordinator, "api": api}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
``` |
{
"source": "jherby2k/TWCManager",
"score": 3
} |
#### File: TWCManager/Logging/MySQLLogging.py
```python
import logging
logger = logging.getLogger(__name__.rsplit(".")[-1])
class MySQLHandler(logging.Handler):
slaveSession = {}
def __init__(self, db):
logging.Handler.__init__(self)
self.db = db
def emit(self, record):
self.format(record)
log_type = getattr(record, "logtype", "")
if log_type == "charge_sessions":
charge_state = getattr(record, "chargestate", "")
if charge_state == "start":
# Called when a Charge Session Starts.
twcid = "%02X%02X" % (
getattr(record, "TWCID")[0],
getattr(record, "TWCID")[1],
)
self.slaveSession[twcid] = getattr(record, "startTime", 0)
query = """
INSERT INTO charge_sessions (chargeid, startTime, startkWh, slaveTWC)
VALUES (%s,now(),%s,%s)
"""
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
cur = self.db.cursor()
rows = 0
try:
rows = cur.execute(
query,
(
getattr(record, "startTime", 0),
getattr(record, "startkWh", 0),
twcid,
),
)
except Exception as e:
logger.error("Error updating MySQL database: %s", e)
if rows:
# Query was successful. Commit
self.db.commit()
else:
# Issue, log message and rollback
logger.info("Error updating MySQL database. Rows = %d", rows)
self.db.rollback()
cur.close()
elif charge_state == "update":
# Called when additional information needs to be updated for a
# charge session
twcid = "%02X%02X" % (
getattr(record, "TWCID")[0],
getattr(record, "TWCID")[1],
)
chgid = self.slaveSession.get(twcid, 0)
if getattr(record, "vehicleVIN", None):
query = """
UPDATE charge_sessions SET vehicleVIN = '%s'
WHERE chargeid = %s AND slaveTWC = %s
"""
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
cur = self.db.cursor()
rows = 0
try:
rows = cur.execute(
query % (getattr(record, "vehicleVIN", ""), chgid, twcid)
)
except Exception as e:
logger.error("Error updating MySQL database: %s", e)
if rows:
# Query was successful. Commit
self.db.commit()
else:
# Issue, log message and rollback
self.db.rollback()
cur.close()
elif charge_state == "stop":
# Called when a Charge Session Ends.
twcid = "%02X%02X" % (
getattr(record, "TWCID")[0],
getattr(record, "TWCID")[1],
)
chgid = self.slaveSession.get(twcid, 0)
query = """
UPDATE charge_sessions SET endTime = now(), endkWh = %s
WHERE chargeid = %s AND slaveTWC = %s
"""
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
cur = self.db.cursor()
rows = 0
try:
rows = cur.execute(
query,
(
getattr(record, "endkWh", 0),
chgid,
twcid,
),
)
except Exception as e:
logger.error("Error updating MySQL database: %s", e)
if rows:
# Query was successful. Commit
self.db.commit()
else:
# Issue, log message and rollback
logger.error("Error updating MySQL database. Rows = %d", rows)
self.db.rollback()
cur.close()
self.slaveSession[twcid] = 0
elif log_type == "green_energy":
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
query = """
INSERT INTO green_energy (time, genW, conW, chgW)
VALUES (now(), %s, %s, %s)
"""
cur = self.db.cursor()
rows = 0
try:
rows = cur.execute(
query,
(
getattr(record, "genWatts", 0),
getattr(record, "conWatts", 0),
getattr(record, "chgWatts", 0),
),
)
except Exception as e:
logger.error("Error updating MySQL database: %s", e)
if rows:
# Query was successful. Commit
self.db.commit()
else:
# Issue, log message and rollback
logger.info("Error updating MySQL database. Rows = %d" % rows)
self.db.rollback()
cur.close()
class MySQLLogging:
capabilities = {"queryGreenEnergy": True}
config = None
configConfig = None
configLogging = None
db = None
status = False
def __init__(self, master):
self.master = master
self.config = master.config
try:
self.configConfig = master.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configLogging = master.config["logging"]["MySQL"]
except KeyError:
self.configLogging = {}
self.status = self.configLogging.get("enabled", False)
# Unload if this module is disabled or misconfigured
if not self.status or not self.configLogging.get("host", None):
self.master.releaseModule("lib.TWCManager.Logging", "MySQLLogging")
return None
# Initialize the mute config tree if it is not already
if not self.configLogging.get("mute", None):
self.configLogging["mute"] = {}
# Import MySQL module if module is not released
global pymysql
import pymysql
try:
self.db = pymysql.connect(
host=self.configLogging.get("host", ""),
port=self.configLogging.get("port", 3306),
user=self.configLogging.get("username", ""),
password=self.configLogging.get("password", ""),
database=self.configLogging.get("database", ""),
)
except pymysql.err.OperationalError as e:
logger.info("Error connecting to MySQL database")
logger.info(str(e))
else:
mysql_handler = MySQLHandler(db=self.db)
mysql_handler.addFilter(self.mysql_filter)
logging.getLogger("").addHandler(mysql_handler)
def getCapabilities(self, capability):
# Allows query of module capabilities when deciding which Logging module to use
return self.capabilities.get(capability, False)
def mysql_filter(self, record):
log_type = getattr(record, "logtype", "")
# Check if this status is muted or it is not the correct log type
if log_type == "charge_sessions" and not self.configLogging["mute"].get(
"ChargeSessions", 0
):
return True
# Check if this status is muted or it is not the correct log type
if log_type == "green_energy" and not self.configLogging["mute"].get(
"GreenEnergy", 0
):
return True
# Check if this status is muted or it is not the correct log type
if log_type == "slave_status" and not self.configLogging["mute"].get(
"SlaveStatus", 0
):
return True
return False
def queryGreenEnergy(self, data):
# Check if this status is muted
if self.configLogging["mute"].get("GreenEnergy", 0):
return None
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
query = """
SELECT * from green_energy where time>%s and time<%s
"""
cur = self.db.cursor()
rows = 0
result = {}
try:
rows = cur.execute(
query, (data.get("dateBegin", 0), data.get("dateEnd", 0))
)
except Exception as e:
logger.exception("Error executing queryGreenEnergy query: %s", e)
else:
if rows:
# Query was successful. Commit
result = cur.fetchall()
else:
# Issue, log message
logger.error("Error query MySQL database. Rows = %d", rows)
cur.close()
return list(result)
def slaveStatus(self, data):
# Check if this status is muted
if self.configLogging["mute"].get("SlaveStatus", 0):
return None
# Ensure database connection is alive, or reconnect if not
self.db.ping(reconnect=True)
# Otherwise, add to database
cursor = self.db.cursor()
query = """
INSERT INTO slave_status (slaveTWC, time, kWh, voltsPhaseA,
voltsPhaseB, voltsPhaseC)
VALUES (%s, now(), %s, %s, %s, %s);
"""
rows = 0
try:
rows = cursor.execute(
query,
(
"%02X%02X" % (data["TWCID"][0], data["TWCID"][1]),
data["kWh"],
data["voltsPerPhase"][0],
data["voltsPerPhase"][1],
data["voltsPerPhase"][2],
),
)
except Exception as e:
logger.info("Error updating MySQL database")
logger.info(str(e))
if rows:
# Query was successful. Commit
self.db.commit()
else:
# Issue, log message and rollback
logger.info("Error updating MySQL database. Rows = %d" % rows)
self.db.rollback()
cursor.close()
``` |
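The handler above is attached to the root logger and only acts on records whose `extra` dict carries a recognised `logtype`. A hedged sketch of the call shape that feeds it (field names follow the `getattr(record, ...)` reads in `emit()`; the logger name and values are illustrative, and log levels are assumed to be configured elsewhere, as TWCManager does at startup):

```python
import logging

logger = logging.getLogger("TWCManager")
logger.info(
    "Green energy sample",
    extra={
        "logtype": "green_energy",  # routed by mysql_filter() and emit()
        "genWatts": 4200,           # generation
        "conWatts": 1100,           # consumption
        "chgWatts": 3000,           # charger draw
    },
)
```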
{
"source": "jhereth/safer",
"score": 3
} |
#### File: safer/test/test_printer.py
```python
from pathlib import Path
from unittest import TestCase
import functools
import safer
import tdir
topen = functools.partial(safer.open, temp_file=True)
copen = functools.partial(safer.open, mode='w')
@tdir
class TestPrinter(TestCase):
filename = Path('test.txt')
def test_printer(self):
with safer.printer(self.filename) as print:
print('hello')
assert self.filename.read_text() == 'hello\n'
def test_printer_dry_run(self):
assert not self.filename.exists()
with safer.printer(self.filename, dry_run=True) as print:
assert not self.filename.exists()
print('hello')
assert not self.filename.exists()
def test_printer_dry_run_callable(self):
results = []
assert not self.filename.exists()
with safer.printer(self.filename, dry_run=results.append) as print:
assert not self.filename.exists()
print('hello')
assert not self.filename.exists()
assert results == ['hello\n']
def test_printer_errors(self):
with safer.printer(self.filename):
pass
with self.assertRaises(IOError) as m:
with safer.printer(self.filename, 'r'):
pass
assert 'not open' in m.exception.args[0].lower()
with self.assertRaises(IOError) as m:
with safer.printer(self.filename, 'rb'):
pass
assert 'not open' in m.exception.args[0].lower()
with self.assertRaises(ValueError) as m:
with safer.printer(self.filename, 'wb'):
pass
assert 'binary mode' in m.exception.args[0].lower()
``` |
{
"source": "JherezTaylor/thesis-preprocessing",
"score": 2
} |
#### File: modules/db/elasticsearch_base.py
```python
from pymongo import InsertOne
from elasticsearch import Elasticsearch
from elasticsearch import helpers as es_helpers
from ..utils import settings
from ..db import mongo_base
def connect(es_url=None):
"""Initializes an elasticseach conection object.
Returns:
elaticsearch.Elasticsearch: Connection object for Elasticsearch
"""
if es_url is None:
es_url = settings.ES_URL
try:
es_host = {"host": es_url, "port": 9200}
_es = Elasticsearch([es_host])
if _es.ping():
settings.logger.info(
"Connected to ElasticSearch at %s successfully", es_url)
except ValueError as ex:
settings.logger.error(
"Could not connect to ElasticSearch: %s", ex, exc_info=True)
return _es
def more_like_this(_es, es_index, field, like_list, min_term_freq, max_query_terms):
"""Build and execute a more like this query on the like document
See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-mlt-query.html
Returns
result (list): list of documents that match the like document.
"""
queries = [{
"stored_fields": field,
"query": {
"more_like_this": {
"fields": field,
"like": like,
"min_term_freq": min_term_freq,
"max_query_terms": max_query_terms
}
}
} for like in like_list]
results = []
for query in queries:
res = _es.search(index=es_index, body=query)
results.append([hit['fields'][field[0]][0]
for hit in res['hits']['hits']])
return results
def match(_es, es_index, doc_type, field, lookup_list):
"""Build and execute an exact match query, matching on the passed field and the
values in the lookup_list.
Returns
result (list): list of documents.
"""
query = {
"query": {
"constant_score": {
"filter": {
"terms": {
field: lookup_list
}
}
}
}
}
results = es_helpers.scan(
_es, index=es_index, doc_type=doc_type, query=query, scroll='2m', size=3400)
return results
def aggregate(_es, es_index, field, use_range, query_filter, size=10, min_doc_count=10):
"""Build and execute an aggregate query, matching on the passed field.
Args:
use_range (bool): Apply time range to query.
query_filter (str): Documents must match the requirements in this
query, formatted with ELS Query DSL. example: "_exists_:hs_keyword_matches"
Pass "*" to match all documents.
Returns
result (list): list of documents.
"""
if use_range:
query = {
"query": {
"bool": {
"must": [
{
"query_string": {
"query": query_filter,
"analyze_wildcard": True
}
},
{
"range": {
"created_at": {
"gte": 1339553696602,
"lte": 1497320096602,
"format": "epoch_millis"
}
}
}
],
"must_not": []
}
},
"size": 0,
"_source": {
"excludes": []
},
"aggs": {
"2": {
"terms": {
"field": field,
"size": size,
"order": {
"_count": "desc"
},
"min_doc_count": min_doc_count
}
}
}
}
else:
query = {
"query": {
"query_string": {
"query": query_filter,
"analyze_wildcard": True
}
},
"size": 0,
"_source": {
"excludes": []
},
"aggs": {
"2": {
"terms": {
"field": field,
"size": size,
"order": {
"_count": "desc"
},
"min_doc_count": min_doc_count
}
}
}
}
response = _es.search(index=es_index, body=query)
results = {item["key"]: item["doc_count"]
for item in response["aggregations"]["2"]["buckets"]}
if not results:
return response
else:
return results, response["hits"]["total"]
def count(_es, es_index, query):
"""Execute a query and get the number of matches for that query.
Returns
result (int): Query count result.
"""
response = _es.count(index=es_index, body=query)
return response["count"]
def get_els_subset_size(_es, es_index, field):
""" Return both the number of documents that have the given field and the inverse.
"""
positive_query = {
"query": {
"exists": {
"field": field
}
}
}
negative_query = {
"query": {
"bool": {
"must_not": {
"exists": {
"field": field
}
}
}
}
}
positive_count = count(_es, es_index, positive_query)
negative_count = count(_es, es_index, negative_query)
result = {}
result["positive_count"] = positive_count
result["negative_count"] = negative_count
return result
def migrate_es_tweets(connection_params, args):
""" Scroll an elasticsearch instance and insert the tweets into MongoDB
"""
db_name = connection_params[0]
target_collection = connection_params[1]
es_url = args[0]
es_index = args[1]
doc_type = args[2]
field = args[3]
lookup_list = args[4]
_es = connect(es_url)
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
es_results = match(
_es, es_index, doc_type, field, lookup_list)
operations = []
for doc in es_results:
operations.append(InsertOne(doc["_source"]))
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
```
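A hedged usage sketch of the helpers above; the import path, index, field names, and seed text are placeholders rather than the project's real Elasticsearch mappings.

```python
from modules.db import elasticsearch_base

_es = elasticsearch_base.connect("localhost")

# Top hashtags among documents that already have HS keyword matches.
counts, total = elasticsearch_base.aggregate(
    _es, "twitter_index", "hashtags", use_range=False,
    query_filter="_exists_:hs_keyword_matches", size=20, min_doc_count=5)

# Tweets similar to a seed text, matching on the "text" field.
similar = elasticsearch_base.more_like_this(
    _es, "twitter_index", ["text"], ["example seed tweet"],
    min_term_freq=1, max_query_terms=25)
```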
#### File: modules/db/mongo_data_filters.py
```python
from bs4 import BeautifulSoup as BSHTML
from pymongo import UpdateOne, DeleteMany, UpdateMany, ASCENDING
from ..utils import settings
from ..utils import notifiers
from . import mongo_base
def retweet_removal(connection_params):
"""Bulk operation to delete all retweets.
Preprocessing Pipeline Stage 1.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
pipeline = [
DeleteMany({"retweeted_status": {"$exists": True}})
]
result = mongo_base.do_bulk_op(dbo, collection, pipeline)
return result
def create_indexes(connection_params):
"""Create index for field existence checks
Preprocessing Pipeline Stage 2.
Index creation should ideally be
run directly through the mongo shell for large collections.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
dbo[collection].create_index(
[("entities.user_mentions", ASCENDING)], background=True)
settings.logger.info("User mentions Index built")
dbo[collection].create_index(
[("entities.hashtags", ASCENDING)], background=True)
settings.logger.info("Hashtag Index built")
dbo[collection].create_index(
[("quoted_status_id", ASCENDING)], background=True)
settings.logger.info("Quoted status Index built")
dbo[collection].create_index(
[("extended_tweet.id_str", ASCENDING)], background=True)
settings.logger.info("Extended tweet Index built")
dbo[collection].create_index(
[("quoted_status.entities.hashtags", ASCENDING)], background=True)
settings.logger.info("Quoted status hashtag Index built")
dbo[collection].create_index(
[("quoted_status.entities.user_mentions", ASCENDING)], background=True)
settings.logger.info("Quoted status user_mention Index built")
@notifiers.timing
def field_removal(connection_params):
"""Bulk operation to remove unwanted fields from the tweet object
Preprocessing Pipeline Stage 3.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
pipeline = [
UpdateMany({},
{
"$unset": {
"contributors": "", "truncated": "", "retweet_count": "",
"retweeted": "", "favorited": "",
"user.follow_request_sent": "", "user.profile_use_background_image": "",
"user.default_profile_image": "", "user.profile_sidebar_fill_color": "",
"user.profile_text_color": "", "user.profile_sidebar_border_color": "",
"user.profile_image_url_https": "", "in_reply_to_user_id": "",
"user.profile_background_color": "", "in_reply_to_status_id": "",
"user.profile_link_color": "", "geo": "",
"user.profile_image_url": "", "following": "",
"user.profile_background_tile": "", "user.contributors_enabled": "",
"user.notifications": "", "user.is_translator": "", "user.id": "",
"user.profile_background_image_url": "", "user.has_extended_profile": "",
"user.profile_background_image_url_https": "",
"user.is_translation_enabled": "", "metadata": "",
"user.translator_type": "",
},
"$set": {"fields_removed": True}}, upsert=False)
]
result = mongo_base.do_bulk_op(dbo, collection, pipeline)
return result
@notifiers.timing
def quoted_status_field_removal(connection_params):
"""Bulk operation to remove unwanted fields from the quoted_status tweet object
Preprocessing Pipeline Stage 3.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
pipeline = [
UpdateMany({"quoted_status": {"$exists": True}},
{
"$unset": {
"quoted_status.contributors": "", "quoted_status.truncated": "",
"quoted_status.retweeted": "", "quoted_status.favorited": "",
"quoted_status.user.follow_request_sent": "", "quoted_status.user.profile_use_background_image": "",
"quoted_status.user.default_profile_image": "", "quoted_status.user.profile_sidebar_fill_color": "",
"quoted_status.user.profile_text_color": "", "quoted_status.user.profile_sidebar_border_color": "",
"quoted_status.user.profile_image_url_https": "", "quoted_status.in_reply_to_user_id": "",
"quoted_status.user.profile_background_color": "", "quoted_status.in_reply_to_status_id": "",
"quoted_status.user.profile_link_color": "", "quoted_status.geo": "",
"quoted_status.user.profile_image_url": "", "quoted_status.following": "",
"quoted_status.user.profile_background_tile": "", "quoted_status.user.contributors_enabled": "",
"quoted_status.user.notifications": "", "quoted_status.user.is_translator": "", "quoted_status.user.id": "",
"quoted_status.user.profile_background_image_url": "", "quoted_status.user.has_extended_profile": "",
"quoted_status.user.profile_background_image_url_https": "",
"quoted_status.user.is_translation_enabled": "", "quoted_status.metadata": "",
"quoted_status.user.translator_type": "",
},
"$set": {"fields_removed": True}}, upsert=False)
]
result = mongo_base.do_bulk_op(dbo, collection, pipeline)
return result
@notifiers.timing
def language_trimming(connection_params, lang_list):
"""Bulk operation to trim the list of languages present.
Preprocessing Pipeline Stage 4.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
lang_list (list): List of languages to match on.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
pipeline = [
DeleteMany({"lang": {"$nin": lang_list}})
]
result = mongo_base.do_bulk_op(dbo, collection, pipeline)
return result
@notifiers.do_cprofile
def field_flattening_base(connection_params, depth, field_name, field_to_set, field_to_extract):
"""Aggregate operation to unwind entries in the various entities object.
Preprocessing Pipeline Stage 5.
Entities include hashtags, user_mentions, urls and media.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
depth (str): Extract from top level of tweet or from nested quote tweet.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
if depth == "top_level":
field_name_base = field_name
field_name = "$" + field_name
elif depth == "quoted_status":
field_name_base = "quoted_status." + field_name
field_name = "$" + "quoted_status." + field_name
# Store the documents for our bulkwrite
operations = []
pipeline = [
{"$match": {field_name_base: {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name},
{"$group": {"_id": "$_id", field_to_set: {
"$addToSet": field_name + field_to_extract}}},
{"$out": "temp_" + field_name_base}
]
dbo[collection].aggregate(pipeline, allowDiskUse=True)
cursor = dbo["temp_" + field_name_base].find({}, no_cursor_timeout=True)
for document in cursor:
operations.append(
UpdateOne({"_id": document["_id"]},
{
"$set": {
field_to_set: document[field_to_set],
str(field_to_set) + "_extracted": True
},
"$unset": {
str(field_name_base): ""
}
}, upsert=False))
# Send once every settings.BULK_BATCH_SIZE in batch
if (len(operations) % settings.BULK_BATCH_SIZE) == 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
if (len(operations) % settings.BULK_BATCH_SIZE) != 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
# Clean Up
dbo["temp_" + field_name_base].drop()
def field_flattening_complex(connection_params, depth, field_params):
"""Aggregate operation to unwind entries in the various entities object.
Preprocessing Pipeline Stage 5.
Entities include hashtags, user_mentions, urls and media.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
depth (str): Extract from top level of tweet or from nested quote tweet.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
field_name = field_params[0]
field_to_set_1 = field_params[2]
field_to_set_2 = field_params[3]
field_to_extract_1 = field_params[4]
field_to_extract_2 = field_params[5]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
if depth == "top_level":
field_name_base = field_name
field_name = "$" + field_name
field_top_level = field_params[1]
insertion_field = field_top_level
elif depth == "quoted_status":
field_top_level = field_params[1]
field_name_base = "quoted_status." + field_name
field_name = "$" + "quoted_status." + field_name
insertion_field = "quoted_status." + field_top_level
# Store the documents for our bulkwrite
operations = []
pipeline = [
{"$match": {field_name_base: {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name},
{"$group": {"_id": "$_id", field_top_level:
{"$addToSet": {field_to_set_1: field_name + field_to_extract_1,
field_to_set_2: field_name + field_to_extract_2}
}
}
},
{"$out": "temp_" + field_name_base}
]
dbo[collection].aggregate(pipeline, allowDiskUse=True)
cursor = dbo["temp_" + field_name_base].find({}, no_cursor_timeout=True)
for document in cursor:
operations.append(
UpdateOne({"_id": document["_id"]},
{
"$set": {
insertion_field: document[field_top_level],
str(insertion_field) + "_extracted": True
},
"$unset": {
str(field_name_base): ""
}
}, upsert=False))
# Send once every settings.BULK_BATCH_SIZE in batch
if (len(operations) % settings.BULK_BATCH_SIZE) == 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
if (len(operations) % settings.BULK_BATCH_SIZE) != 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
# Clean Up
dbo["temp_" + field_name_base].drop()
def parse_extended_tweet(connection_params, depth):
"""Aggregate operation to parse extended tweet and append contents to top level.
Preprocessing Pipeline Stage 6.
Entities include hashtags, user_mentions, urls and media.
http://stackoverflow.com/questions/28827516/using-unwind-twice-with-group-and-sum-mongodb
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
depth (str): Extract from top level of tweet or from nested quote tweet.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
hashtag_field = "hashtags"
user_mention_field = "user_mentions"
url_field = "urls"
media_field = "media"
if depth == "top_level":
field_name_base = "extended_tweet"
field_name = "$" + "extended_tweet"
elif depth == "quoted_status":
field_name_base = "quoted_status.extended_tweet"
field_name = "$" + "quoted_status.extended_tweet"
pipeline_hashtags = [
{"$match": {field_name_base + ".entities.hashtags": {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name + ".entities.hashtags"},
{"$group": {
"_id": "$_id",
"full_text": {"$first": field_name + ".full_text"},
"hashtags": {"$addToSet": field_name + ".entities.hashtags.text"}
}},
{"$out": "temp_hs_" + field_name_base}
]
pipeline_user_mentions = [
{"$match": {field_name_base + ".entities.user_mentions": {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name + ".entities.user_mentions"},
{"$group": {
"_id": "$_id",
"full_text": {"$first": "$full_text"},
"user_mentions": {"$addToSet": {"screen_name": field_name +
".entities.user_mentions.screen_name",
"id_str": field_name + ".entities.user_mentions.id_str"}},
}},
{"$out": "temp_um" + field_name_base}
]
pipeline_urls = [
{"$match": {field_name_base + ".entities.urls": {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name + ".entities.urls"},
{"$group": {
"_id": "$_id",
"full_text": {"$first": "$full_text"},
"urls": {"$addToSet": field_name + ".entities.urls.expanded_url"}
}},
{"$out": "temp_url" + field_name_base}
]
pipeline_media = [
{"$match": {field_name_base + ".entities.media": {"$exists": True}}},
{"$project": {field_name_base: 1, "_id": 1}},
{"$unwind": field_name + ".entities.media"},
{"$group": {
"_id": "$_id",
"full_text": {"$first": "$full_text"},
"media": {"$addToSet": {"media_url": field_name + ".entities.media.media_url",
"id_str": field_name + ".entities.media.type"}},
}},
{"$out": "temp_md" + field_name_base}
]
dbo[collection].aggregate(pipeline_hashtags, allowDiskUse=True)
iterate_cursor(dbo, "temp_hs_" + field_name_base,
collection, hashtag_field, depth)
dbo[collection].aggregate(pipeline_user_mentions, allowDiskUse=True)
iterate_cursor(dbo, "temp_um" + field_name_base,
collection, user_mention_field, depth)
dbo[collection].aggregate(pipeline_urls, allowDiskUse=True)
iterate_cursor(dbo, "temp_url" + field_name_base,
collection, url_field, depth)
dbo[collection].aggregate(pipeline_media, allowDiskUse=True)
iterate_cursor(dbo, "temp_md" + field_name_base,
collection, media_field, depth)
def iterate_cursor(dbo, source_collection, target_collection, field_to_set, depth):
""" Iterate the specified collections and apply the updates
Args:
dbo (MongoClient): MongoClient connection object
source_collection (str): Collection containing aggregate results.
target_collection (str): Collection to update.
field_to_set (str): Name of field to append to collection.
depth (str): Extract from top level of tweet or from nested quote tweet.
"""
# Store the documents for our bulkwrite
if depth == "top_level":
field = field_to_set
field_to_set = field_to_set
elif depth == "quoted_status":
field = field_to_set
field_to_set = "quoted_status." + field_to_set
operations = []
cursor = dbo[source_collection].find({}, no_cursor_timeout=True)
for document in cursor:
# Some tweets appear to be missing text, do this check just in case
if document["full_text"]:
operations.append(
UpdateOne({"_id": document["_id"]},
{
"$set": {
"text": document["full_text"],
field_to_set: document[field],
"extended_tweet_extracted": True
}
}, upsert=False))
else:
operations.append(
UpdateOne({"_id": document["_id"]},
{
"$set": {
field_to_set: document[field],
"extended_tweet_extracted": True
}
}, upsert=False))
# Send once every settings.BULK_BATCH_SIZE in batch
if (len(operations) % settings.BULK_BATCH_SIZE) == 0:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if (len(operations) % settings.BULK_BATCH_SIZE) != 0:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
# Clean Up
dbo[source_collection].drop()
@notifiers.timing
def final_field_removal(connection_params):
"""Bulk operation to remove unwanted fields from the tweet object
Preprocessing Pipeline Stage 7.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
dbo[collection].drop_indexes()
pipeline = [
UpdateMany({},
{
"$unset": {
"entities": "", "quoted_status.entities": "",
"id": "", "quoted_status.id": "", "quoted_status_id": "",
"quoted_status.quoted_status_id": "", "quoted_status.extended_entities": "",
"extended_entities": "", "extended_tweet": "", "quoted_status.extended_tweet": ""
}}, upsert=False)
]
result = mongo_base.do_bulk_op(dbo, collection, pipeline)
return result
def clean_source_field(connection_params):
"""Parse the HTML in the source field.
Preprocessing Pipeline Stage 8.
Args:
connection_params (list): Contains connection params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
operations = []
cursor = dbo[collection].find({"source": {"$exists": True}}, {
"source": 1}, no_cursor_timeout=True)
for document in cursor:
try:
cleaned_source = BSHTML(
"'" + document["source"] + "'", "html.parser").a.contents[0].encode('utf-8').strip()
except AttributeError:
cleaned_source = document["source"]
operations.append(
UpdateOne({"_id": document["_id"]},
{
"$set": {
"source": cleaned_source
}
}, upsert=False))
# Send once every settings.BULK_BATCH_SIZE in batch
if (len(operations) % settings.BULK_BATCH_SIZE) == 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
if (len(operations) % settings.BULK_BATCH_SIZE) != 0:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
```
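The docstrings above number the preprocessing stages 1 through 8. A hedged sketch of a driver running them in that order; the import path, database, and collection names are placeholders, and the hashtag flattening call mirrors the parameters the function itself unpacks.

```python
from modules.db import mongo_data_filters as filters

connection_params = ["twitter", "tweets"]  # [db_name, collection]

filters.retweet_removal(connection_params)                     # Stage 1
filters.create_indexes(connection_params)                      # Stage 2
filters.field_removal(connection_params)                       # Stage 3
filters.quoted_status_field_removal(connection_params)         # Stage 3 (quoted)
filters.language_trimming(connection_params, ["en"])           # Stage 4
filters.field_flattening_base(connection_params, "top_level",  # Stage 5
                              "entities.hashtags", "hashtags", ".text")
filters.parse_extended_tweet(connection_params, "top_level")   # Stage 6
filters.final_field_removal(connection_params)                 # Stage 7
filters.clean_source_field(connection_params)                  # Stage 8
```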
#### File: modules/db/mongo_search_pipelines.py
```python
import itertools
from collections import defaultdict
from pymongo import InsertOne, UpdateOne
from ..utils import settings
from ..utils import file_ops
from ..utils import text_preprocessing
from . import mongo_base
# @notifiers.do_cprofile
def select_porn_candidates(connection_params, filter_options, partition):
""" Iterate the specified collection and store the ObjectId
of documents that have been tagged as being pornographic in nature.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
filter_options (list): Contains a list of filter conditions as follows:
0: query (dict): Query to execute.
1: target_collection (str): Name of output collection.
2: subj_check (bool): Check text for subjectivity.
3: sent_check (bool): Check text for sentiment.
4: porn_black_list (list): List of porn keywords.
5: hs_keywords (list) HS corpus.
6: black_list (list) Custom words to filter on.
partition (tuple): Contains skip and limit values.
"""
# Setup args for mongo_base.finder() call
client = mongo_base.connect()
db_name = connection_params[0]
connection_params.insert(0, client)
query = filter_options[0]
target_collection = filter_options[1]
subj_check = filter_options[2]
sent_check = filter_options[3]
porn_black_list = filter_options[4]
hs_keywords = filter_options[5]
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
# Store the documents for our bulkwrite
staging = []
operations = []
progress = 0
cursor = mongo_base.finder(connection_params, query, False)
for document in cursor:
progress = progress + 1
set_intersects = text_preprocessing.do_create_ngram_collections(
document["text"].lower(), [porn_black_list, hs_keywords, None])
# unigram_intersect = set_intersects[0]
ngrams_intersect = set_intersects[1]
if ngrams_intersect:
staging.append(document)
else:
# No intersection, skip entry
pass
# Send once every settings.BULK_BATCH_SIZE in batch
if len(staging) == settings.BULK_BATCH_SIZE:
settings.logger.debug(
"Progress: %s", (progress * 100) / partition[1])
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
staging = []
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if (len(staging) % settings.BULK_BATCH_SIZE) != 0:
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
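# --- Hedged usage sketch (not part of the original module) -----------------
# Each *_candidates function above works on a (skip, limit) partition, so a
# driver elsewhere in the project presumably counts the collection, splits it
# into chunks, and fans these calls out. Every name and value below is an
# illustrative assumption, including the shape of `query`, which must match
# whatever mongo_base.finder() expects.
#
#   connection_params = ["twitter", "tweets"]
#   filter_options = [query, "candidate_porn_tweets", False, False,
#                     porn_black_list, hs_keywords, None]
#   for partition in [(0, 100000), (100000, 100000)]:
#       select_porn_candidates(list(connection_params),
#                              list(filter_options), partition)
#
# The list(...) copies matter: each call inserts a MongoClient at index 0 of
# connection_params, mutating whatever list it is given.
# ----------------------------------------------------------------------------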
# @profile
# @notifiers.do_cprofile
def select_hs_candidates(connection_params, filter_options, partition):
""" Iterate the specified collection and check for tweets that contain
hatespeech keywords.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
filter_options (list): Contains a list of filter conditions as follows:
0: query (dict): Query to execute.
1: target_collection (str): Name of output collection.
2: subj_check (bool): Check text for subjectivity.
3: sent_check (bool): Check text for sentiment.
4: porn_black_list (list): List of porn keywords.
5: hs_keywords (list) HS corpus.
6: black_list (list) Custom words to filter on.
partition (tuple): Contains skip and limit values.
"""
# Setup args for mongo_base.finder() call
client = mongo_base.connect()
db_name = connection_params[0]
connection_params.insert(0, client)
query = filter_options[0]
target_collection = filter_options[1]
subj_check = filter_options[2]
sent_check = filter_options[3]
porn_black_list = filter_options[4]
hs_keywords = filter_options[5]
black_list = filter_options[6]
account_list = filter_options[7]
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
# Store the documents for our bulkwrite
staging = []
staging_ngram_freq = defaultdict(list)
operations = []
# Keep track of how often we match an ngram in our blacklist
porn_black_list_counts = dict.fromkeys(porn_black_list, 0)
new_blacklist_accounts = []
progress = 0
cursor = mongo_base.finder(connection_params, query, False)
for document in cursor:
progress = progress + 1
set_intersects = text_preprocessing.do_create_ngram_collections(
document["text"].lower(), [porn_black_list, hs_keywords, black_list])
# unigram_intersect = set_intersects[0]
ngrams_intersect = set_intersects[1]
hs_keywords_intersect = set_intersects[2]
# black_list_intersect = set_intersects[3]
if not ngrams_intersect and document["user"]["screen_name"] not in account_list and hs_keywords_intersect:
staging.append(document)
# Here we want to keep track of how many times a user has text that matches
# one of our porn ngrams. Users below the threshold will be processed.
elif ngrams_intersect and document["user"]["screen_name"] not in account_list and hs_keywords_intersect:
staging_ngram_freq[document["user"][
"screen_name"]].append(document)
for token in ngrams_intersect:
porn_black_list_counts[token] += 1
else:
# No hs intersections, skip entry and update blacklist count
for token in ngrams_intersect:
porn_black_list_counts[token] += 1
# Send once every settings.BULK_BATCH_SIZE in batch
if len(staging) == settings.BULK_BATCH_SIZE:
settings.logger.debug(
"Progress: %s", (progress * 100) / partition[1])
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
staging = []
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if (len(staging) % settings.BULK_BATCH_SIZE) != 0:
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
# Check for users with porn ngram frequencies below threshold
# Note that the cursor has already been exhausted and this now
# becomes a local disk operation
staging = []
operations = []
for screen_name in staging_ngram_freq:
# Consider users that don't appear frequently and stage them
if len(staging_ngram_freq[screen_name]) < settings.PNGRAM_THRESHOLD:
staging = staging + staging_ngram_freq[screen_name]
else:
new_blacklist_accounts.append(screen_name)
if staging:
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
file_ops.write_json_file(
'porn_ngram_hits', settings.OUTPUT_PATH, porn_black_list_counts)
file_ops.write_csv_file("new_porn_account_filter",
settings.OUTPUT_PATH, new_blacklist_accounts)
# @notifiers.do_cprofile
def select_general_candidates(connection_params, filter_options, partition):
""" Iterate the specified collection and store the ObjectId
of documents that do not match any hs or pornographic keywords.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
filter_options (list): Contains a list of filter conditions as follows:
0: query (dict): Query to execute.
1: target_collection (str): Name of output collection.
2: subj_check (bool): Check text for subjectivity.
3: sent_check (bool): Check text for sentiment.
4: porn_black_list (list): List of porn keywords.
5: hs_keywords (list) HS corpus.
6: black_list (list) Custom words to filter on.
partition (tuple): Contains skip and limit values.
"""
# Setup args for mongo_base.finder() call
client = mongo_base.connect()
db_name = connection_params[0]
connection_params.insert(0, client)
query = filter_options[0]
target_collection = filter_options[1]
subj_check = filter_options[2]
sent_check = filter_options[3]
porn_black_list = filter_options[4]
hs_keywords = filter_options[5]
black_list = filter_options[6]
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
# Store the documents for our bulkwrite
staging = []
operations = []
progress = 0
cursor = mongo_base.finder(connection_params, query, False)
for document in cursor:
progress = progress + 1
set_intersects = text_preprocessing.do_create_ngram_collections(
document["text"].lower(), [porn_black_list, hs_keywords, black_list])
unigram_intersect = set_intersects[0]
ngrams_intersect = set_intersects[1]
hs_keywords_intersect = set_intersects[2]
black_list_intersect = set_intersects[3]
if not ngrams_intersect and (len(unigram_intersect) > 1) and not black_list_intersect and not hs_keywords_intersect:
staging.append(document)
else:
# No intersection, skip entry
pass
# Send once every settings.BULK_BATCH_SIZE in batch
if len(staging) == settings.BULK_BATCH_SIZE:
settings.logger.debug(
"Progress: %s", (progress * 100) / partition[1])
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
staging = []
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if (len(staging) % settings.BULK_BATCH_SIZE) != 0:
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
def emotion_coverage_pipeline(connection_params, filter_options, partition):
""" Iterate the specified collection get the emtotion coverage for each
tweet. As a secondary function, create ngrams for the CrowdFlower dataset
if the argument is passed.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
filter_options (list): Contains a list of filter conditions as follows:
0: query (dict): Query to execute.
1: create_ngrams (bool): Create ngrams or not.
2: projection (str): Document field name.
partition (tuple): Contains skip and limit values.
"""
# Setup args for mongo_base.finder() call
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
connection_params.insert(0, client)
query = filter_options[0]
create_ngrams = filter_options[1]
projection = filter_options[2]
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
operations = []
staging = []
progress = 0
cursor = mongo_base.finder(connection_params, query, False)
for document in cursor:
progress = progress + 1
staging.append(document)
if len(staging) == 3000:
settings.logger.debug(
"Progress: %s", (progress * 100) / partition[1])
for job in text_preprocessing.parallel_emotion_coverage(staging, projection):
if job:
operations.append(job)
else:
pass
staging = []
if create_ngrams:
cleaned_result = text_preprocessing.clean_tweet_text(document[
projection], True)
xgrams = ([(text_preprocessing.create_ngrams(cleaned_result[0], i))
for i in range(1, 6)])
operations.append(UpdateOne({"_id": document["_id"]}, {
"$set": {"unigrams": xgrams[0], "bigrams": xgrams[1], "trigrams": xgrams[2],
"quadgrams": xgrams[3], "pentagrams": xgrams[4]}}, upsert=False))
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
if (len(staging) % 3000) != 0:
for job in text_preprocessing.parallel_emotion_coverage(staging, projection):
if job:
operations.append(job)
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
def linear_test(connection_params, filter_options):
""" Iterate the specified collection and store the ObjectId
of documents that have been tagged as being subjective with a negative sentiment.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
filter_options (list): Contains a list of filter conditions as follows:
0: target_collection (str): Name of output collection.
1: subj_check (bool): Check text for subjectivity.
2: sent_check (bool): Check text for sentiment.
3: porn_black_list (list): List of porn keywords.
4: hs_keywords (list) HS corpus.
5: black_list (list) Custom words to filter on.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
target_collection = filter_options[0]
subj_check = filter_options[1]
sent_check = filter_options[2]
porn_black_list = filter_options[3]
hs_keywords = filter_options[4]
black_list = filter_options[5]
# Store the documents for our bulkwrite
staging = []
operations = []
# Keep track of how often we match an ngram in our blacklist
porn_black_list_counts = dict.fromkeys(porn_black_list, 0)
cursor_count = dbo[collection].count()
progress = 0
cursor = dbo[collection].find({"text": {"$ne": None}},
{"text": 1, "created_at": 1, "coordinates": 1,
"place": 1, "user": 1, "source": 1,
"in_reply_to_user_id_str": 1}, no_cursor_timeout=True)
for document in cursor:
progress = progress + 1
set_intersects = text_preprocessing.do_create_ngram_collections(
document["text"].lower(), [porn_black_list, hs_keywords, black_list])
# unigram_intersect = set_intersects[0]
ngrams_intersect = set_intersects[1]
hs_keywords_intersect = set_intersects[2]
black_list_intersect = set_intersects[3]
if not ngrams_intersect and not black_list_intersect and hs_keywords_intersect:
staging.append(document)
else:
# No intersections, skip entry and update blacklist count
for token in ngrams_intersect:
porn_black_list_counts[token] += 1
# Send once every settings.BULK_BATCH_SIZE in batch
if len(staging) == settings.BULK_BATCH_SIZE:
settings.logger.debug(
"Progress: %s", (progress * 100) / cursor_count)
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
staging = []
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
operations = []
if (len(staging) % settings.BULK_BATCH_SIZE) != 0:
for job in text_preprocessing.parallel_preprocess(staging, hs_keywords, subj_check, sent_check):
if job:
operations.append(InsertOne(job))
else:
pass
if operations:
_ = mongo_base.do_bulk_op(dbo, target_collection, operations)
def build_annotation_experiment(word, sample_size):
""" Accepts a list of words and randomly samples from several datasets.
"""
core_tweets = [
["twitter", "tweets"],
["uselections", "tweets"],
["inauguration", "tweets"],
["inauguration_no_filter", "tweets"],
["unfiltered_stream_May17", "tweets"],
["manchester_event", "tweets"]
]
hate_corpus = [
["dailystormer_archive", "d_stormer_titles"],
["twitter", "melvyn_hs_users"]
]
twitter_clean = {}
twitter_hate = {}
hate_community = {}
for connection_params in hate_corpus:
pipeline = [
{"$match": {"$and": [{"tokens": {"$in": [word]}}, {
"has_hs_keywords": False}]}},
{"$project": {"preprocessed_txt": 1, "_id": 1}},
{"$sample": {"size": sample_size}}]
results = mongo_base.aggregate(connection_params, pipeline)
for entry in results:
hate_community[entry["_id"]] = {"database": connection_params[0],
"collection": connection_params[1],
"text": entry["preprocessed_txt"]}
for connection_params in core_tweets:
pipeline = [
{"$match": {"$and": [{"tokens": {"$in": [word]}}, {
"has_hs_keywords": False}]}},
{"$project": {"preprocessed_txt": 1, "_id": 1}},
{"$sample": {"size": sample_size}}]
results = mongo_base.aggregate(connection_params, pipeline)
for entry in results:
twitter_clean[entry["_id"]] = {"database": connection_params[0],
"collection": connection_params[1],
"text": entry["preprocessed_txt"]}
pipeline = [
{"$match": {"$and": [{"tokens": {"$in": [word]}}, {
"has_hs_keywords": True}]}},
{"$project": {"preprocessed_txt": 1, "_id": 1}},
{"$sample": {"size": sample_size}}]
results = mongo_base.aggregate(connection_params, pipeline)
for entry in results:
twitter_hate[entry["_id"]] = {"database": connection_params[0],
"collection": connection_params[1],
"text": entry["preprocessed_txt"]}
results = {}
results["twitter_clean"] = dict(
itertools.islice(twitter_clean.items(), sample_size))
results["twitter_hate"] = dict(
itertools.islice(twitter_hate.items(), sample_size))
results["hate_community"] = dict(itertools.islice(
hate_community.items(), sample_size))
return results
```
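The candidate-selection functions above all share the same calling convention of `connection_params`, `filter_options` and a `(skip, limit)` partition. The sketch below shows how `select_general_candidates` might be driven for a single partition; the module path, the output collection name, the `porn_ngrams` file and `settings.WORDLIST_PATH` are assumptions for illustration, not taken from the repository.
```python
# Hypothetical driver; module path, collection names, and keyword files are placeholders.
from modules.db import mongo_search_pipelines  # assumed module name
from modules.utils import file_ops, settings

porn_black_list = file_ops.read_csv_file("porn_ngrams", settings.WORDLIST_PATH)  # assumed file/path
hs_keywords = set(file_ops.read_csv_file("refined_hs_keywords", settings.TWITTER_SEARCH_PATH))
black_list = []

connection_params = ["twitter", "tweets"]
filter_options = [
    {},                     # 0: query
    "general_candidates",   # 1: target_collection (assumed name)
    False,                  # 2: subj_check
    False,                  # 3: sent_check
    porn_black_list,        # 4: porn keywords
    hs_keywords,            # 5: HS corpus
    black_list,             # 6: custom blacklist
]
# Process the first 50,000 documents of the collection (skip=0, limit=50000).
mongo_search_pipelines.select_general_candidates(connection_params, filter_options, (0, 50000))
```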
#### File: modules/pattern_classifier/PatternVectorizer.py
```python
import regex
import pandas as pd
import numpy as np
class PatternVectorizer:
def __init__(self, patterns, binary=False):
self.binary = binary
vocabulary = pd.DataFrame()
vocabulary['patterns'] = patterns
vocabulary['regex'] = vocabulary.patterns.apply(
lambda p: regex.compile(PatternVectorizer.pattern_to_regexp(p))
)
self.vocabulary = vocabulary
def transform(self, documents):
X = np.array([*map(lambda doc: self.count_vocab(doc), documents)], dtype=np.int32)
if self.binary:
X[X>0] = 1
return X
def count_vocab(self, text):
return self.vocabulary.regex.apply(lambda voc: len(voc.findall(text)))
@classmethod
def token_to_regexp(cls, token):
tok_to_reg = {
'.+': "((?![@,#])[\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<hashtag>': "#([\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<usermention>': "@([\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<url>': "http://([\\p{L}\\p{M}*\\p{N}_\\.\\/]+|(?![@,#])\\p{Punct}+)"
}
return tok_to_reg.get(token) or token
@classmethod
def pattern_to_regexp(cls, pattern_str):
delimRegex = "((?![@,#])\\b|\\p{Z}+|$|^|(?![@,#])\\p{Punct})"
patt = pattern_str.strip()
tokens = patt.split(" ")
tokens_reg = map(lambda t: cls.token_to_regexp(t),tokens)
pattern = delimRegex + delimRegex.join(tokens_reg) + delimRegex
return pattern
```
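A minimal usage sketch for PatternVectorizer, assuming the class is importable from this module; the patterns and documents are illustrative only.
```python
# Illustrative only: the patterns and documents are made up.
from modules.pattern_classifier.PatternVectorizer import PatternVectorizer  # assumed import path

patterns = ["i hate .+", "<usermention> is a .+"]
pv = PatternVectorizer(patterns, binary=True)

documents = ["i hate mondays", "@someone is a legend", "nothing to match here"]
X = pv.transform(documents)
# X is a (3, 2) numpy array; with binary=True each cell is 1 if the
# corresponding pattern matched the document at least once, else 0.
```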
#### File: modules/preprocessing/db_cleaning.py
```python
import plotly
from plotly.graph_objs import Scatter, Layout
from ..utils import settings
from ..utils import file_ops
from ..utils import notifiers
from ..db import mongo_base
from ..db import mongo_data_filters
from ..db import mongo_complex
@notifiers.do_cprofile
def run_get_language_distribution(connection_params):
"""Test and print results of aggregation
Args:
        connection_params (list): Contains connection objects and database parameters.
"""
lang_list = mongo_base.get_language_list(connection_params)
cursor = mongo_base.get_language_distribution(
connection_params, lang_list)
file_ops.write_json_file("language_distribution",
settings.OUTPUT_PATH, list(cursor))
@notifiers.do_cprofile
def run_get_top_k_users(connection_params, lang_list, field_name):
"""Test and print results of top k aggregation
"""
cursor = mongo_complex.get_top_k_users(connection_params, lang_list,
field_name, settings.USER_MENTIONS_LIMIT)
file_ops.write_json_file("user_distribution",
settings.OUTPUT_PATH, list(cursor))
@notifiers.do_cprofile
def run_get_top_k_hashtags(connection_params, lang_list, field_name, k_value):
"""Test and print results of top k aggregation
"""
cursor = mongo_complex.get_top_k_hashtags(
connection_params, lang_list, field_name, settings.HASHTAG_LIMIT, k_value)
file_ops.write_json_file("hashtag_distribution",
settings.OUTPUT_PATH, list(cursor))
def generate_bar_chart(chart_title):
"""Generate a plotly bar_chart
"""
json_obj = file_ops.read_json_file("hashtag_dist_en", settings.OUTPUT_PATH)
data_x = []
data_y = []
for document in json_obj:
data_x.append(document["hashtag"])
data_y.append(document["count"])
plotly.offline.plot({
"data": [Scatter(x=data_x[0:10], y=data_y[0:10])],
"layout": Layout(title=chart_title)
})
def run_retweet_removal(connection_params):
"""Start the retweet removal task.
Stage 1 in preprocessing pipeline.
"""
time1 = notifiers.time()
db_response = mongo_data_filters.retweet_removal(connection_params)
time2 = notifiers.time()
result = db_response
settings.logger.debug(result.modified_count)
notifiers.send_job_completion(
[time1, time2], ["run_retweet_removal", connection_params[0] + ": Retweet Removal"])
def run_create_indexes(connection_params):
"""Init indexes.
Stage 2 in preprocessing pipeline.
"""
time1 = notifiers.time()
mongo_data_filters.create_indexes(connection_params)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], ["create_indexes", connection_params[0] + ": Index creation took"])
def run_field_removal(connection_params):
"""Start the field removal task.
Stage 3 in preprocessing pipeline.
"""
time1 = notifiers.time()
db_response = mongo_data_filters.field_removal(connection_params)
time2 = notifiers.time()
result = db_response
settings.logger.debug(result.modified_count)
notifiers.send_job_completion(
[time1, time2], ["field_removal", connection_params[0] + ": Field Removal took"])
time1 = notifiers.time()
db_response = mongo_data_filters.quoted_status_field_removal(
connection_params)
time2 = notifiers.time()
result = db_response
settings.logger.debug(result.modified_count)
notifiers.send_job_completion(
[time1, time2], ["quoted_status_field_removal", connection_params[0] + ": Quoted_status Field Removal took"])
def run_language_trimming(connection_params, lang_list):
"""Start the language trimming task.
Stage 4 in preprocessing pipeline.
"""
time1 = notifiers.time()
db_response = mongo_data_filters.language_trimming(
connection_params, lang_list)
time2 = notifiers.time()
result = db_response
settings.logger.debug(result.modified_count)
notifiers.send_job_completion(
[time1, time2], ["language_trimming", connection_params[0] + ": Language trimming took"])
def run_field_flattening(connection_params, depth, job_name, field_params):
"""Start the field flattening task.
Stage 5 in preprocessing pipeline.
"""
field_name = field_params[0]
field_to_set = field_params[1]
field_to_extract = field_params[2]
time1 = notifiers.time()
if len(field_params) == 3:
mongo_data_filters.field_flattening_base(
connection_params, depth, field_name, field_to_set, field_to_extract)
if len(field_params) == 6:
mongo_data_filters.field_flattening_complex(
connection_params, depth, field_params)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], [str(job_name) + " ", connection_params[0] + " " + str(job_name)])
def run_parse_extended_tweet(connection_params, depth, job_name):
"""Start the field flattening task.
Stage 6 in preprocessing pipeline.
"""
time1 = notifiers.time()
mongo_data_filters.parse_extended_tweet(connection_params, depth)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], [str(job_name) + " ", connection_params[0] + " " + str(job_name)])
def run_final_field_removal(connection_params, job_name):
"""Start the final field removal task.
Stage 7 in preprocessing pipeline.
"""
time1 = notifiers.time()
mongo_data_filters.final_field_removal(connection_params)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], [str(job_name) + " ", connection_params[0] + " " + str(job_name)])
def run_clean_source_field(connection_params, job_name):
"""Start the clean source field task.
Stage 8 in preprocessing pipeline.
"""
time1 = notifiers.time()
mongo_data_filters.clean_source_field(connection_params)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], [str(job_name) + " ", connection_params[0] + " " + str(job_name)])
def run_update_missing_text(connection_params):
""" Start the missing tweet text replacement job
"""
client = mongo_base.connect()
connection_params.insert(0, client)
time1 = notifiers.time()
db_response = mongo_base.update_missing_text(connection_params)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], ["update_missing_text", connection_params[1] + ": Replace missing text took " + str(
db_response[0]) + "Not Found: " + str(db_response[1]) + "Total Requests: "
+ str(db_response[2])])
def preprocessing_pipeline():
""" Handle DB operations"""
job_names = ["hashtags", "entities.urls", "user_mentions", "media"]
field_names = ["entities.hashtags", "entities.urls",
"entities.user_mentions", "entities.media"]
fields_to_set = ["hashtags", "urls",
"user_mentions", "screen_name", "id_str", "media", "url", "type"]
field_to_extract = [".text", ".expanded_url",
".screen_name", ".id_str", ".media_url", ".type"]
hashtag_args = [field_names[0], fields_to_set[0], field_to_extract[0]]
url_args = [field_names[1], fields_to_set[1], field_to_extract[1]]
user_mentions_args = [field_names[2], fields_to_set[2], fields_to_set[
3], fields_to_set[4], field_to_extract[2], field_to_extract[3]]
media_args = [field_names[3], fields_to_set[5], fields_to_set[
6], fields_to_set[7], field_to_extract[4], field_to_extract[5]]
job_list = [
["twitter", "melvyn_hs_users"],
["manchester_event", "tweets"],
["unfiltered_stream_May17", "tweets"]
]
for job in job_list:
# Remove retweets
run_retweet_removal(job)
# Create Indexes
run_create_indexes(job)
# Remove unwanted and redundant fields
run_field_removal(job)
run_language_trimming(job, ['en'])
# Hashtags
run_field_flattening(
job, "top_level", job_names[0], hashtag_args)
# Urls
run_field_flattening(
job, "top_level", job_names[1], url_args)
# User mentions
run_field_flattening(
job, "top_level", job_names[2], user_mentions_args)
# # Media
run_field_flattening(
job, "top_level", job_names[3], media_args)
# Quoted_status Hashtags
run_field_flattening(job, "quoted_status",
job_names[0], hashtag_args)
# Quoted_status Urls
run_field_flattening(job, "quoted_status",
job_names[1], url_args)
# Quoted_status User mentions
run_field_flattening(job, "quoted_status",
job_names[2], user_mentions_args)
# Quoted_status Media
run_field_flattening(job, "quoted_status",
job_names[3], media_args)
# Parse extended tweet
run_parse_extended_tweet(
job, "top_level", "Top Level Extended Tweet")
run_parse_extended_tweet(
job, "quoted_status", "Quoted Status Extended Tweet")
# Remove final field set
run_final_field_removal(job, "Final Field Removal")
# Clean source field
run_clean_source_field(job, "Clean Source Field")
def fetch_missing_text_pipeline():
""" Get missing text from Twitter API
"""
connection_params_1 = ["twitter", "NAACL_SRW_2016"]
connection_params_2 = ["twitter", "NLP_CSS_2016_expert"]
run_update_missing_text(connection_params_1)
run_update_missing_text(connection_params_2)
```
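The staged helpers above are normally driven by preprocessing_pipeline(); the sketch below shows how the first few stages could be run against a single collection instead. The database and collection names are placeholders.
```python
# Placeholder database/collection names; mirrors the stage order used in preprocessing_pipeline().
from modules.preprocessing import db_cleaning  # assumed import path

job = ["my_database", "my_tweets"]
db_cleaning.run_retweet_removal(job)             # stage 1: drop retweets
db_cleaning.run_create_indexes(job)              # stage 2: build indexes
db_cleaning.run_field_removal(job)               # stage 3: drop unwanted fields
db_cleaning.run_language_trimming(job, ["en"])   # stage 4: keep English only
# Stages 5-8 (field flattening, extended-tweet parsing, final field removal,
# source cleaning) follow the same pattern as in preprocessing_pipeline() above.
```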
#### File: modules/preprocessing/feature_prep.py
```python
import itertools
import spacy
import joblib
from pymongo import InsertOne, UpdateOne
from ..utils import settings
from ..utils import file_ops
from ..utils import notifiers
from ..utils import text_preprocessing
from ..db import mongo_base
from ..db import elasticsearch_base
from ..utils.CustomTwokenizer import CustomTwokenizer, EmbeddingTwokenizer
from ..pattern_classifier import SimpleClassifier, PatternVectorizer
def init_nlp_pipeline(parser, tokenizer=CustomTwokenizer):
"""Initialize spaCy nlp pipeline
The params are boolean values that determine if that feature should
be loaded with the pipeline.
Returns:
nlp: spaCy language model
"""
if parser is False:
nlp = spacy.load(settings.SPACY_EN_MODEL, create_make_doc=tokenizer,
parser=False)
else:
nlp = spacy.load(settings.SPACY_EN_MODEL,
create_make_doc=tokenizer)
return nlp
def load_emotion_classifier():
"""Loads persisted classes for the emotion classifier
Returns:
SimpleClassifier, PatternVectorizer classes
"""
cls_persistence = settings.MODEL_PATH + "simple_classifier_model.pkl.compressed"
pv_persistence = settings.MODEL_PATH + "pattern_vectorizer.pkl.compressed"
_cls = joblib.load(cls_persistence)
_pv = joblib.load(pv_persistence)
return (_cls, _pv)
def extract_lexical_features_test(nlp, tweet_list):
"""Provides tokenization, POS and dependency parsing
Args:
nlp (spaCy model): Language processing pipeline
"""
result = []
texts = (tweet for tweet in tweet_list)
for doc in nlp.pipe(texts, batch_size=10000, n_threads=3):
settings.logger.info(doc)
for parsed_doc in doc:
result.append((parsed_doc.orth_, parsed_doc.tag_))
return result
def run_parallel_pipeline(connection_params, method, job_details):
""" Generic function for processing a collection in parallel.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
2: projection (str): Document field name to return.
        method (function): Worker function applied to each partition.
        job_details (list): Info to insert in the job completion notification.
"""
client = mongo_base.connect()
query = {}
projection = connection_params[2]
# query["filter"] = {"conllFormat":{"$exists":False}}
query["filter"] = {}
query["projection"] = {projection: 1}
query["limit"] = 0
query["skip"] = 0
query["no_cursor_timeout"] = True
connection_params.insert(0, client)
collection_size = mongo_base.finder(connection_params, query, True)
del connection_params[0]
client.close()
if collection_size == 0:
return
num_cores = 4
partition_size = collection_size // num_cores
partitions = [(i, partition_size)
for i in range(0, collection_size, partition_size)]
# Account for lists that aren't evenly divisible, update the last tuple to
# retrieve the remainder of the items
partitions[-1] = (partitions[-1][0], (collection_size - partitions[-1][0]))
settings.logger.debug(
"Starting parallel job with %s cores and batch size of %s", num_cores, partition_size)
time1 = notifiers.time()
joblib.Parallel(n_jobs=num_cores)(joblib.delayed(method)(
connection_params, query, partition) for partition in partitions)
time2 = notifiers.time()
notifiers.send_job_completion(
[time1, time2], [job_details[0], connection_params[1] + ": " + job_details[1]])
def feature_extraction_pipeline(connection_params, query, partition, usage=None):
"""Handles the extraction of features needed for the model.
Updates collection in place.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
2: projection (str): Document field name to return.
3: usage (str): Operation to apply.
"""
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
nlp = init_nlp_pipeline(True)
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
projection = connection_params[2]
usage = connection_params[3]
connection_params.insert(0, client)
# _cls, _pv = load_emotion_classifier()
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
cursor = mongo_base.finder(connection_params, query, False)
hs_keywords = set(file_ops.read_csv_file(
"refined_hs_keywords", settings.TWITTER_SEARCH_PATH))
# Makes a copy of the MongoDB cursor, to the best of my
# knowledge this does not attempt to exhaust the cursor
cursor_1, cursor_2 = itertools.tee(cursor, 2)
object_ids = (object_id["_id"] for object_id in cursor_1)
tweet_texts = (tweet_text[projection] for tweet_text in cursor_2)
operations = []
staging = []
count = 0
# https://github.com/explosion/spaCy/issues/172
docs = nlp.pipe(tweet_texts, batch_size=15000, n_threads=4)
for object_id, doc in zip(object_ids, docs):
# emotion_vector.append(doc.text)
count += 1
settings.logger.debug("Document %s out of %s", count, partition[1])
# Construct a new tweet object to be appended
parsed_tweet = {}
parsed_tweet["_id"] = object_id
if usage == "conll":
parsed_tweet = text_preprocessing.prep_dependency_features(
parsed_tweet, doc, usage)
else:
parsed_tweet["text"] = doc.text
if usage == "analysis":
parsed_tweet["tokens"] = list(set([token.lower_ for token in doc if not(
token.lower_ in text_preprocessing.STOP_LIST or token.lower_ == "user_mention"
or token.is_punct or token.lower_ == "rt" or token.is_digit
or token.prefix_ == "#")]))
parsed_tweet = text_preprocessing.prep_linguistic_features(
parsed_tweet, hs_keywords, doc, usage)
parsed_tweet = text_preprocessing.prep_dependency_features(
parsed_tweet, doc, usage)
elif usage == "features":
parsed_tweet["tokens"] = list(set([token.lower_ for token in doc if not(
token.lower_ in text_preprocessing.STOP_LIST or token.lower_ == "user_mention"
or token.is_punct or token.lower_ == "rt" or token.is_digit
or token.prefix_ == "#")]))
parsed_tweet = text_preprocessing.prep_linguistic_features(
parsed_tweet, hs_keywords, doc, usage)
parsed_tweet = text_preprocessing.prep_dependency_features(
parsed_tweet, doc, usage)
# parsed_tweet["related_keywords"] = [[w.lower_ for w in text_preprocessing.get_similar_words(
# nlp.vocab[token], settings.NUM_SYNONYMS)] for token in
# text_preprocessing.get_keywords(doc)]
staging.append(parsed_tweet)
if len(staging) == settings.BULK_BATCH_SIZE:
settings.logger.debug("Progress %s out of %s", count, partition[1])
operations = update_schema(staging)
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
staging = []
if staging:
operations = update_schema(staging)
_ = mongo_base.do_bulk_op(dbo, collection, operations)
def update_schema(staging):
""" Short function for appending features"""
operations = []
for _idx, parsed_tweet in enumerate(staging):
update_values = {}
for key, val in parsed_tweet.items():
update_values[key] = val
update_values.pop("_id", None)
operations.append(UpdateOne({"_id": parsed_tweet["_id"]}, {
"$set": update_values}, upsert=False))
return operations
def unpack_emotions(staging, emotion_vector, _pv, _cls):
"""Vectorize a list of tweets and return the emotion emotion_coverage
for each entry.
Args:
staging (list): List of tweet objects.
emotion_vector (list): List of tweet text.
_pv (PatternVectorizer)
_cls (SimpleClassifier)
Returns:
list of MongoDB InsertOne operations.
"""
import pandas as pd
emotion_vector = pd.Series(
emotion_vector, index=range(0, len(emotion_vector)))
emotion_vector = _pv.transform(emotion_vector)
emotion_coverage = _cls.get_top_classes(
emotion_vector, ascending=True, n=2)
emotion_min_score = _cls.get_max_score_class(emotion_vector)
operations = []
for idx, parsed_tweet in enumerate(staging):
parsed_tweet["emotions"] = {}
parsed_tweet["emotions"]["first"] = emotion_coverage[idx][0]
parsed_tweet["emotions"]["second"] = emotion_coverage[idx][1]
parsed_tweet["emotions"]["min"] = emotion_min_score[idx]
operations.append(InsertOne(parsed_tweet))
return operations
def prep_word_embedding_text(connection_params, query, partition):
""" Read a MongoDB collection and store the preprocessed text
as a separate field. Text is preprocessed for training a word embedding model.
Removes URLS, numbers, and stopwords, normalizes @usermentions. Updates the passed collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
2: projection (str): Document field name to return.
query (dict): Query to execute.
partition (tuple): Contains skip and limit values.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
projection = connection_params[2]
connection_params.insert(0, client)
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
operations = []
nlp = init_nlp_pipeline(False, tokenizer=EmbeddingTwokenizer)
cursor = mongo_base.finder(connection_params, query, False)
# Makes a copy of the MongoDB cursor, to the best of my
# knowledge this does not attempt to exhaust the cursor
count = 0
cursor_1, cursor_2 = itertools.tee(cursor, 2)
object_ids = (object_id["_id"] for object_id in cursor_1)
tweet_texts = (tweet_text[projection] for tweet_text in cursor_2)
docs = nlp.pipe(tweet_texts, batch_size=15000, n_threads=4)
for object_id, doc in zip(object_ids, docs):
count += 1
parsed_tweet = {}
parsed_tweet["_id"] = object_id
parsed_tweet["word_embedding_txt"] = str(doc.text).lower()
operations.append(UpdateOne({"_id": parsed_tweet["_id"]}, {
"$set": {"word_embedding_txt": parsed_tweet["word_embedding_txt"]}}, upsert=False))
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
settings.logger.debug("Progress %s out of %s", count, partition[1])
if operations:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
def prep_preprocessed_text(connection_params, query, partition):
""" Read a MongoDB collection and store the preprocessed text
as a separate field. Text is preprocessed for generic usage.
Removes URLS, normalizes @usermentions, and lowercases text. Updates the passed collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
2: projection (str): Document field name to return.
query (dict): Query to execute.
partition (tuple): Contains skip and limit values.
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
projection = connection_params[2]
connection_params.insert(0, client)
# Set skip limit values
query["skip"] = partition[0]
query["limit"] = partition[1]
# Setup client object for bulk op
bulk_client = mongo_base.connect()
dbo = bulk_client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
operations = []
nlp = init_nlp_pipeline(False)
cursor = mongo_base.finder(connection_params, query, False)
# Makes a copy of the MongoDB cursor, to the best of my
# knowledge this does not attempt to exhaust the cursor
count = 0
cursor_1, cursor_2 = itertools.tee(cursor, 2)
object_ids = (object_id["_id"] for object_id in cursor_1)
tweet_texts = (tweet_text[projection] for tweet_text in cursor_2)
docs = nlp.pipe(tweet_texts, batch_size=15000, n_threads=4)
for object_id, doc in zip(object_ids, docs):
count += 1
parsed_tweet = {}
parsed_tweet["_id"] = object_id
parsed_tweet["preprocessed_txt"] = str(doc.text).lower()
operations.append(UpdateOne({"_id": parsed_tweet["_id"]}, {
"$set": {"preprocessed_txt": parsed_tweet["preprocessed_txt"]}}, upsert=False))
if len(operations) == settings.BULK_BATCH_SIZE:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
operations = []
settings.logger.debug("Progress %s out of %s", count, partition[1])
if operations:
_ = mongo_base.do_bulk_op(dbo, collection, operations)
def run_fetch_es_tweets():
""" Fetch tweets from elasticsearch
"""
connection_params = ["twitter", "melvyn_hs_users"]
lookup_list = file_ops.read_csv_file(
'melvyn_hs_user_ids', settings.TWITTER_SEARCH_PATH)
elasticsearch_base.migrate_es_tweets(connection_params, [
"192.168.2.33", "tweets", "tweet", "user.id_str", lookup_list])
def start_store_preprocessed_text():
""" Start the job for both word embedding and generic text preprocessing
"""
job_list = [
["twitter_annotated_datasets", "NAACL_SRW_2016", "text"],
["twitter_annotated_datasets",
"NLP_CSS_2016_expert", "text"],
["twitter_annotated_datasets", "crowdflower", "text"],
["dailystormer_archive", "d_stormer_documents", "article"],
["twitter", "melvyn_hs_users", "text"],
["manchester_event", "tweets", "text"],
["inauguration", "tweets", "text"],
["uselections", "tweets", "text"],
["twitter", "candidates_hs_exp6_combo_3_Mar_9813004", "text"],
["unfiltered_stream_May17", "tweets", "text"],
["twitter", "tweets", "text"],
["inauguration_no_filter", "tweets", "text"]
]
for job in job_list:
run_parallel_pipeline(job, prep_word_embedding_text, ["prep_word_embedding_text",
"Preprocess embedding text"])
run_parallel_pipeline(
job, prep_preprocessed_text, ["prep_preprocessed_text", "Preprocess Text"])
def start_feature_extraction():
"""Run operations"""
job_list = [
["twitter_annotated_datasets", "NAACL_SRW_2016",
"preprocessed_txt", "features"],
["twitter_annotated_datasets",
"NLP_CSS_2016_expert", "preprocessed_txt", "features"],
["twitter_annotated_datasets", "crowdflower",
"preprocessed_txt", "features"],
["dailystormer_archive", "d_stormer_documents",
"preprocessed_txt", "features"],
["twitter", "melvyn_hs_users", "preprocessed_txt", "features"],
["manchester_event", "tweets", "preprocessed_txt", "features"],
["inauguration", "tweets", "preprocessed_txt", "features"],
["unfiltered_stream_May17", "tweets", "preprocessed_txt", "features"],
["twitter", "tweets", "preprocessed_txt", "features"],
["inauguration_no_filter", "tweets", "preprocessed_txt", "features"],
["uselections", "tweets", "preprocessed_txt", "features"]
]
conll_list = [
# ["dailystormer_archive", "d_stormer_documents", "preprocessed_txt", "conll"]
["twitter", "melvyn_hs_users", "preprocessed_txt", "conll"],
["manchester_event", "tweets", "preprocessed_txt", "conll"],
["inauguration", "tweets", "preprocessed_txt", "conll"],
["unfiltered_stream_May17", "tweets", "preprocessed_txt", "conll"],
["twitter", "tweets", "preprocessed_txt", "conll"],
["inauguration_no_filter", "tweets", "preprocessed_txt", "conll"],
["uselections", "tweets", "preprocessed_txt", "conll"]
]
# for job in job_list:
# run_parallel_pipeline(
# job[0:4], feature_extraction_pipeline, [job[0] + "_" + job[2], "Extract
# Features"])
for job in conll_list:
run_parallel_pipeline(
job[0:4], feature_extraction_pipeline, [job[0] + "_" + job[2], "Extract Conll Features"])
```
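run_parallel_pipeline is the entry point for every worker in this module; the sketch below shows how a single collection might first be preprocessed and then have features extracted. The collection names come from the job lists above, but the call site itself is illustrative.
```python
# Illustrative call site; collection names are taken from the job lists above.
from modules.preprocessing import feature_prep  # assumed import path

# Store a lower-cased, URL/user-mention normalised copy of the text.
feature_prep.run_parallel_pipeline(
    ["twitter", "tweets", "text"],
    feature_prep.prep_preprocessed_text,
    ["prep_preprocessed_text", "Preprocess Text"])

# Then extract linguistic/dependency features from the preprocessed text.
feature_prep.run_parallel_pipeline(
    ["twitter", "tweets", "preprocessed_txt", "features"],
    feature_prep.feature_extraction_pipeline,
    ["twitter_preprocessed_txt", "Extract Features"])
```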
#### File: modules/utils/graphing.py
```python
from math import log10
from collections import Counter
import networkx as nx
from . import text_preprocessing
def init_digraph():
""" Initialize an empty directed graph"""
return nx.DiGraph()
def compose_graphs(G, H):
""" Return a new graph of G composed with H.
Args
----
G,H (networkX graph)
Returns:
C: A new graph with the same type as G.
"""
return nx.compose(G, H)
def compute_pagerank(G, alpha=0.85):
"""Return the PageRank of the nodes in the graph
Returns:
pagerank: (dict) of nodes with PageRank as value.
"""
return nx.pagerank_scipy(G, alpha=alpha)
def get_leaf_nodes(G):
    """Return a list of nodes in the graph that do not have children."""
    # successors() returns an iterator in networkx >= 2.0, so materialize it
    # before truth-testing.
    leafs = [node for node in G.nodes() if not list(G.successors(node))]
    return leafs
def get_parent_nodes(G):
    """Return a list of nodes in the graph that have children."""
    parents = [node for node in G.nodes() if list(G.successors(node))]
    return parents
def build_wordlist_dg(**kwargs):
""" Accept a target_word_list and builds a directed graph based on
the results returned by model.similar_by_word. Weights are initialized
to 1. For each word in target_word_list we call build_word_directed_graph and merge the results.
The idea is to build a similarity graph that increases the weight of an edge each
time a node appears in the similarity results.
Args
----
wordlist (list): List of words that will act as nodes.
model (gensim.models): Gensim word embedding model.
depth (int): Depth to restrict the search to.
hs_keywords (set)
topn (int): Number of words to check against in the embedding model, default=5.
"""
wordlist = kwargs["wordlist"]
model = kwargs["model"]
depth = kwargs["depth"]
hs_keywords = kwargs["hs_keywords"]
topn = 5 if "topn" not in kwargs else kwargs["topn"]
wordlist_graph = init_digraph()
model_vocab = set(model.index2word)
# Changing how we boost from HS keywords
single_plural_boost = set()
hs_checker = (word for word in hs_keywords if word in model_vocab)
for word in hs_checker:
single_plural = text_preprocessing.singles_plurals(word)
for entry in single_plural:
if entry in model_vocab:
single_plural_boost.add(entry)
hs_keywords = hs_keywords.union(single_plural_boost)
hs_checker = (word for word in hs_keywords if word in model_vocab)
boost_check = []
for hs_word in hs_checker:
boost_check.extend(
[word[0] for word in model.similar_by_word(hs_word, topn=20)])
boost_counter = Counter()
for word in boost_check:
boost_counter[word] += 1
for target_word in wordlist:
# do_hs_boosting = (
# hs_keywords and model_vocab and target_word in hs_keywords and
# target_word in model_vocab)
do_hs_boosting = (
hs_keywords and model_vocab and target_word in model_vocab)
if do_hs_boosting:
target_word_graph = build_word_dg(
target_word, model, depth, model_vocab=model_vocab, topn=topn, boost_counter=boost_counter)
wordlist_graph = nx.compose(wordlist_graph, target_word_graph)
elif not do_hs_boosting and target_word in model_vocab:
target_word_graph = build_word_dg(
target_word, model, depth, topn=topn)
wordlist_graph = nx.compose(wordlist_graph, target_word_graph)
return wordlist_graph
def build_word_dg(target_word, model, depth, model_vocab=None, boost_counter=None, topn=5):
""" Accept a target_word and builds a directed graph based on
the results returned by model.similar_by_word. Weights are initialized
to 1. Starts from the target_word and gets similarity results for it's children
and so forth, up to the specified depth.
Args
----
target_word (string): Root node.
model (gensim.models): Gensim word embedding model.
depth (int): Depth to restrict the search to.
topn (int): Number of words to check against in the embedding model, default=5.
"""
_DG = init_digraph()
seen_set = set()
do_hs_boosting = (
boost_counter and model_vocab and target_word in model_vocab)
if do_hs_boosting:
        weight_boost = (log10(float(model.vocab[target_word].count)) * boost_counter[target_word]
                        if target_word in boost_counter else 0)
_DG.add_weighted_edges_from([(target_word, word[0], weight_boost + word[1])
for word in model.similar_by_word(target_word, topn=topn)])
else:
_DG.add_weighted_edges_from([(target_word, word[0], word[1])
for word in model.similar_by_word(target_word, topn=topn)])
seen_set.add(target_word)
for _idx in range(1, depth):
current_nodes = _DG.nodes()
for node in current_nodes:
if node not in seen_set:
_DG.add_weighted_edges_from(
[(node, word[0], word[1]) for word in model.similar_by_word(node, topn=topn)])
seen_set.add(node)
return _DG
def build_converged_graph(**kwargs):
""" The idea here is to build a graph from a given list and expand the graph
until it converges or until a specified number of rounds. On each iteration we store
the nodes that were already seen and repeat the process with the graph node difference.
Args
----
wordlist (list): List of words that will act as nodes.
model (gensim.models): Gensim word embedding model.
depth (int): Depth to restrict the search to.
rounds (int): Number of times to repeat the process.
hs_keywords (set)
topn (int): Number of words to check against in the embedding model, default=5.
"""
rounds = kwargs["rounds"]
candidate_graph = init_digraph()
for _idx in range(0, rounds):
graph = build_wordlist_dg(**kwargs)
candidate_graph = compose_graphs(candidate_graph, graph)
leafs = get_leaf_nodes(candidate_graph)
kwargs["wordlist"] = list(leafs)
return candidate_graph
```
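A small sketch of how these helpers fit together with a gensim embedding model; the model path and seed word are placeholders, and the gensim API used (vocab, similar_by_word) matches the pre-4.0 interface assumed by the module.
```python
# Placeholder model path and seed word; assumes a gensim (< 4.0) KeyedVectors model
# and that the seed word is present in the model vocabulary.
from gensim.models import KeyedVectors
from modules.utils import graphing  # assumed import path

model = KeyedVectors.load("models/unfiltered_stream.kv")  # placeholder path
graph = graphing.build_word_dg("migrant", model, depth=2, topn=5)
ranks = graphing.compute_pagerank(graph)
# Highest-ranked nodes are the words most central to the similarity neighbourhood.
top_words = sorted(ranks, key=ranks.get, reverse=True)[:10]
```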
#### File: modules/utils/krippendorff_alpha.py
```python
from __future__ import print_function
try:
import numpy as np
except ImportError:
np = None
class KrippendorffAlpha:
def __init__(self, frequencies):
self.frequencies = frequencies
def nominal_metric(self, a, b):
"""For nominal data"""
return a != b
def interval_metric(self, a, b):
"""For interval data"""
return (a - b)**2
def ratio_metric(self, a, b):
"""For ratio data"""
return ((a - b) / (a + b))**2
def ordinal_metric(self, a, b):
"""For ordinal data
frequencies (Collections.Counter): stores the frequencies of ratings
"""
return (self.frequencies[a] + self.frequencies[b] - (self.frequencies[a] + self.frequencies[b]) / 2) ** 2
    def krippendorff_alpha(self, data, metric=None, force_vecmath=False, convert_items=float, missing_items=None):
'''
Calculate Krippendorff's alpha (inter-rater reliability):
data is in the format
[
{unit1:value, unit2:value, ...}, # coder 1
{unit1:value, unit3:value, ...}, # coder 2
... # more coders
]
or it is a sequence of (masked) sequences (list, numpy.array, numpy.ma.array,
e.g.) with rows corresponding to coders and columns to items
metric: function calculating the pairwise distance
force_vecmath: force vector math for custom metrics (numpy required)
convert_items: function for the type conversion of items (default: float)
missing_items: indicator for missing items (default: None)
'''
        # default to the interval metric, bound to this instance
        if metric is None:
            metric = self.interval_metric
        # number of coders
        _m = len(data)
        # set of constants identifying missing values
        maskitems = list(missing_items) if missing_items is not None else []
if np is not None:
maskitems.append(np.ma.masked_singleton)
# convert input data to a dict of items
units = {}
for _d in data:
try:
# try if d behaves as a dict
diter = _d.items()
except AttributeError:
# sequence assumed for d
diter = enumerate(_d)
for it, g in diter:
if g not in maskitems:
try:
its = units[it]
except KeyError:
its = []
units[it] = its
its.append(convert_items(g))
units = dict((it, d) for it, d in units.items()
if len(d) > 1) # units with pairable values
n = sum(len(pv) for pv in units.values()) # number of pairable values
if n == 0:
raise ValueError("No items to compare.")
np_metric = (np is not None) and (
(metric in (self.interval_metric, self.nominal_metric, self.ratio_metric)) or force_vecmath)
Do = 0.
for grades in units.values():
if np_metric:
gr = np.asarray(grades)
Du = sum(np.sum(metric(gr, gri)) for gri in gr)
else:
Du = sum(metric(gi, gj) for gi in grades for gj in grades)
Do += Du / float(len(grades) - 1)
Do /= float(n)
De = 0.
for g1 in units.values():
if np_metric:
d1 = np.asarray(g1)
for g2 in units.values():
De += sum(np.sum(metric(d1, gj)) for gj in g2)
else:
for g2 in units.values():
De += sum(metric(gi, gj) for gi in g1 for gj in g2)
De /= float(n * (n - 1))
return 1. - Do / De if (Do and De) else 1.
if __name__ == '__main__':
print("Example from http://en.wikipedia.org/wiki/Krippendorff's_Alpha")
data = (
"* * * * * 3 4 1 2 1 1 3 3 * 3", # coder A
"1 * 2 1 3 3 4 3 * * * * * * *", # coder B
"* * 2 1 3 4 4 * 2 1 1 3 3 * 4", # coder C
)
# missing = '*' # indicator for missing values
# # convert to 2D list of string items
# array = [d.split() for d in data]
# print("nominal metric: %.3f" % krippendorff_alpha(
# array, nominal_metric, missing_items=missing))
# print("interval metric: %.3f" % krippendorff_alpha(
# array, interval_metric, missing_items=missing))
```
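The commented-out example in __main__ still refers to the old module-level functions; with the class-based API the same Wikipedia data can be scored as sketched below, where '*' marks a missing rating. The import path is an assumption.
```python
# Self-contained sketch using the class-based API; '*' marks a missing rating.
from collections import Counter
from modules.utils.krippendorff_alpha import KrippendorffAlpha  # assumed import path

data = (
    "* * * * * 3 4 1 2 1 1 3 3 * 3",  # coder A
    "1 * 2 1 3 3 4 3 * * * * * * *",  # coder B
    "* * 2 1 3 4 4 * 2 1 1 3 3 * 4",  # coder C
)
array = [d.split() for d in data]
ka = KrippendorffAlpha(Counter())  # frequencies are only needed by the ordinal metric
print("nominal:  %.3f" % ka.krippendorff_alpha(array, metric=ka.nominal_metric, missing_items="*"))
print("interval: %.3f" % ka.krippendorff_alpha(array, metric=ka.interval_metric, missing_items="*"))
```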
#### File: modules/utils/model_helpers.py
```python
from math import log10
import joblib
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import IncrementalPCA
from sklearn.base import TransformerMixin, BaseEstimator
from . import file_ops
from . import settings
from . import visualization
from ..db import mongo_base
def fetch_as_df(connection_params, projection):
""" Takes MongoDB connection params and returns the specified
collection as a pandas dataframe.
Args:
connection_params (list): Contains connection objects and params as follows:
0: db_name (str): Name of database to query.
1: collection (str): Name of collection to use.
projection (dict): Dictionary of fields to return, returns all fields if blank.
"""
client = mongo_base.connect()
connection_params.insert(0, client)
query = {}
query["filter"] = {}
query["projection"] = projection
query["limit"] = 0
query["skip"] = 0
query["no_cursor_timeout"] = True
cursor = mongo_base.finder(connection_params, query, False)
_df = pd.DataFrame(list(cursor))
return _df
def run_experiment(X, y, pipeline, process_name, display_args, num_expts=1):
""" Accept features, labels and a model pipeline for running an underlying classifier model
Args:
X (pandas.Dataframe): Model features
y (pandas.Series): Feature labels.
pipeline (sklearn.pipeline): Underlying classifier model.
process_name (str): Model description.
display_args (list):
0: Boolean for printing confusion matrix.
1: Boolean for plotting confusion matrix.
        num_expts (int): Number of times to run the model.
"""
settings.logger.info("Predicting the labels of the test set...")
settings.logger.debug("%s documents", len(X))
settings.logger.debug("%s categories", len(y.value_counts()))
scores = list()
for i in tqdm(range(num_expts)):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, train_size=0.80)
model = pipeline.fit(X_train, y_train) # train the classifier
# apply the model to the test data
y_prediction = model.predict(X_test)
report = classification_report(y_test, y_prediction)
# compare the results to the gold standard
score = accuracy_score(y_prediction, y_test)
scores.append(score)
settings.logger.info("Classification Report: %s", process_name)
print(report)
cm = confusion_matrix(y_test, y_prediction)
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = cm.round(decimals=3)
if display_args[0]:
print("Confusion matrix: ")
print(cm)
if display_args[1]:
visualization.plot_confusion_matrix(
cm, y.unique(), process_name, process_name + "_cm")
# print(sum(scores) / num_expts)
def pca_reduction(vectors, num_dimensions, model_name):
""" Run dimensionality reduction using IncrementalPCA
Args:
vectors: Word vectors to be reduced.
num_dimensions (int): Number of dimensions to reduce to.
model_name (string)
"""
settings.logger.info(
"Reducing to %sD using IncrementalPCA...", num_dimensions)
ipca = IncrementalPCA(n_components=num_dimensions)
vectors = ipca.fit_transform(vectors)
joblib.dump(vectors, model_name, compress=True)
settings.logger.info("Reduction Complete")
return vectors
def run_gridsearch_cv(pipeline, X, y, param_grid, n_jobs, score):
""" Perfrom k-fold grid search across the data in order to fine tune
the parameters.
Args:
pipeline (sklearn.pipeline): Underlying classifier model.
X (pandas.Dataframe): Model features
y (pandas.Series): Feature labels.
param_grid (list): List of hyperparameters to validate.
n_jobs (int): Number of threads to utilize.
score (string): Scoring mechanism to utilize [recall or precision].
"""
print("Tuning hyper-parameters for {0}\n".format(score))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
    clf = GridSearchCV(pipeline, param_grid=param_grid, cv=2,
                       n_jobs=n_jobs, scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:\n")
print(clf.best_params_)
print("Grid scores on development set:\n")
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r\n" % (mean, std * 2, params))
print("Detailed classification report:\n")
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.\n")
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
def evaluate_prediction(predictions, target):
print(classification_report(target, predictions))
print("Accuracy: ", accuracy_score(target, predictions))
def get_feature_stats(vectorizer, X, skb, feature_names):
""" Returns the number of features after vectorization,
also returns the top features as determined by a chi square test.
Args:
vectorizer (sklearn vectorizer)
X (pandas.Dataframe): Raw features.
skb (sklearn.selectKBest)
        feature_names (list): Optional list of feature names; when provided, the top features are also returned.
"""
vectorizer.fit(X)
if feature_names:
return len(vectorizer.get_feature_names()), [feature_names[i] for i in skb.get_support(indices=True)]
else:
return len(vectorizer.get_feature_names())
def empty_analyzer():
""" Lambda for use with the following class.
"""
analyzer = lambda x: x
return analyzer
class TextExtractor(BaseEstimator, TransformerMixin):
""" Adapted from code by @zacstewart
https://github.com/zacstewart/kaggle_seeclickfix/blob/master/estimator.py
Also see <NAME>'s excellent blogpost on pipelines:
http://zacstewart.com/2014/08/05/pipelines-of-featureunions-of-pipelines.html
"""
def __init__(self, column_name):
self.column_name = column_name
def transform(self, df):
""" Select the relevant column and return it as a numpy array
"""
# set the array type to be string
return np.asarray(df[self.column_name]).astype(str)
def fit(self, *_):
return self
class TextListExtractor(BaseEstimator, TransformerMixin):
""" Extract a list of values from a dataframe.
"""
def __init__(self, column_name):
self.column_name = column_name
def transform(self, df):
return df[self.column_name].tolist()
def fit(self, *_):
return self
class Apply(BaseEstimator, TransformerMixin):
"""Applies a function f element-wise to the numpy array
"""
def __init__(self, fn):
self.fn = np.vectorize(fn)
def transform(self, data):
# note: reshaping is necessary because otherwise sklearn
# interprets 1-d array as a single sample
return self.fn(data.reshape(data.size, 1))
def fit(self, *_):
return self
class BooleanExtractor(BaseEstimator, TransformerMixin):
def __init__(self, column_name):
self.column_name = column_name
def transform(self, df):
        # select the relevant column and return it as a numpy array of ints
        # (np.int is deprecated in recent numpy releases, so use the builtin int)
        return np.asarray(df[self.column_name]).astype(int)
def fit(self, *_):
return self
def get_els_word_weights(vocab, total_doc_count, hs_keywords):
""" Calculate word frequencies and IDF weights from the vocab results returned by
elasticsearch_base.aggregate()
Args:
vocab (dict): Dictionary of token:doc_count values. doc_count is the number of
documents where that token appears.
total_doc_count (int): Total number of documents in the corpus.
Returns:
hs_vocab_freqs (dict): subset of token:frequency pairs for tokens in the HS keywords corpus.
vocab_freqs (dict): subset of token:frequency pairs for the entire vocab.
*_idf (dict): token:idf pairs.
P(wi) = number of docs with (wi) / count(total number of documents)
IDF = log(total number of documents / number of docs with (wi))
"""
vocab_frequencies = {}
hs_vocab_frequencies = {}
vocab_idf = {}
hs_vocab_idf = {}
vocab_frequencies = {token:
float(val) / float(total_doc_count) for token, val in vocab.items()}
vocab_idf = {token: log10(float(total_doc_count) / float(val))
for token, val in vocab.items()}
hs_vocab_frequencies = {token: vocab_frequencies[
token] for token in vocab_frequencies if token in hs_keywords}
hs_vocab_idf = {token: vocab_idf[token]
for token in vocab_idf if token in hs_keywords}
return hs_vocab_frequencies, vocab_frequencies, hs_vocab_idf, vocab_idf
def get_overlapping_weights(vocab, comparison_vocab):
""" Accepts a pair of dictionary that stores token:weight and gets
the values for tokens that are in both vocabularies.
Args:
vocab (dict): token:weight pairs extracted from a corpus.
comparison_vocab (dict): The vocabulary to be checked.
Returns:
token_list_1, token_weight_1, token_list_2, token_weight_2 (list): List of tokens and
            weights respectively.
"""
vocab_tokens = set(vocab.keys())
comparison_tokens = set(comparison_vocab.keys())
token_intersection = vocab_tokens.intersection(comparison_tokens)
vocab_list = {}
comparison_vocab_list = {}
vocab_list = {token: vocab[token] for token in token_intersection}
comparison_vocab_list = {token: comparison_vocab[
token] for token in token_intersection}
return list(vocab_list.keys()), list(vocab_list.values()), \
list(comparison_vocab_list.keys()), list(
comparison_vocab_list.values())
def get_model_vocabulary(model):
""" Return the stored vocabulary for an embedding model
Args:
model (gensim.models) KeyedVectors or Word2Vec model.
"""
return set(model.vocab.keys())
def get_model_word_count(model, word):
""" Return the count for a given word in an embedding model
Args:
model (gensim.models) KeyedVectors or Word2Vec model
"""
return model.vocab[word].count
```
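A toy illustration of get_els_word_weights, showing how the document frequencies and IDF weights described in its docstring work out; the counts and tokens are invented.
```python
# Invented counts: 1,000 documents in total, three tokens from the aggregation.
from math import log10
from modules.utils import model_helpers  # assumed import path

vocab = {"hate": 50, "weather": 400, "banter": 10}
hs_keywords = {"hate"}
hs_freqs, freqs, hs_idf, idf = model_helpers.get_els_word_weights(vocab, 1000, hs_keywords)

assert freqs["weather"] == 400 / 1000        # P(wi) = docs containing wi / total docs
assert idf["weather"] == log10(1000 / 400)   # IDF = log10(total docs / docs containing wi)
assert set(hs_freqs) == {"hate"}             # only tokens from the HS keyword corpus
```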
#### File: thesis-preprocessing/hatespeech_core/staging.py
```python
import string
from pprint import pprint
from itertools import chain
from modules.utils import model_helpers
from modules.utils import word_enrichment
from modules.utils import file_ops
from modules.utils import settings
from modules.utils import text_preprocessing
from modules.db import mongo_base
from modules.db import mongo_search_pipelines
from modules.db import elasticsearch_base
from modules.preprocessing import neural_embeddings
from nltk.corpus import words
from nltk.corpus import stopwords
from joblib import Parallel, delayed
def check_token_lengths(wordlist):
"""Find the number of unigrams in the blacklist"""
unigrams = [word for word in wordlist if len(
file_ops.twokenize.tokenizeRawTweetText(word)) == 2]
result = unusual_words(unigrams)
print("Single token count: {0}".format(len(unigrams)))
print("Non dictionary matches: {0}".format(len(result[0])))
print("Dictionary matches: {0}".format(len(result[1])))
# pprint(result[0])
def unusual_words(text_list):
"""Filtering a Text: this program computes the vocabulary of a text,
then removes all items that occur in an existing wordlist,
leaving just the uncommon or mis-spelt words."""
# text_vocab = set(w.lower() for w in text_list if w.isalpha())
text_vocab = set(w.lower() for w in text_list)
english_vocab = set(w.lower() for w in words.words())
unusual = text_vocab - english_vocab
return [sorted(unusual), sorted(text_vocab - unusual)]
def ngram_stopword_check(text):
"""Check if all the tokens in an ngram are stopwords"""
punctuation = list(string.punctuation)
stop_list = dict.fromkeys(stopwords.words(
"english") + punctuation + ["rt", "via", "RT"])
bigrams = text_preprocessing.create_ngrams(
file_ops.twokenize.tokenizeRawTweetText(text.lower()), 2)
bigrams = [ngram for ngram in bigrams if not set(
file_ops.twokenize.tokenizeRawTweetText(ngram)).issubset(set(stop_list))]
print(bigrams)
# @profile
def test_linear_scan(connection_params, sample_size):
"""Test linear scan"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
cursor = dbo[collection].find({}, {"id_str": 1}).limit(sample_size)
documents = {str(document["_id"]) for document in cursor}
print(len(documents))
def process_cursor(cursor):
"""Return all documents in a cursor"""
documents = [str(document["_id"]) for document in cursor]
return documents
# @profile
def process_partition(partition, connection_params):
"""Thread safe process
partition stores a tuple with the skip and limit values
"""
client = mongo_base.connect()
db_name = connection_params[0]
collection = connection_params[1]
dbo = client[db_name]
dbo.authenticate(settings.MONGO_USER, settings.MONGO_PW,
source=settings.DB_AUTH_SOURCE)
cursor = dbo[collection].find({}, {"id_str": 1}).skip(
partition[0]).limit(partition[1])
documents = {str(document["_id"]) for document in cursor}
return documents
def parallel_test(num_cores, connection_params, sample_size):
"""Test parallel functionality"""
partition_size = sample_size // num_cores
partitions = [(i, partition_size)
for i in range(0, sample_size, partition_size)]
# Account for lists that aren't evenly divisible, update the last tuple to
# retrieve the remainder of the items
partitions[-1] = (partitions[-1][0], (sample_size - partitions[-1][0]))
results = Parallel(n_jobs=num_cores)(
delayed(process_partition)(partition, connection_params) for partition in partitions)
results = list(chain.from_iterable(results))
print(partitions)
print(len(results))
def profile_codeword_selection():
_es = elasticsearch_base.connect(settings.ES_URL)
positive_hs_filter = "_exists_:hs_keyword_matches"
negative_hs_filter = "!_exists_:hs_keyword_matches"
hs_keywords = set(file_ops.read_csv_file(
"refined_hs_keywords", settings.TWITTER_SEARCH_PATH))
test_size = 100000
min_doc_count = 5
subset_sizes = elasticsearch_base.get_els_subset_size(
_es, "manchester_event", "hs_keyword_matches")
doc_count = subset_sizes["positive_count"] + subset_sizes["negative_count"]
subset_sizes = elasticsearch_base.get_els_subset_size(
_es, "dailystormer", "hs_keyword_matches")
doc_count = subset_sizes["positive_count"] + subset_sizes["negative_count"]
dailystormer_pos_subset = elasticsearch_base.aggregate(
_es, "dailystormer", "tokens.keyword", False, positive_hs_filter, size=test_size, min_doc_count=min_doc_count)
dailystormer_neg_subset = elasticsearch_base.aggregate(
_es, "dailystormer", "tokens.keyword", False, negative_hs_filter, size=test_size, min_doc_count=min_doc_count)
dailystormer_pos_hs_freqs, dailystormer_pos_vocab_freqs, dailystormer_pos_hs_idfs, dailystormer_pos_vocab_idfs = model_helpers.get_els_word_weights(
dailystormer_pos_subset[0], doc_count, hs_keywords)
_, dailystormer_neg_vocab_freqs, _, dailystormer_neg_vocab_idfs = model_helpers.get_els_word_weights(
dailystormer_neg_subset[0], doc_count, hs_keywords)
test_size = 150000
min_doc_count = 10
subset_sizes = elasticsearch_base.get_els_subset_size(
_es, "unfiltered_stream", "hs_keyword_matches")
doc_count = subset_sizes["positive_count"] + subset_sizes["negative_count"]
unfiltered_stream_pos_subset = elasticsearch_base.aggregate(
_es, "unfiltered_stream", "tokens.keyword", False, positive_hs_filter, size=test_size, min_doc_count=min_doc_count)
unfiltered_stream_neg_subset = elasticsearch_base.aggregate(
_es, "unfiltered_stream", "tokens.keyword", False, negative_hs_filter, size=test_size, min_doc_count=min_doc_count)
unfiltered_stream_pos_hs_freqs, unfiltered_stream_pos_vocab_freqs, unfiltered_stream_pos_hs_idfs, unfiltered_stream_pos_vocab_idfs = model_helpers.get_els_word_weights(
unfiltered_stream_pos_subset[0], doc_count, hs_keywords)
_, unfiltered_stream_neg_vocab_freqs, _, unfiltered_stream_neg_vocab_idfs = model_helpers.get_els_word_weights(
unfiltered_stream_neg_subset[0], doc_count, hs_keywords)
    dep_model_ids = [0, 7]
    dep_embeddings = neural_embeddings.get_embeddings(
        "dep2vec", model_ids=dep_model_ids, load=True)
    # Default to None so the selection call below cannot fail with a NameError
    # if the embeddings are unavailable.
    dep2vec_dstormer = dep2vec_ustream = None
    if dep_embeddings:
        dep2vec_dstormer = dep_embeddings[0] if dep_embeddings[0] else None
        dep2vec_ustream = dep_embeddings[1] if dep_embeddings[1] else None
    word_model_ids = [3, 9]
    word_embeddings = neural_embeddings.get_embeddings(
        "ft", model_ids=word_model_ids, load=True)
    ft_dstormer = ft_ustream = None
    if word_embeddings:
        ft_dstormer = word_embeddings[0] if word_embeddings[0] else None
        ft_ustream = word_embeddings[1] if word_embeddings[1] else None
candidate_codewords = word_enrichment.select_candidate_codewords(biased_embeddings=[dep2vec_dstormer, ft_dstormer],
unbiased_embeddings=[dep2vec_ustream, ft_ustream], freq_vocab_pair=[
dailystormer_neg_vocab_freqs, unfiltered_stream_neg_vocab_freqs],
idf_vocab_pair=[dailystormer_neg_vocab_idfs, unfiltered_stream_neg_vocab_idfs], topn=5, p_at_k_threshold=0.2, hs_keywords=hs_keywords, hs_check=True)
# pprint(candidate_codewords)
def main():
"""Run operations"""
profile_codeword_selection()
if __name__ == "__main__":
main()
```
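The `parallel_test` routine above splits a collection scan into `(skip, limit)` pairs, one per worker, and patches the last pair so it absorbs the remainder. A minimal sketch of just that arithmetic, with no MongoDB or joblib involved (function name is illustrative):
```python
# Sketch of the (skip, limit) partitioning used by parallel_test above;
# pure Python, so it can be run without a database.
def make_partitions(sample_size, num_cores):
    partition_size = sample_size // num_cores
    partitions = [(i, partition_size)
                  for i in range(0, sample_size, partition_size)]
    # The last tuple absorbs the remainder when sample_size is not evenly divisible.
    partitions[-1] = (partitions[-1][0], sample_size - partitions[-1][0])
    return partitions

print(make_partitions(10, 3))  # [(0, 3), (3, 3), (6, 3), (9, 1)]
```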
#### File: thesis-preprocessing/support_files/archived_code.py
```python
def keyword_search(connection_params, keyword_list, lang_list):
"""Perform a text search with the provided keywords.
We also preprocess the tweet text in order to avoid redundant operations.
Outputs value to new collection.
Args:
connection_params (list): Contains connection objects and params as follows:
0: client (pymongo.MongoClient): Connection object for Mongo DB_URL.
1: db_name (str): Name of database to query.
2: collection (str): Name of collection to use.
keyword_list (list): List of keywords to search for.
lang_list (list): List of languages to match on.
"""
client = connection_params[0]
db_name = connection_params[1]
collection = connection_params[2]
# Store the documents for our bulkwrite
operations = []
# Keep track of the tweets that we have already seen, keep distinct.
seen_set = set()
dbo = client[db_name]
for search_query in keyword_list:
        # Run an aggregate search for each keyword; batching several keywords
        # per query might perform better, but that has not been measured.
pipeline = [
{"$match": {"$and": [{"$text": {"$search": search_query}},
{"id_str": {"$nin": list(seen_set)}},
{"lang": {"$in": lang_list}},
{"retweet_count": 0}]}},
{"$project": {"_id": 1, "id_str": 1, "text": 1, "id": 1, "timestamp": 1,
"lang": 1, "user.id_str": 1, "user.screen_name": 1, "user.location": 1}},
{"$out": "temp_set"}
]
dbo[collection].aggregate(pipeline, allowDiskUse=True)
cursor = dbo["temp_set"].find({}, no_cursor_timeout=True)
entities = cursor[:]
print("Keyword:", search_query, "| Count:", cursor.count(), " | Seen:", len(seen_set))
for document in entities:
seen_set.add(document["id_str"])
# Create a new field and add the preprocessed text to it
operations.append(document)
# document["vector"] = text_preprocessing.preprocess_text(document["text"])
# operations.append(InsertOne(document))
# Send once every 1000 in batch
if (len(operations) % 1000) == 0:
operations = text_preprocessing.parallel_preprocess(operations)
dbo["keyword_collection"].bulk_write(operations, ordered=False)
operations = []
if (len(operations) % 1000) != 0:
operations = text_preprocessing.parallel_preprocess(operations)
dbo["keywords_collection"].bulk_write(operations, ordered=False)
# Clean Up
dbo["temp_set"].drop()
def filter_by_language(connection_params, lang_list, output_name):
"""Aggregation pipeline to remove tweets with a lang field not in
lang_list. This should ideally be run directly through mongo shell
for large collections.
Args:
        connection_params (list): Contains connection objects and params as follows:
            0: client (pymongo.MongoClient): Connection object for Mongo DB_URL.
            1: db_name (str): Name of database to query.
            2: collection (str): Name of collection to use.
        lang_list (list): List of languages to match on.
        output_name (str): Name of the collection that stores the ids of the tweets that were kept.
"""
client = connection_params[0]
db_name = connection_params[1]
collection = connection_params[2]
dbo = client[db_name]
bulk = dbo[collection].initialize_unordered_bulk_op()
count = 0
pipeline = [
{"$match": {"lang": {"$nin": lang_list}}},
{"$project": {"lang": 1, "_id": 1}},
{"$group": {
"_id": {
"lang": "$lang",
},
"ids": {"$push": "$_id"}
}},
{"$project": {"ids": 1}}
]
cursor = dbo[collection].aggregate(pipeline, allowDiskUse=True)
print("Finished aggregation. Iterating now")
for document in cursor:
bulk.find({"_id": {"$in": document["ids"]}}).remove()
count = count + 1
print("Count:", count)
if count % 1000 == 0:
print("Running bulk execute")
bulk.execute()
bulk = dbo[collection].initialize_unordered_bulk_op()
if count % 1000 != 0:
print("Running bulk execute")
bulk.execute()
pipeline = [
{"$project": {"_id": 1}},
{"$out": output_name}
]
dbo[collection].aggregate(pipeline, allowDiskUse=True)
```
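Both archived functions above rely on the same flush-every-N batching pattern around Mongo bulk writes. A stripped-down sketch of that pattern, where the `flush` callable stands in for the preprocess-and-`bulk_write` step (names are illustrative):
```python
# Minimal sketch of the batched-write pattern used in keyword_search above.
def batched_write(documents, batch_size=1000, flush=print):
    operations = []
    for doc in documents:
        operations.append(doc)
        if len(operations) == batch_size:
            flush(operations)
            operations = []
    if operations:  # flush the final partial batch
        flush(operations)

batched_write(range(5), batch_size=2)  # prints [0, 1] then [2, 3] then [4]
```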
#### File: thesis-preprocessing/tests/feature_prep_test.py
```python
import string
from nose.tools import *
from context import hatespeech_core
class TestFeaturePrep(object):
""" init class """
def __init__(self):
self.test_list = ["I'm here", "get rekt", "#squadgoals okay"]
def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
@nottest
def test_extract_lexical_features(self):
"""This method tests the OR concatenation function"""
nlp = hatespeech_core.feature_prep.init_nlp_pipeline(False)
result_set = [("I'm", 'NN'), ('here', 'RB'), ('get', 'VB'),
('rekt', 'NN'), ('#squadgoals', 'NNS'), ('okay', 'JJ')]
response_string = hatespeech_core.feature_prep.extract_lexical_features_test(nlp,
self.test_list)
assert_equals(response_string, result_set)
``` |
{
"source": "jherico/pyopenxr",
"score": 2
} |
#### File: generate/xrg/declarations.py
```python
from abc import ABC, abstractmethod
import inspect
import re
import textwrap
from typing import Generator, Set
from clang.cindex import Cursor, CursorKind, TokenKind, TypeKind
from .types import Api, py_type_name, parse_type, capi_type_name, StringType
from .registry import xr_registry
class SkippableCodeItemException(Exception):
pass
class CodeItem(ABC):
def __init__(self, cursor: Cursor) -> None:
self.cursor = cursor
@staticmethod
def blank_lines_before() -> int:
return 1
@staticmethod
def blank_lines_after() -> int:
return 1
@abstractmethod
def name(self, api=Api.PYTHON) -> str:
pass
@abstractmethod
def code(self, api=Api.PYTHON) -> str:
pass
@abstractmethod
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
pass
class DefinitionItem(CodeItem):
def __init__(self, cursor: Cursor) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.MACRO_DEFINITION
self._capi_name = cursor.spelling
if self._capi_name.endswith("_"):
raise SkippableCodeItemException # OPENVR_H_
tokens = list(cursor.get_tokens())[1:]
if len(tokens) > 1:
raise SkippableCodeItemException # We only want simple #define values
self.c_value = tokens[0].spelling
self.value = self.c_value
if self.value is None:
raise SkippableCodeItemException # #define with no value
assert self._capi_name.startswith("XR_")
self._py_name = self._capi_name[3:]
if self.value.endswith("LL"):
self.value = self.value[:-2]
@staticmethod
def blank_lines_before():
return 0
@staticmethod
def blank_lines_after():
return 0
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == api.C:
return f"#define {self.name(api)} {self.c_value}"
return f"{self.name(api)} = {self.value}"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return set()
class EnumItem(CodeItem):
def __init__(self, cursor: Cursor) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.ENUM_DECL
self._capi_name = cursor.spelling
self._py_name = py_type_name(self._capi_name)
self.values = []
for v in cursor.get_children():
assert v.kind == CursorKind.ENUM_CONSTANT_DECL
self.values.append(EnumValueItem(cursor=v, parent=self))
@staticmethod
def blank_lines_before():
return 2
@staticmethod
def blank_lines_after():
return 1
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._py_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == api.CTYPES:
result = f"{self.name(api)} = c_int"
for v in self.values:
result += f"\n{v.code(api)}"
return result
elif api == api.PYTHON:
result = f"class {self.name(api)}(EnumBase):"
value_count = 0
for v in self.values:
if v.name(api) == "_MAX_ENUM":
continue
result += v.code(api)
value_count += 1
if value_count < 1:
result += "\n pass"
return result
elif api == api.C:
# TODO: this is probably not tested...
result = f"{self.name(api)} {{" # Q: does this already have "enum" at the beginning?
for v in self.values:
result += f" \n{v.code(api)}"
result += "\n}"
return result
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return {
"c_int",
}
class EnumValueItem(CodeItem):
# Certain enums name their values differently than others
_PREFIX_TABLE = {
"RESULT_": "",
"STRUCTURE_TYPE_": "TYPE_",
"PERF_SETTINGS_NOTIFICATION_LEVEL_": "PERF_SETTINGS_NOTIF_LEVEL_",
}
def __init__(self, cursor: Cursor, parent: EnumItem) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.ENUM_CONSTANT_DECL
self.parent = parent
self._capi_name = cursor.spelling
self._py_name = self._make_py_name()
self.value = self.cursor.enum_value
def _make_py_name(self):
# Compute pythonic name...
n = self._capi_name
assert n.startswith("XR_")
n = n[3:] # Strip off initial "XR_"
prefix = self.parent.name(Api.PYTHON)
postfix = ""
for postfix1 in ["EXT", "FB", "KHR", "MSFT"]:
if prefix.endswith(postfix1):
prefix = prefix[: -len(postfix1)]
postfix = f"_{postfix1}"
break
prefix = snake_from_camel(prefix).upper() + "_"
if n == f"{prefix}MAX_ENUM{postfix}":
return f"_MAX_ENUM" # private enum value
if prefix in self._PREFIX_TABLE:
prefix = self._PREFIX_TABLE[prefix]
assert n.startswith(prefix)
n = n[len(prefix):]
if len(postfix) > 0:
n = n[: -len(postfix)] # It's already in the parent enum name
return n
@staticmethod
def blank_lines_before():
return 0
@staticmethod
def blank_lines_after():
return 0
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
line_end = ""
line_indent = " "
if api == Api.C:
line_end = "," # TODO: but not the last one, right?
elif api == Api.CTYPES:
line_indent = ""
return f"\n{line_indent}{self.name(api)} = {self.value}{line_end}"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return {
"c_int",
}
class FunctionItem(CodeItem):
def __init__(self, cursor: Cursor) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.FUNCTION_DECL
self._capi_name = cursor.spelling
self._py_name = self._py_function_name(self._capi_name)
self.parameters = []
self.return_type = None
for c in cursor.get_children():
if c.kind == CursorKind.TYPE_REF:
assert self.return_type is None
self.return_type = parse_type(c.type)
elif c.kind == CursorKind.PARM_DECL:
self.parameters.append(FunctionParameterItem(c))
else:
assert False
@staticmethod
def _py_function_name(capi_name: str) -> str:
s = capi_name
if s.startswith("xr"):
s = s[2:]
return snake_from_camel(s)
@staticmethod
def blank_lines_before():
return 2
@staticmethod
def blank_lines_after():
return 2
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == Api.CTYPES:
# ctypes raw function definition
result = inspect.cleandoc(
f"""
{self.name(Api.C)} = openxr_loader_library.{self.name(Api.C)}
{self.name(Api.C)}.restype = {self.return_type.name(Api.PYTHON)}
{self.name(Api.C)}.argtypes = [
""")
for p in self.parameters:
result += f"\n {p.type.name(api)}, # {p.name(Api.PYTHON)}"
result += "\n]"
return result
elif api == Api.PYTHON:
return str(FunctionCoder(self))
elif api == Api.C:
raise NotImplementedError
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
result = self.return_type.used_ctypes(api)
for p in self.parameters:
result.update(p.used_ctypes(api))
return result
class FunctionParameterItem(CodeItem):
def __init__(self, cursor: Cursor):
super().__init__(cursor)
assert cursor.kind == CursorKind.PARM_DECL
self._capi_name = cursor.spelling
self._py_name = snake_from_camel(self._capi_name)
self.type = parse_type(cursor.type)
self._optional = False
# Query xr registry to see if this parameter is optional
if xr_registry:
function_c_name = cursor.semantic_parent.spelling
try:
command = xr_registry.find(f'commands/command/proto[name="{function_c_name}"]/..')
this_param = command.find(f'param[name="{self._capi_name}"]')
self._optional = this_param.attrib["optional"] == "true"
except Exception:
pass
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
pass
@staticmethod
def default_value() -> str:
"""Only applies if is_optional() is True"""
return "None"
def is_optional(self) -> bool:
return self._optional
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return self.type.used_ctypes(api)
class StructFieldItem(CodeItem):
def __init__(self, cursor: Cursor) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.FIELD_DECL
self._capi_name = cursor.spelling
self._py_name = snake_from_camel(self._capi_name)
self.type = parse_type(cursor.type)
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._py_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == Api.C:
raise NotImplementedError
return f'\n ("{self.name(api)}", {self.type.name(api)}),'
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return self.type.used_ctypes(api)
class StructItem(CodeItem):
def __init__(self, cursor: Cursor):
super().__init__(cursor)
assert cursor.kind == CursorKind.STRUCT_DECL
self.c_name = cursor.spelling
self._capi_name = capi_type_name(self.c_name)
self._py_name = py_type_name(self._capi_name)
self.fields = []
for c in cursor.get_children():
if c.kind == CursorKind.FIELD_DECL:
self.fields.append(StructFieldItem(c))
elif c.kind == CursorKind.UNEXPOSED_ATTR:
pass # something about the typedef?
elif c.kind == CursorKind.STRUCT_DECL:
pass # Probably just a structure pointer, right?
else:
assert False
self.is_recursive = False
for f in self.fields:
m = re.search(fr"\b{self.name(Api.CTYPES)}\b", f.type.name(Api.CTYPES))
if m:
self.is_recursive = True
@staticmethod
def blank_lines_before():
return 2
@staticmethod
def blank_lines_after():
return 2
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._py_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == Api.C:
raise NotImplementedError
result = f"class {self.name(api)}(Structure):"
if len(self.fields) == 0:
# Empty structure
result += "\n pass"
return result
elif len(self.fields) >= 2 and self.fields[0].name() == "type":
assert self.fields[0].type.name() == "StructureType"
assert self.fields[1].name() == "next"
result += "\n"
type_enum_name = snake_from_camel(self.name()).upper()
result += textwrap.indent(inspect.cleandoc(f"""
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.{type_enum_name}.value,
*args, **kwargs,
)
"""), " ")
result += "\n"
# Hard code this for now, generalize later if needed
if self.name() == "ExtensionProperties":
result += "\n"
# This structure is sort of equivalent to a string
string_field = "extension_name"
result += textwrap.indent(inspect.cleandoc(f"""
def __bytes__(self):
return self.extension_name
def __eq__(self, other):
try:
if other.type != self.type:
return False
except AttributeError:
pass # That's OK, objects without those attributes can use string comparison
return str(other) == str(self)
def __str__(self):
return self.{string_field}.decode()
"""), " ")
result += "\n"
if self.is_recursive:
# Structure containing self-reference must be declared in two stanzas
result += "\n pass"
result += f"\n\n\n{self.name(api)}._fields_ = ["
else:
result += "\n _fields_ = ["
result += "".join([f.code(Api.CTYPES) for f in self.fields])
result += "\n ]"
return result
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
result = {
"Structure",
}
for f in self.fields:
result.update(f.used_ctypes(Api.CTYPES))
return result
class TypeDefItem(CodeItem):
def __init__(self, cursor: Cursor):
super().__init__(cursor)
assert cursor.kind == CursorKind.TYPEDEF_DECL
self._capi_name = cursor.spelling
self._py_name = py_type_name(self._capi_name)
self.type = parse_type(cursor.underlying_typedef_type)
if self.type.clang_type.kind == TypeKind.ENUM:
raise SkippableCodeItemException # Keep enum typedefs out of typedefs.py
if self._py_name == self.type.name(Api.CTYPES):
raise SkippableCodeItemException # Nonsense A = A typedef
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == Api.C:
raise NotImplementedError
return f"{self.name(api)} = {self.type.name(Api.CTYPES)}"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return self.type.used_ctypes(Api.CTYPES)
class VariableItem(CodeItem):
def __init__(self, cursor: Cursor) -> None:
super().__init__(cursor)
assert cursor.kind == CursorKind.VAR_DECL
self._capi_name = cursor.spelling
if not self._capi_name.startswith("XR_"):
assert False
self._py_name = self._capi_name[3:]
self.type = None
for e in cursor.get_children():
if e.kind == CursorKind.TYPE_REF:
self.type = parse_type(e.type)
elif e.kind == CursorKind.UNEXPOSED_EXPR:
value_cursor = list(e.get_children())[0]
tokens = list(value_cursor.get_tokens())
assert len(tokens) == 1
self.value = tokens[0].spelling
elif e.kind == CursorKind.INTEGER_LITERAL:
tokens = list(e.get_tokens())
assert tokens[0].kind == TokenKind.LITERAL
self.value = tokens[0].spelling
else:
assert False
if self.value.endswith("LL"):
self.value = self.value[:-2]
@staticmethod
def blank_lines_before():
return 0
@staticmethod
def blank_lines_after():
return 0
def name(self, api=Api.PYTHON) -> str:
if api == api.PYTHON:
return self._py_name
elif api == api.C:
return self._capi_name
elif api == api.CTYPES:
return self._capi_name
else:
raise NotImplementedError
def code(self, api=Api.PYTHON) -> str:
if api == Api.C:
raise NotImplementedError
return f"{self.name(api)} = {self.value}"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return set()
class NothingParameterCoder(object):
def __init__(self, parameter: FunctionParameterItem):
self.parameter = parameter
@staticmethod
def declaration_code(api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
def main_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
def buffer_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield from self.main_call_code()
@staticmethod
def mid_body_code(api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
@staticmethod
def pre_body_code(api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
@staticmethod
def result_type_code(api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
def result_value_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield from []
class ParameterCoderBase(NothingParameterCoder):
def main_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield f"{self.parameter.name(api)}"
class InputParameterCoder(ParameterCoderBase):
def declaration_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
# TODO: default value (from docstring?) e.g. None for string that can be empty
p = self.parameter
yield f"{p.name(api)}: {p.type.name(Api.PYTHON)}"
class StringInputParameterCoder(InputParameterCoder):
def pre_body_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield f"if {self.parameter.name(api)} is not None:"
yield f" {self.parameter.name(api)} = {self.parameter.name(api)}.encode()"
class OutputParameterCoder(ParameterCoderBase):
def result_type_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
rtype = self.parameter.type.pointee
yield f"{rtype.name(api)}"
def pre_body_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
rtype = self.parameter.type.pointee
yield f"{self.parameter.name(api)} = {rtype.name(Api.CTYPES)}()"
def main_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield f"byref({self.parameter.name(api)})"
def result_value_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
rtype = self.parameter.type.pointee
if rtype.name(Api.PYTHON) == "int":
yield f"{self.parameter.name(api)}.value"
else:
yield f"{self.parameter.name(api)}"
class BufferCoder(ParameterCoderBase):
def __init__(self, cap_in: FunctionParameterItem, count_out: FunctionParameterItem, array: FunctionParameterItem):
super().__init__(cap_in)
self.cap_in = cap_in
self.count_out = count_out
self.array = array
if self.array.type.clang_type.spelling == "char *": # string case
assert not self.array.type.clang_type.get_pointee().is_const_qualified()
self.array_type_name = "str"
else:
self.array_type = self.array.type.pointee
self.array_type_name: str = self.array_type.name(Api.CTYPES)
def pre_body_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield f"{self.cap_in.name(api)} = {self.cap_in.type.name(Api.CTYPES)}(0)"
def buffer_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield "0"
yield f"byref({self.cap_in.name(api)})"
yield "None"
def mid_body_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
name = f"{self.array.name()}"
n = f"{self.cap_in.name(api)}.value"
etype = self.array_type_name
if self.array_type_name == "str":
yield f"{name} = create_string_buffer({n})"
else:
# Use the default constructor to initialize each array member
# initialized_array = (MyStructure * N)(*([MyStructure()] * N))
yield f"{name} = ({etype} * {n})(*([{etype}()] * {n}))"
def main_call_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
yield f"{self.cap_in.name(api)}"
yield f"byref({self.cap_in.name(api)})"
yield f"{self.array.name(api)}"
def result_type_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
if self.array_type_name == "str":
yield "str"
else: # array case
yield f"Array[{self.array_type_name}]"
def result_value_code(self, api=Api.PYTHON) -> Generator[str, None, None]:
if self.array_type_name == "str":
yield f"{self.array.name(api)}.value.decode()"
else: # array case
yield f"{self.array.name(api)}"
class FunctionCoder(object):
def __init__(self, function: FunctionItem):
self.function = function
# TODO: categorize parameters finely
# TODO: buffer size parameters
self.param_coders = [[p, None] for p in self.function.parameters]
# First pass: Buffer size arguments
self._needs_two_calls = False
for ix, pc in enumerate(self.param_coders):
p, c = pc
if p.name().endswith("_capacity_input"):
# OpenXR buffer size parameters consist of three consecutive parameters
assert "int" in p.type.name()
p2 = self.param_coders[ix + 1][0]
assert p2.name().endswith("_count_output")
assert p2.type.clang_type.kind == TypeKind.POINTER
assert "int" in p2.type.pointee.name()
p3 = self.param_coders[ix + 2][0]
                assert p3.type.clang_type.kind == TypeKind.POINTER
self.param_coders[ix][1] = BufferCoder(p, p2, p3)
self.param_coders[ix + 1][1] = NothingParameterCoder(p2)
self.param_coders[ix + 2][1] = NothingParameterCoder(p3)
self._needs_two_calls = True
# Assume remainder are simple inputs
for ix, pc in enumerate(self.param_coders):
p, c = pc
if c is not None:
continue
if isinstance(p.type, StringType):
pc[1] = StringInputParameterCoder(p)
continue
ct = p.type.clang_type
if ct.kind == TypeKind.POINTER and not ct.get_pointee().is_const_qualified():
pc[1] = OutputParameterCoder(p)
continue
pc[1] = InputParameterCoder(p)
def declaration_code(self, api=Api.PYTHON) -> str:
result_types = []
for p, c in self.param_coders:
for r in c.result_type_code(Api.PYTHON):
result_types.append(r)
# Don't show default value for any parameter that appears before required parameters
can_haz_default = True
param_strings = []
for p, c in reversed(self.param_coders):
for s in c.declaration_code(api):
default = ","
if p.is_optional() and can_haz_default:
default = f" = {p.default_value()},"
if not p.is_optional():
can_haz_default = False
param_strings.append(f"\n{' ' * 16}{s}{default}")
params = "".join(reversed(param_strings))
if len(result_types) == 0:
result = "None"
elif len(result_types) == 1:
result = result_types[0]
else:
result = f"({', '.join(result_types)})"
return inspect.cleandoc(f"""
def {self.function.name(api)}({params}
) -> {result}:
""")
def __str__(self, api=Api.PYTHON):
result = self.declaration_code(api)
docstring = ""
result += f'\n """{docstring}"""'
for p, c in self.param_coders:
for line in c.pre_body_code():
result += f"\n {line}"
result += f"\n fxn = raw_functions.{self.function.name(Api.CTYPES)}"
if self._needs_two_calls:
result += f"\n # First call of two, to retrieve buffer sizes"
result += f"\n result = check_result(fxn("
for p, c in self.param_coders:
for param_name in c.buffer_call_code():
result += f"\n {param_name},"
result += f"\n ))"
result += f"\n if result.is_exception():"
result += f"\n raise result"
for p, c in self.param_coders:
for line in c.mid_body_code():
result += f"\n {line}"
result += f"\n result = check_result(fxn("
for p, c in self.param_coders:
for param_name in c.main_call_code():
result += f"\n {param_name},"
result += f"\n ))"
result += f"\n if result.is_exception():"
result += f"\n raise result"
result_values = []
for p, c in self.param_coders:
for r in c.result_value_code(Api.PYTHON):
result_values.append(r)
if len(result_values) > 0:
result += f"\n return {', '.join(result_values)}"
return result
def snake_from_camel(camel: str) -> str:
snake = f"{camel}"
snake = re.sub(r"([^A-Z])([A-Z])", r"\1_\2", snake)
snake = snake.lower()
snake = re.sub(r"open_gl", "opengl_", snake)
return snake
__all__ = [
"CodeItem",
"DefinitionItem",
"EnumItem",
"FunctionItem",
"SkippableCodeItemException",
"StructItem",
"TypeDefItem",
"VariableItem",
]
```
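The `snake_from_camel` helper above, together with its `open_gl` fix-up, is what turns the C function names into the pythonic wrapper names. A quick usage sketch; the `xrg.declarations` import path follows the file layout shown above and is an assumption:
```python
# Assumed import path, based on the generate/xrg/ layout shown above.
from xrg.declarations import snake_from_camel

print(snake_from_camel("CreateInstance"))                    # create_instance
print(snake_from_camel("GetOpenGLGraphicsRequirementsKHR"))  # get_opengl_graphics_requirements_khr
```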
#### File: generate/xrg/types.py
```python
from abc import ABC, abstractmethod
import enum
import re
from typing import Optional, Set
import clang.cindex
from clang.cindex import TypeKind
class Api(enum.Enum):
C = 1, # C language symbols and code from the original C file
CTYPES = 2, # Python code with maximum similarity to C code
PYTHON = 3, # High-level pythonic interface symbols and code
class TypeBase(ABC):
def __init__(self, clang_type: clang.cindex.Type):
self.clang_type = clang_type
@abstractmethod
def name(self, api=Api.PYTHON) -> str:
pass
@abstractmethod
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
pass
class ArrayType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.CONSTANTARRAY
self.element_type = parse_type(clang_type.element_type)
self.count = clang_type.element_count
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return f"{self.element_type.name(api)}[{self.count}]"
else:
return f"({self.element_type.name(Api.CTYPES)} * {self.count})"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return self.element_type.used_ctypes(Api.CTYPES)
class EnumType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.ENUM
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.CTYPES:
return "c_int"
else:
return "c_int" # TODO we could use the actual name if we had the enums loaded
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return {"c_int", }
class FloatType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
self._ctypes_name = self.CLANG_NAMES_FOR_KINDS[clang_type.kind]
super().__init__(clang_type)
CLANG_NAMES_FOR_KINDS = {
TypeKind.FLOAT: "c_float",
TypeKind.DOUBLE: "c_double",
}
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.PYTHON:
return "float"
else:
return self._ctypes_name
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
if api == Api.CTYPES:
return {self._ctypes_name, }
else:
return set()
class FunctionPointerType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.POINTER
pt = clang_type.get_pointee()
assert pt.kind == TypeKind.FUNCTIONPROTO
self.result_type = parse_type(pt.get_result())
self.arg_types = [parse_type(t) for t in pt.argument_types()]
def name(self, api=Api.PYTHON) -> str:
if api == api.C:
return self.clang_type.spelling
else:
arg_string = ", ".join(
a.name(Api.CTYPES) for a in [self.result_type, *self.arg_types]
)
return f"CFUNCTYPE({arg_string})"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
result = {
"CFUNCTYPE",
}
        result.update(self.result_type.used_ctypes(api))
for a in self.arg_types:
result.update(a.used_ctypes(api))
return result
class IntegerType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
self._name = self.clang_name_for_type(clang_type)
if self._name is None:
raise ValueError(f"clang type `{clang_type.kind}` is not an integer")
super().__init__(clang_type)
CLANG_NAMES_FOR_KINDS = {
TypeKind.INT: "c_int",
TypeKind.LONG: "c_long",
TypeKind.LONGLONG: "c_longlong",
TypeKind.SHORT: "c_short",
TypeKind.UINT: "c_uint",
TypeKind.ULONG: "c_ulong",
TypeKind.ULONGLONG: "c_ulonglong",
TypeKind.USHORT: "c_ushort",
}
@staticmethod
def clang_name_for_type(clang_type: clang.cindex.Type) -> Optional[str]:
if clang_type.kind in IntegerType.CLANG_NAMES_FOR_KINDS:
return IntegerType.CLANG_NAMES_FOR_KINDS[clang_type.kind]
if clang_type.kind == TypeKind.TYPEDEF:
return IntegerType.clang_name_for_c_name(clang_type.spelling)
@staticmethod
def clang_name_for_c_name(c_name: str) -> Optional[str]:
m = re.match(r"(?:const )?(u?int(?:8|16|32|64))_t", c_name)
if m:
return f"c_{m.group(1)}"
return None
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.PYTHON:
return "int"
else:
return self._name
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
if api == Api.CTYPES:
return {self._name, }
else:
return set()
class PointerType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.POINTER
pt = clang_type.get_pointee()
assert pt.kind != TypeKind.VOID
assert pt.kind != TypeKind.FUNCTIONPROTO
self.pointee = parse_type(pt)
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
else:
return f"POINTER({self.pointee.name(Api.CTYPES)})"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
result = self.pointee.used_ctypes(Api.CTYPES)
result.add("POINTER")
return result
class PrimitiveCTypesType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type, ctypes_type: str, python_type: str):
super().__init__(clang_type)
self._name = ctypes_type
self.py_name = python_type
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.PYTHON:
return self.py_name
else:
return self._name
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return {
self._name,
}
class RecordType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.RECORD
self._capi_name = capi_type_name(clang_type.get_declaration().spelling)
self._py_name = py_type_name(self._capi_name)
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.CTYPES:
return self._py_name
elif api == Api.PYTHON:
return self._py_name
else:
raise NotImplementedError
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return set()
class StringType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
if clang_type.kind == TypeKind.POINTER:
assert clang_type.get_pointee().kind == TypeKind.CHAR_S
self._ctypes_name = "c_char_p"
else:
self._ctypes_name = self.CLANG_NAMES_FOR_KINDS[clang_type.kind]
super().__init__(clang_type)
CLANG_NAMES_FOR_KINDS = {
TypeKind.CHAR_S: "c_char",
TypeKind.UCHAR: "c_uchar",
}
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.PYTHON:
return "str"
else:
return self._ctypes_name
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
if api == Api.CTYPES:
return {self._ctypes_name, }
else:
return set()
class TypedefType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.TYPEDEF
type_name = clang_type.spelling
self._capi_name = capi_type_name(type_name)
self._py_name = py_type_name(self._capi_name)
self._ctypes_name = self._py_name
self.underlying_type = parse_type(
clang_type.get_declaration().underlying_typedef_type
)
if isinstance(self.underlying_type, EnumType):
self._ctypes_name += ".ctype()"
if not self._capi_name.upper()[:2] in ("XR", "PF", ):
raise ValueError(self._capi_name)
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return self.clang_type.spelling
elif api == Api.CTYPES:
return self._ctypes_name
elif api == Api.PYTHON:
return self._py_name
else:
raise NotImplementedError
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
if api == Api.C:
return set()
elif self._capi_name.startswith("c_"):
return {
self._capi_name,
}
else:
return set()
class VoidType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.kind == TypeKind.VOID
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return "void"
else:
return "None"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return set()
class WideCharType(TypeBase):
def __init__(self, clang_type: clang.cindex.Type):
super().__init__(clang_type)
assert clang_type.spelling == "wchar_t"
def name(self, api=Api.PYTHON) -> str:
if api == Api.C:
return "wchar_t"
else:
return "c_wchar"
def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
return {"c_wchar", }
def capi_type_name(c_type_name: str) -> str:
"""The low level C-like api uses the exact same names as in C"""
s = re.sub(r"\b(?:const|volatile)\s+", "", c_type_name) # But without const
return s
def parse_type(clang_type: clang.cindex.Type) -> TypeBase:
if clang_type.kind == TypeKind.CHAR_S:
return StringType(clang_type)
elif clang_type.kind == TypeKind.CONSTANTARRAY:
return ArrayType(clang_type)
elif clang_type.kind == TypeKind.ELABORATED:
return parse_type(clang_type.get_named_type())
elif clang_type.kind == TypeKind.ENUM:
return EnumType(clang_type)
elif clang_type.kind in FloatType.CLANG_NAMES_FOR_KINDS:
return FloatType(clang_type)
elif clang_type.kind in IntegerType.CLANG_NAMES_FOR_KINDS:
return IntegerType(clang_type)
elif clang_type.kind == TypeKind.POINTER:
pt = clang_type.get_pointee()
if pt.kind == TypeKind.CHAR_S:
# But this works ONLY if these are always null terminated strings
return StringType(clang_type)
elif pt.kind == TypeKind.FUNCTIONPROTO:
return FunctionPointerType(clang_type)
elif pt.kind == TypeKind.VOID:
return PrimitiveCTypesType(clang_type, "c_void_p", "None")
else:
return PointerType(clang_type)
elif clang_type.kind == TypeKind.RECORD:
return RecordType(clang_type)
elif clang_type.kind == TypeKind.TYPEDEF:
if clang_type.spelling == "wchar_t":
return WideCharType(clang_type)
try:
return IntegerType(clang_type)
except ValueError:
underlying_type = clang_type.get_declaration().underlying_typedef_type
if clang_type.spelling[:2].upper() == "XR":
return TypedefType(clang_type)
elif clang_type.spelling.startswith("PFN_"):
return TypedefType(clang_type)
else:
return parse_type(underlying_type)
elif clang_type.kind == TypeKind.UCHAR:
return StringType(clang_type)
elif clang_type.kind == TypeKind.VOID:
return VoidType(clang_type)
assert False
def py_type_name(capi_type: str) -> str:
s = capi_type
if s.startswith("Xr"):
s = s[2:]
return s
__all__ = [
"Api",
"ArrayType",
"capi_type_name",
"EnumType",
"FunctionPointerType",
"PointerType",
"parse_type",
"PrimitiveCTypesType",
"py_type_name",
"RecordType",
"StringType",
"TypeBase",
"TypedefType",
"VoidType",
]
```
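The two name helpers at the bottom of `types.py` do the C-to-Python renaming that the rest of the generator relies on. A small usage sketch; the import path is assumed from the layout above:
```python
# Assumed import path, based on the generate/xrg/ layout shown above.
from xrg.types import capi_type_name, py_type_name

c_name = "const XrInstanceCreateInfo"
capi = capi_type_name(c_name)   # "XrInstanceCreateInfo"  (const/volatile stripped)
print(py_type_name(capi))       # "InstanceCreateInfo"    (leading "Xr" stripped)
```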
#### File: src/xr/enums.py
```python
from ctypes import c_int
import enum
class EnumBase(enum.Enum):
@staticmethod
def ctype():
return c_int
class Result(EnumBase):
SUCCESS = 0
TIMEOUT_EXPIRED = 1
SESSION_LOSS_PENDING = 3
EVENT_UNAVAILABLE = 4
SPACE_BOUNDS_UNAVAILABLE = 7
SESSION_NOT_FOCUSED = 8
FRAME_DISCARDED = 9
ERROR_VALIDATION_FAILURE = -1
ERROR_RUNTIME_FAILURE = -2
ERROR_OUT_OF_MEMORY = -3
ERROR_API_VERSION_UNSUPPORTED = -4
ERROR_INITIALIZATION_FAILED = -6
ERROR_FUNCTION_UNSUPPORTED = -7
ERROR_FEATURE_UNSUPPORTED = -8
ERROR_EXTENSION_NOT_PRESENT = -9
ERROR_LIMIT_REACHED = -10
ERROR_SIZE_INSUFFICIENT = -11
ERROR_HANDLE_INVALID = -12
ERROR_INSTANCE_LOST = -13
ERROR_SESSION_RUNNING = -14
ERROR_SESSION_NOT_RUNNING = -16
ERROR_SESSION_LOST = -17
ERROR_SYSTEM_INVALID = -18
ERROR_PATH_INVALID = -19
ERROR_PATH_COUNT_EXCEEDED = -20
ERROR_PATH_FORMAT_INVALID = -21
ERROR_PATH_UNSUPPORTED = -22
ERROR_LAYER_INVALID = -23
ERROR_LAYER_LIMIT_EXCEEDED = -24
ERROR_SWAPCHAIN_RECT_INVALID = -25
ERROR_SWAPCHAIN_FORMAT_UNSUPPORTED = -26
ERROR_ACTION_TYPE_MISMATCH = -27
ERROR_SESSION_NOT_READY = -28
ERROR_SESSION_NOT_STOPPING = -29
ERROR_TIME_INVALID = -30
ERROR_REFERENCE_SPACE_UNSUPPORTED = -31
ERROR_FILE_ACCESS_ERROR = -32
ERROR_FILE_CONTENTS_INVALID = -33
ERROR_FORM_FACTOR_UNSUPPORTED = -34
ERROR_FORM_FACTOR_UNAVAILABLE = -35
ERROR_API_LAYER_NOT_PRESENT = -36
ERROR_CALL_ORDER_INVALID = -37
ERROR_GRAPHICS_DEVICE_INVALID = -38
ERROR_POSE_INVALID = -39
ERROR_INDEX_OUT_OF_RANGE = -40
ERROR_VIEW_CONFIGURATION_TYPE_UNSUPPORTED = -41
ERROR_ENVIRONMENT_BLEND_MODE_UNSUPPORTED = -42
ERROR_NAME_DUPLICATED = -44
ERROR_NAME_INVALID = -45
ERROR_ACTIONSET_NOT_ATTACHED = -46
ERROR_ACTIONSETS_ALREADY_ATTACHED = -47
ERROR_LOCALIZED_NAME_DUPLICATED = -48
ERROR_LOCALIZED_NAME_INVALID = -49
ERROR_GRAPHICS_REQUIREMENTS_CALL_MISSING = -50
ERROR_RUNTIME_UNAVAILABLE = -51
ERROR_ANDROID_THREAD_SETTINGS_ID_INVALID_KHR = -1000003000
ERROR_ANDROID_THREAD_SETTINGS_FAILURE_KHR = -1000003001
ERROR_CREATE_SPATIAL_ANCHOR_FAILED_MSFT = -1000039001
ERROR_SECONDARY_VIEW_CONFIGURATION_TYPE_NOT_ENABLED_MSFT = -1000053000
ERROR_CONTROLLER_MODEL_KEY_INVALID_MSFT = -1000055000
ERROR_REPROJECTION_MODE_UNSUPPORTED_MSFT = -1000066000
ERROR_COMPUTE_NEW_SCENE_NOT_COMPLETED_MSFT = -1000097000
ERROR_SCENE_COMPONENT_ID_INVALID_MSFT = -1000097001
ERROR_SCENE_COMPONENT_TYPE_MISMATCH_MSFT = -1000097002
ERROR_SCENE_MESH_BUFFER_ID_INVALID_MSFT = -1000097003
ERROR_SCENE_COMPUTE_FEATURE_INCOMPATIBLE_MSFT = -1000097004
ERROR_SCENE_COMPUTE_CONSISTENCY_MISMATCH_MSFT = -1000097005
ERROR_DISPLAY_REFRESH_RATE_UNSUPPORTED_FB = -1000101000
ERROR_COLOR_SPACE_UNSUPPORTED_FB = -1000108000
ERROR_SPATIAL_ANCHOR_NAME_NOT_FOUND_MSFT = -1000142001
ERROR_SPATIAL_ANCHOR_NAME_INVALID_MSFT = -1000142002
class StructureType(EnumBase):
UNKNOWN = 0
API_LAYER_PROPERTIES = 1
EXTENSION_PROPERTIES = 2
INSTANCE_CREATE_INFO = 3
SYSTEM_GET_INFO = 4
SYSTEM_PROPERTIES = 5
VIEW_LOCATE_INFO = 6
VIEW = 7
SESSION_CREATE_INFO = 8
SWAPCHAIN_CREATE_INFO = 9
SESSION_BEGIN_INFO = 10
VIEW_STATE = 11
FRAME_END_INFO = 12
HAPTIC_VIBRATION = 13
EVENT_DATA_BUFFER = 16
EVENT_DATA_INSTANCE_LOSS_PENDING = 17
EVENT_DATA_SESSION_STATE_CHANGED = 18
ACTION_STATE_BOOLEAN = 23
ACTION_STATE_FLOAT = 24
ACTION_STATE_VECTOR2F = 25
ACTION_STATE_POSE = 27
ACTION_SET_CREATE_INFO = 28
ACTION_CREATE_INFO = 29
INSTANCE_PROPERTIES = 32
FRAME_WAIT_INFO = 33
COMPOSITION_LAYER_PROJECTION = 35
COMPOSITION_LAYER_QUAD = 36
REFERENCE_SPACE_CREATE_INFO = 37
ACTION_SPACE_CREATE_INFO = 38
EVENT_DATA_REFERENCE_SPACE_CHANGE_PENDING = 40
VIEW_CONFIGURATION_VIEW = 41
SPACE_LOCATION = 42
SPACE_VELOCITY = 43
FRAME_STATE = 44
VIEW_CONFIGURATION_PROPERTIES = 45
FRAME_BEGIN_INFO = 46
COMPOSITION_LAYER_PROJECTION_VIEW = 48
EVENT_DATA_EVENTS_LOST = 49
INTERACTION_PROFILE_SUGGESTED_BINDING = 51
EVENT_DATA_INTERACTION_PROFILE_CHANGED = 52
INTERACTION_PROFILE_STATE = 53
SWAPCHAIN_IMAGE_ACQUIRE_INFO = 55
SWAPCHAIN_IMAGE_WAIT_INFO = 56
SWAPCHAIN_IMAGE_RELEASE_INFO = 57
ACTION_STATE_GET_INFO = 58
HAPTIC_ACTION_INFO = 59
SESSION_ACTION_SETS_ATTACH_INFO = 60
ACTIONS_SYNC_INFO = 61
BOUND_SOURCES_FOR_ACTION_ENUMERATE_INFO = 62
INPUT_SOURCE_LOCALIZED_NAME_GET_INFO = 63
COMPOSITION_LAYER_CUBE_KHR = 1000006000
INSTANCE_CREATE_INFO_ANDROID_KHR = 1000008000
COMPOSITION_LAYER_DEPTH_INFO_KHR = 1000010000
VULKAN_SWAPCHAIN_FORMAT_LIST_CREATE_INFO_KHR = 1000014000
EVENT_DATA_PERF_SETTINGS_EXT = 1000015000
COMPOSITION_LAYER_CYLINDER_KHR = 1000017000
COMPOSITION_LAYER_EQUIRECT_KHR = 1000018000
DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000019000
DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000019001
DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000019002
DEBUG_UTILS_LABEL_EXT = 1000019003
GRAPHICS_BINDING_OPENGL_WIN32_KHR = 1000023000
GRAPHICS_BINDING_OPENGL_XLIB_KHR = 1000023001
GRAPHICS_BINDING_OPENGL_XCB_KHR = 1000023002
GRAPHICS_BINDING_OPENGL_WAYLAND_KHR = 1000023003
SWAPCHAIN_IMAGE_OPENGL_KHR = 1000023004
GRAPHICS_REQUIREMENTS_OPENGL_KHR = 1000023005
GRAPHICS_BINDING_OPENGL_ES_ANDROID_KHR = 1000024001
SWAPCHAIN_IMAGE_OPENGL_ES_KHR = 1000024002
GRAPHICS_REQUIREMENTS_OPENGL_ES_KHR = 1000024003
GRAPHICS_BINDING_VULKAN_KHR = 1000025000
SWAPCHAIN_IMAGE_VULKAN_KHR = 1000025001
GRAPHICS_REQUIREMENTS_VULKAN_KHR = 1000025002
GRAPHICS_BINDING_D3D11_KHR = 1000027000
SWAPCHAIN_IMAGE_D3D11_KHR = 1000027001
GRAPHICS_REQUIREMENTS_D3D11_KHR = 1000027002
GRAPHICS_BINDING_D3D12_KHR = 1000028000
SWAPCHAIN_IMAGE_D3D12_KHR = 1000028001
GRAPHICS_REQUIREMENTS_D3D12_KHR = 1000028002
SYSTEM_EYE_GAZE_INTERACTION_PROPERTIES_EXT = 1000030000
EYE_GAZE_SAMPLE_TIME_EXT = 1000030001
VISIBILITY_MASK_KHR = 1000031000
EVENT_DATA_VISIBILITY_MASK_CHANGED_KHR = 1000031001
SESSION_CREATE_INFO_OVERLAY_EXTX = 1000033000
EVENT_DATA_MAIN_SESSION_VISIBILITY_CHANGED_EXTX = 1000033003
COMPOSITION_LAYER_COLOR_SCALE_BIAS_KHR = 1000034000
SPATIAL_ANCHOR_CREATE_INFO_MSFT = 1000039000
SPATIAL_ANCHOR_SPACE_CREATE_INFO_MSFT = 1000039001
COMPOSITION_LAYER_IMAGE_LAYOUT_FB = 1000040000
COMPOSITION_LAYER_ALPHA_BLEND_FB = 1000041001
VIEW_CONFIGURATION_DEPTH_RANGE_EXT = 1000046000
GRAPHICS_BINDING_EGL_MNDX = 1000048004
SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT = 1000049000
SYSTEM_HAND_TRACKING_PROPERTIES_EXT = 1000051000
HAND_TRACKER_CREATE_INFO_EXT = 1000051001
HAND_JOINTS_LOCATE_INFO_EXT = 1000051002
HAND_JOINT_LOCATIONS_EXT = 1000051003
HAND_JOINT_VELOCITIES_EXT = 1000051004
SYSTEM_HAND_TRACKING_MESH_PROPERTIES_MSFT = 1000052000
HAND_MESH_SPACE_CREATE_INFO_MSFT = 1000052001
HAND_MESH_UPDATE_INFO_MSFT = 1000052002
HAND_MESH_MSFT = 1000052003
HAND_POSE_TYPE_INFO_MSFT = 1000052004
SECONDARY_VIEW_CONFIGURATION_SESSION_BEGIN_INFO_MSFT = 1000053000
SECONDARY_VIEW_CONFIGURATION_STATE_MSFT = 1000053001
SECONDARY_VIEW_CONFIGURATION_FRAME_STATE_MSFT = 1000053002
SECONDARY_VIEW_CONFIGURATION_FRAME_END_INFO_MSFT = 1000053003
SECONDARY_VIEW_CONFIGURATION_LAYER_INFO_MSFT = 1000053004
SECONDARY_VIEW_CONFIGURATION_SWAPCHAIN_CREATE_INFO_MSFT = 1000053005
CONTROLLER_MODEL_KEY_STATE_MSFT = 1000055000
CONTROLLER_MODEL_NODE_PROPERTIES_MSFT = 1000055001
CONTROLLER_MODEL_PROPERTIES_MSFT = 1000055002
CONTROLLER_MODEL_NODE_STATE_MSFT = 1000055003
CONTROLLER_MODEL_STATE_MSFT = 1000055004
VIEW_CONFIGURATION_VIEW_FOV_EPIC = 1000059000
HOLOGRAPHIC_WINDOW_ATTACHMENT_MSFT = 1000063000
COMPOSITION_LAYER_REPROJECTION_INFO_MSFT = 1000066000
COMPOSITION_LAYER_REPROJECTION_PLANE_OVERRIDE_MSFT = 1000066001
ANDROID_SURFACE_SWAPCHAIN_CREATE_INFO_FB = 1000070000
COMPOSITION_LAYER_SECURE_CONTENT_FB = 1000072000
INTERACTION_PROFILE_ANALOG_THRESHOLD_VALVE = 1000079000
HAND_JOINTS_MOTION_RANGE_INFO_EXT = 1000080000
LOADER_INIT_INFO_ANDROID_KHR = 1000089000
VULKAN_INSTANCE_CREATE_INFO_KHR = 1000090000
VULKAN_DEVICE_CREATE_INFO_KHR = 1000090001
VULKAN_GRAPHICS_DEVICE_GET_INFO_KHR = 1000090003
COMPOSITION_LAYER_EQUIRECT2_KHR = 1000091000
SCENE_OBSERVER_CREATE_INFO_MSFT = 1000097000
SCENE_CREATE_INFO_MSFT = 1000097001
NEW_SCENE_COMPUTE_INFO_MSFT = 1000097002
VISUAL_MESH_COMPUTE_LOD_INFO_MSFT = 1000097003
SCENE_COMPONENTS_MSFT = 1000097004
SCENE_COMPONENTS_GET_INFO_MSFT = 1000097005
SCENE_COMPONENT_LOCATIONS_MSFT = 1000097006
SCENE_COMPONENTS_LOCATE_INFO_MSFT = 1000097007
SCENE_OBJECTS_MSFT = 1000097008
SCENE_COMPONENT_PARENT_FILTER_INFO_MSFT = 1000097009
SCENE_OBJECT_TYPES_FILTER_INFO_MSFT = 1000097010
SCENE_PLANES_MSFT = 1000097011
SCENE_PLANE_ALIGNMENT_FILTER_INFO_MSFT = 1000097012
SCENE_MESHES_MSFT = 1000097013
SCENE_MESH_BUFFERS_GET_INFO_MSFT = 1000097014
SCENE_MESH_BUFFERS_MSFT = 1000097015
SCENE_MESH_VERTEX_BUFFER_MSFT = 1000097016
SCENE_MESH_INDICES_UINT32_MSFT = 1000097017
SCENE_MESH_INDICES_UINT16_MSFT = 1000097018
SERIALIZED_SCENE_FRAGMENT_DATA_GET_INFO_MSFT = 1000098000
SCENE_DESERIALIZE_INFO_MSFT = 1000098001
EVENT_DATA_DISPLAY_REFRESH_RATE_CHANGED_FB = 1000101000
SYSTEM_COLOR_SPACE_PROPERTIES_FB = 1000108000
HAND_TRACKING_MESH_FB = 1000110001
HAND_TRACKING_SCALE_FB = 1000110003
HAND_TRACKING_AIM_STATE_FB = 1000111001
HAND_TRACKING_CAPSULES_STATE_FB = 1000112000
FOVEATION_PROFILE_CREATE_INFO_FB = 1000114000
SWAPCHAIN_CREATE_INFO_FOVEATION_FB = 1000114001
SWAPCHAIN_STATE_FOVEATION_FB = 1000114002
FOVEATION_LEVEL_PROFILE_CREATE_INFO_FB = 1000115000
BINDING_MODIFICATIONS_KHR = 1000120000
VIEW_LOCATE_FOVEATED_RENDERING_VARJO = 1000121000
FOVEATED_VIEW_CONFIGURATION_VIEW_VARJO = 1000121001
SYSTEM_FOVEATED_RENDERING_PROPERTIES_VARJO = 1000121002
COMPOSITION_LAYER_DEPTH_TEST_VARJO = 1000122000
SPATIAL_ANCHOR_PERSISTENCE_INFO_MSFT = 1000142000
SPATIAL_ANCHOR_FROM_PERSISTED_ANCHOR_CREATE_INFO_MSFT = 1000142001
SWAPCHAIN_IMAGE_FOVEATION_VULKAN_FB = 1000160000
SWAPCHAIN_STATE_ANDROID_SURFACE_DIMENSIONS_FB = 1000161000
SWAPCHAIN_STATE_SAMPLER_OPENGL_ES_FB = 1000162000
SWAPCHAIN_STATE_SAMPLER_VULKAN_FB = 1000163000
COMPOSITION_LAYER_SPACE_WARP_INFO_FB = 1000171000
SYSTEM_SPACE_WARP_PROPERTIES_FB = 1000171001
GRAPHICS_BINDING_VULKAN2_KHR = 1000025000
SWAPCHAIN_IMAGE_VULKAN2_KHR = 1000025001
GRAPHICS_REQUIREMENTS_VULKAN2_KHR = 1000025002
class FormFactor(EnumBase):
HEAD_MOUNTED_DISPLAY = 1
HANDHELD_DISPLAY = 2
class ViewConfigurationType(EnumBase):
PRIMARY_MONO = 1
PRIMARY_STEREO = 2
PRIMARY_QUAD_VARJO = 1000037000
SECONDARY_MONO_FIRST_PERSON_OBSERVER_MSFT = 1000054000
class EnvironmentBlendMode(EnumBase):
OPAQUE = 1
ADDITIVE = 2
ALPHA_BLEND = 3
class ReferenceSpaceType(EnumBase):
VIEW = 1
LOCAL = 2
STAGE = 3
UNBOUNDED_MSFT = 1000038000
COMBINED_EYE_VARJO = 1000121000
class ActionType(EnumBase):
BOOLEAN_INPUT = 1
FLOAT_INPUT = 2
VECTOR2F_INPUT = 3
POSE_INPUT = 4
VIBRATION_OUTPUT = 100
class EyeVisibility(EnumBase):
BOTH = 0
LEFT = 1
RIGHT = 2
class SessionState(EnumBase):
UNKNOWN = 0
IDLE = 1
READY = 2
SYNCHRONIZED = 3
VISIBLE = 4
FOCUSED = 5
STOPPING = 6
LOSS_PENDING = 7
EXITING = 8
class ObjectType(EnumBase):
UNKNOWN = 0
INSTANCE = 1
SESSION = 2
SWAPCHAIN = 3
SPACE = 4
ACTION_SET = 5
ACTION = 6
DEBUG_UTILS_MESSENGER_EXT = 1000019000
SPATIAL_ANCHOR_MSFT = 1000039000
HAND_TRACKER_EXT = 1000051000
SCENE_OBSERVER_MSFT = 1000097000
SCENE_MSFT = 1000097001
FOVEATION_PROFILE_FB = 1000114000
SPATIAL_ANCHOR_STORE_CONNECTION_MSFT = 1000142000
class VisibilityMaskTypeKHR(EnumBase):
HIDDEN_TRIANGLE_MESH = 1
VISIBLE_TRIANGLE_MESH = 2
LINE_LOOP = 3
class PerfSettingsDomainEXT(EnumBase):
CPU = 1
GPU = 2
class PerfSettingsSubDomainEXT(EnumBase):
COMPOSITING = 1
RENDERING = 2
THERMAL = 3
class PerfSettingsLevelEXT(EnumBase):
POWER_SAVINGS = 0
SUSTAINED_LOW = 25
SUSTAINED_HIGH = 50
BOOST = 75
class PerfSettingsNotificationLevelEXT(EnumBase):
NORMAL = 0
WARNING = 25
IMPAIRED = 75
class BlendFactorFB(EnumBase):
ZERO = 0
ONE = 1
SRC_ALPHA = 2
ONE_MINUS_SRC_ALPHA = 3
DST_ALPHA = 4
ONE_MINUS_DST_ALPHA = 5
class SpatialGraphNodeTypeMSFT(EnumBase):
STATIC = 1
DYNAMIC = 2
class HandEXT(EnumBase):
LEFT = 1
RIGHT = 2
class HandJointEXT(EnumBase):
PALM = 0
WRIST = 1
THUMB_METACARPAL = 2
THUMB_PROXIMAL = 3
THUMB_DISTAL = 4
THUMB_TIP = 5
INDEX_METACARPAL = 6
INDEX_PROXIMAL = 7
INDEX_INTERMEDIATE = 8
INDEX_DISTAL = 9
INDEX_TIP = 10
MIDDLE_METACARPAL = 11
MIDDLE_PROXIMAL = 12
MIDDLE_INTERMEDIATE = 13
MIDDLE_DISTAL = 14
MIDDLE_TIP = 15
RING_METACARPAL = 16
RING_PROXIMAL = 17
RING_INTERMEDIATE = 18
RING_DISTAL = 19
RING_TIP = 20
LITTLE_METACARPAL = 21
LITTLE_PROXIMAL = 22
LITTLE_INTERMEDIATE = 23
LITTLE_DISTAL = 24
LITTLE_TIP = 25
class HandJointSetEXT(EnumBase):
DEFAULT = 0
class HandPoseTypeMSFT(EnumBase):
TRACKED = 0
REFERENCE_OPEN_PALM = 1
class ReprojectionModeMSFT(EnumBase):
DEPTH = 1
PLANAR_FROM_DEPTH = 2
PLANAR_MANUAL = 3
ORIENTATION_ONLY = 4
class HandJointsMotionRangeEXT(EnumBase):
UNOBSTRUCTED = 1
CONFORMING_TO_CONTROLLER = 2
class SceneComputeFeatureMSFT(EnumBase):
PLANE = 1
PLANE_MESH = 2
VISUAL_MESH = 3
COLLIDER_MESH = 4
SERIALIZE_SCENE = 1000098000
class SceneComputeConsistencyMSFT(EnumBase):
SNAPSHOT_COMPLETE = 1
SNAPSHOT_INCOMPLETE_FAST = 2
OCCLUSION_OPTIMIZED = 3
class MeshComputeLodMSFT(EnumBase):
COARSE = 1
MEDIUM = 2
FINE = 3
UNLIMITED = 4
class SceneComponentTypeMSFT(EnumBase):
INVALID = -1
OBJECT = 1
PLANE = 2
VISUAL_MESH = 3
COLLIDER_MESH = 4
SERIALIZED_SCENE_FRAGMENT = 1000098000
class SceneObjectTypeMSFT(EnumBase):
UNCATEGORIZED = -1
BACKGROUND = 1
WALL = 2
FLOOR = 3
CEILING = 4
PLATFORM = 5
INFERRED = 6
class ScenePlaneAlignmentTypeMSFT(EnumBase):
NON_ORTHOGONAL = 0
HORIZONTAL = 1
VERTICAL = 2
class SceneComputeStateMSFT(EnumBase):
NONE = 0
UPDATING = 1
COMPLETED = 2
COMPLETED_WITH_ERROR = 3
class ColorSpaceFB(EnumBase):
UNMANAGED = 0
REC2020 = 1
REC709 = 2
RIFT_CV1 = 3
RIFT_S = 4
QUEST = 5
P3 = 6
ADOBE_RGB = 7
class FoveationLevelFB(EnumBase):
NONE = 0
LOW = 1
MEDIUM = 2
HIGH = 3
class FoveationDynamicFB(EnumBase):
DISABLED = 0
LEVEL_ENABLED = 1
__all__ = [
"Result",
"StructureType",
"FormFactor",
"ViewConfigurationType",
"EnvironmentBlendMode",
"ReferenceSpaceType",
"ActionType",
"EyeVisibility",
"SessionState",
"ObjectType",
"VisibilityMaskTypeKHR",
"PerfSettingsDomainEXT",
"PerfSettingsSubDomainEXT",
"PerfSettingsLevelEXT",
"PerfSettingsNotificationLevelEXT",
"BlendFactorFB",
"SpatialGraphNodeTypeMSFT",
"HandEXT",
"HandJointEXT",
"HandJointSetEXT",
"HandPoseTypeMSFT",
"ReprojectionModeMSFT",
"HandJointsMotionRangeEXT",
"SceneComputeFeatureMSFT",
"SceneComputeConsistencyMSFT",
"MeshComputeLodMSFT",
"SceneComponentTypeMSFT",
"SceneObjectTypeMSFT",
"ScenePlaneAlignmentTypeMSFT",
"SceneComputeStateMSFT",
"ColorSpaceFB",
"FoveationLevelFB",
"FoveationDynamicFB",
]
```
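Because every generated enum derives from `EnumBase`, a raw `c_int` coming back from the loader can be mapped straight onto the Python enum. A usage sketch; the `xr` import path is assumed from the src/xr/ layout above:
```python
# Assumed import path, based on the src/xr/ layout shown above.
from ctypes import c_int
from xr.enums import Result, StructureType

raw = c_int(Result.ERROR_HANDLE_INVALID.value)  # value as returned by the C API
print(Result(raw.value))                        # Result.ERROR_HANDLE_INVALID
print(StructureType.ctype() is c_int)           # True: every enum marshals as c_int
```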
#### File: src/xr/typedefs.py
```python
from ctypes import CFUNCTYPE, POINTER, Structure, c_char, c_char_p, c_float, c_int, c_int16, c_int32, c_int64, c_uint16, c_uint32, c_uint64, c_uint8, c_void_p
from .enums import *
Version = c_uint64
Flags64 = c_uint64
SystemId = c_uint64
Bool32 = c_uint32
Path = c_uint64
Time = c_int64
Duration = c_int64
class Instance_T(Structure):
pass
Instance = POINTER(Instance_T)
class Session_T(Structure):
pass
Session = POINTER(Session_T)
class Space_T(Structure):
pass
Space = POINTER(Space_T)
class Action_T(Structure):
pass
Action = POINTER(Action_T)
class Swapchain_T(Structure):
pass
Swapchain = POINTER(Swapchain_T)
class ActionSet_T(Structure):
pass
ActionSet = POINTER(ActionSet_T)
InstanceCreateFlags = Flags64
SessionCreateFlags = Flags64
SpaceVelocityFlags = Flags64
SpaceLocationFlags = Flags64
SwapchainCreateFlags = Flags64
SwapchainUsageFlags = Flags64
CompositionLayerFlags = Flags64
ViewStateFlags = Flags64
InputSourceLocalizedNameFlags = Flags64
PFN_xrVoidFunction = CFUNCTYPE(None)
class ApiLayerProperties(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.API_LAYER_PROPERTIES.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_name", (c_char * 256)),
("spec_version", Version),
("layer_version", c_uint32),
("description", (c_char * 256)),
]
class ExtensionProperties(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EXTENSION_PROPERTIES.value,
*args, **kwargs,
)
def __bytes__(self):
return self.extension_name
def __eq__(self, other):
try:
if other.type != self.type:
return False
except AttributeError:
pass # That's OK, objects without those attributes can use string comparison
return str(other) == str(self)
def __str__(self):
return self.extension_name.decode()
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("extension_name", (c_char * 128)),
("extension_version", c_uint32),
]
class ApplicationInfo(Structure):
_fields_ = [
("application_name", (c_char * 128)),
("application_version", c_uint32),
("engine_name", (c_char * 128)),
("engine_version", c_uint32),
("api_version", Version),
]
class InstanceCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INSTANCE_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("create_flags", InstanceCreateFlags),
("application_info", ApplicationInfo),
("enabled_api_layer_count", c_uint32),
("enabled_api_layer_names", POINTER(c_char_p)),
("enabled_extension_count", c_uint32),
("enabled_extension_names", POINTER(c_char_p)),
]
class InstanceProperties(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INSTANCE_PROPERTIES.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("runtime_version", Version),
("runtime_name", (c_char * 128)),
]
class EventDataBuffer(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_BUFFER.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("varying", (c_uint8 * 4000)),
]
class SystemGetInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_GET_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("form_factor", FormFactor.ctype()),
]
class SystemGraphicsProperties(Structure):
_fields_ = [
("max_swapchain_image_height", c_uint32),
("max_swapchain_image_width", c_uint32),
("max_layer_count", c_uint32),
]
class SystemTrackingProperties(Structure):
_fields_ = [
("orientation_tracking", Bool32),
("position_tracking", Bool32),
]
class SystemProperties(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_PROPERTIES.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("system_id", SystemId),
("vendor_id", c_uint32),
("system_name", (c_char * 256)),
("graphics_properties", SystemGraphicsProperties),
("tracking_properties", SystemTrackingProperties),
]
class SessionCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SESSION_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("create_flags", SessionCreateFlags),
("system_id", SystemId),
]
class Vector3f(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
("z", c_float),
]
class SpaceVelocity(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPACE_VELOCITY.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("velocity_flags", SpaceVelocityFlags),
("linear_velocity", Vector3f),
("angular_velocity", Vector3f),
]
class Quaternionf(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
("z", c_float),
("w", c_float),
]
class Posef(Structure):
_fields_ = [
("orientation", Quaternionf),
("position", Vector3f),
]
class ReferenceSpaceCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.REFERENCE_SPACE_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("reference_space_type", ReferenceSpaceType.ctype()),
("pose_in_reference_space", Posef),
]
class Extent2Df(Structure):
_fields_ = [
("width", c_float),
("height", c_float),
]
class ActionSpaceCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_SPACE_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action", Action),
("subaction_path", Path),
("pose_in_action_space", Posef),
]
class SpaceLocation(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPACE_LOCATION.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("location_flags", SpaceLocationFlags),
("pose", Posef),
]
class ViewConfigurationProperties(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_CONFIGURATION_PROPERTIES.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_type", ViewConfigurationType.ctype()),
("fov_mutable", Bool32),
]
class ViewConfigurationView(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_CONFIGURATION_VIEW.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("recommended_image_rect_width", c_uint32),
("max_image_rect_width", c_uint32),
("recommended_image_rect_height", c_uint32),
("max_image_rect_height", c_uint32),
("recommended_swapchain_sample_count", c_uint32),
("max_swapchain_sample_count", c_uint32),
]
class SwapchainCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("create_flags", SwapchainCreateFlags),
("usage_flags", SwapchainUsageFlags),
("format", c_int64),
("sample_count", c_uint32),
("width", c_uint32),
("height", c_uint32),
("face_count", c_uint32),
("array_size", c_uint32),
("mip_count", c_uint32),
]
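# SwapchainImageBaseHeader has no structure type of its own; the
# graphics-API-specific swapchain image structures supply the real value,
# so the base wrapper initializes `type` to 0.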
class SwapchainImageBaseHeader(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
0,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SwapchainImageAcquireInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_IMAGE_ACQUIRE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SwapchainImageWaitInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_IMAGE_WAIT_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("timeout", Duration),
]
class SwapchainImageReleaseInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_IMAGE_RELEASE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SessionBeginInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SESSION_BEGIN_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("primary_view_configuration_type", ViewConfigurationType.ctype()),
]
class FrameWaitInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FRAME_WAIT_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class FrameState(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FRAME_STATE.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("predicted_display_time", Time),
("predicted_display_period", Duration),
("should_render", Bool32),
]
class FrameBeginInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FRAME_BEGIN_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class CompositionLayerBaseHeader(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_BASE_HEADER.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
]
class FrameEndInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FRAME_END_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("display_time", Time),
("environment_blend_mode", EnvironmentBlendMode.ctype()),
("layer_count", c_uint32),
("layers", POINTER(POINTER(CompositionLayerBaseHeader))),
]
class ViewLocateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_LOCATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_type", ViewConfigurationType.ctype()),
("display_time", Time),
("space", Space),
]
class ViewState(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_STATE.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_state_flags", ViewStateFlags),
]
class Fovf(Structure):
_fields_ = [
("angle_left", c_float),
("angle_right", c_float),
("angle_up", c_float),
("angle_down", c_float),
]
class View(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("pose", Posef),
("fov", Fovf),
]
class ActionSetCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_SET_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action_set_name", (c_char * 64)),
("localized_action_set_name", (c_char * 128)),
("priority", c_uint32),
]
class ActionCreateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_CREATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action_name", (c_char * 64)),
("action_type", ActionType.ctype()),
("count_subaction_paths", c_uint32),
("subaction_paths", POINTER(c_uint64)),
("localized_action_name", (c_char * 128)),
]
class ActionSuggestedBinding(Structure):
_fields_ = [
("action", Action),
("binding", Path),
]
class InteractionProfileSuggestedBinding(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INTERACTION_PROFILE_SUGGESTED_BINDING.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("interaction_profile", Path),
("count_suggested_bindings", c_uint32),
("suggested_bindings", POINTER(ActionSuggestedBinding)),
]
class SessionActionSetsAttachInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SESSION_ACTION_SETS_ATTACH_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("count_action_sets", c_uint32),
("action_sets", POINTER(POINTER(ActionSet_T))),
]
class InteractionProfileState(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INTERACTION_PROFILE_STATE.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("interaction_profile", Path),
]
class ActionStateGetInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_STATE_GET_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action", Action),
("subaction_path", Path),
]
class ActionStateBoolean(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_STATE_BOOLEAN.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("current_state", Bool32),
("changed_since_last_sync", Bool32),
("last_change_time", Time),
("is_active", Bool32),
]
class ActionStateFloat(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_STATE_FLOAT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("current_state", c_float),
("changed_since_last_sync", Bool32),
("last_change_time", Time),
("is_active", Bool32),
]
class Vector2f(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
]
class ActionStateVector2f(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_STATE_VECTOR2F.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("current_state", Vector2f),
("changed_since_last_sync", Bool32),
("last_change_time", Time),
("is_active", Bool32),
]
class ActionStatePose(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTION_STATE_POSE.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("is_active", Bool32),
]
class ActiveActionSet(Structure):
_fields_ = [
("action_set", ActionSet),
("subaction_path", Path),
]
class ActionsSyncInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.ACTIONS_SYNC_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("count_active_action_sets", c_uint32),
("active_action_sets", POINTER(ActiveActionSet)),
]
class BoundSourcesForActionEnumerateInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.BOUND_SOURCES_FOR_ACTION_ENUMERATE_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action", Action),
]
class InputSourceLocalizedNameGetInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INPUT_SOURCE_LOCALIZED_NAME_GET_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("source_path", Path),
("which_components", InputSourceLocalizedNameFlags),
]
class HapticActionInfo(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAPTIC_ACTION_INFO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action", Action),
("subaction_path", Path),
]
class HapticBaseHeader(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAPTIC_BASE_HEADER.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class BaseInStructure(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.BASE_IN_STRUCTURE.value,
*args, **kwargs,
)
BaseInStructure._fields_ = [
("type", StructureType.ctype()),
("next", POINTER(BaseInStructure)),
]
class BaseOutStructure(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.BASE_OUT_STRUCTURE.value,
*args, **kwargs,
)
BaseOutStructure._fields_ = [
("type", StructureType.ctype()),
("next", POINTER(BaseOutStructure)),
]
class Offset2Di(Structure):
_fields_ = [
("x", c_int32),
("y", c_int32),
]
class Extent2Di(Structure):
_fields_ = [
("width", c_int32),
("height", c_int32),
]
class Rect2Di(Structure):
_fields_ = [
("offset", Offset2Di),
("extent", Extent2Di),
]
class SwapchainSubImage(Structure):
_fields_ = [
("swapchain", Swapchain),
("image_rect", Rect2Di),
("image_array_index", c_uint32),
]
class CompositionLayerProjectionView(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_PROJECTION_VIEW.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("pose", Posef),
("fov", Fovf),
("sub_image", SwapchainSubImage),
]
class CompositionLayerProjection(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_PROJECTION.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("view_count", c_uint32),
("views", POINTER(CompositionLayerProjectionView)),
]
class CompositionLayerQuad(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_QUAD.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("eye_visibility", EyeVisibility.ctype()),
("sub_image", SwapchainSubImage),
("pose", Posef),
("size", Extent2Df),
]
class EventDataBaseHeader(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_BASE_HEADER.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class EventDataEventsLost(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_EVENTS_LOST.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("lost_event_count", c_uint32),
]
class EventDataInstanceLossPending(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_INSTANCE_LOSS_PENDING.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("loss_time", Time),
]
class EventDataSessionStateChanged(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_SESSION_STATE_CHANGED.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("session", Session),
("state", SessionState.ctype()),
("time", Time),
]
class EventDataReferenceSpaceChangePending(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_REFERENCE_SPACE_CHANGE_PENDING.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("session", Session),
("reference_space_type", ReferenceSpaceType.ctype()),
("change_time", Time),
("pose_valid", Bool32),
("pose_in_previous_space", Posef),
]
class EventDataInteractionProfileChanged(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_INTERACTION_PROFILE_CHANGED.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("session", Session),
]
class HapticVibration(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAPTIC_VIBRATION.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("duration", Duration),
("frequency", c_float),
("amplitude", c_float),
]
class Offset2Df(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
]
class Rect2Df(Structure):
_fields_ = [
("offset", Offset2Df),
("extent", Extent2Df),
]
class Vector4f(Structure):
_fields_ = [
("x", c_float),
("y", c_float),
("z", c_float),
("w", c_float),
]
class Color4f(Structure):
_fields_ = [
("r", c_float),
("g", c_float),
("b", c_float),
("a", c_float),
]
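# ctypes prototypes (PFN_xr*) for the core OpenXR 1.0 entry points. Each
# returns a Result and is meant to be resolved at runtime, typically through
# xrGetInstanceProcAddr. Illustrative loading pattern (assumes an `instance`
# handle and a bound get_instance_proc_addr exist elsewhere in this module):
#   fn = PFN_xrVoidFunction()
#   get_instance_proc_addr(instance, b"xrCreateSession", byref(fn))
#   create_session = cast(fn, PFN_xrCreateSession)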
PFN_xrGetInstanceProcAddr = CFUNCTYPE(Result.ctype(), Instance, c_char_p, POINTER(PFN_xrVoidFunction))
PFN_xrEnumerateApiLayerProperties = CFUNCTYPE(Result.ctype(), c_uint32, POINTER(c_uint32), POINTER(ApiLayerProperties))
PFN_xrEnumerateInstanceExtensionProperties = CFUNCTYPE(Result.ctype(), c_char_p, c_uint32, POINTER(c_uint32), POINTER(ExtensionProperties))
PFN_xrCreateInstance = CFUNCTYPE(Result.ctype(), POINTER(InstanceCreateInfo), POINTER(Instance))
PFN_xrDestroyInstance = CFUNCTYPE(Result.ctype(), Instance)
PFN_xrGetInstanceProperties = CFUNCTYPE(Result.ctype(), Instance, POINTER(InstanceProperties))
PFN_xrPollEvent = CFUNCTYPE(Result.ctype(), Instance, POINTER(EventDataBuffer))
PFN_xrResultToString = CFUNCTYPE(Result.ctype(), Instance, Result.ctype(), (c_char * 64))
PFN_xrStructureTypeToString = CFUNCTYPE(Result.ctype(), Instance, StructureType.ctype(), (c_char * 64))
PFN_xrGetSystem = CFUNCTYPE(Result.ctype(), Instance, POINTER(SystemGetInfo), POINTER(SystemId))
PFN_xrGetSystemProperties = CFUNCTYPE(Result.ctype(), Instance, SystemId, POINTER(SystemProperties))
PFN_xrEnumerateEnvironmentBlendModes = CFUNCTYPE(Result.ctype(), Instance, SystemId, ViewConfigurationType.ctype(), c_uint32, POINTER(c_uint32), POINTER(EnvironmentBlendMode.ctype()))
PFN_xrCreateSession = CFUNCTYPE(Result.ctype(), Instance, POINTER(SessionCreateInfo), POINTER(Session))
PFN_xrDestroySession = CFUNCTYPE(Result.ctype(), Session)
PFN_xrEnumerateReferenceSpaces = CFUNCTYPE(Result.ctype(), Session, c_uint32, POINTER(c_uint32), POINTER(ReferenceSpaceType.ctype()))
PFN_xrCreateReferenceSpace = CFUNCTYPE(Result.ctype(), Session, POINTER(ReferenceSpaceCreateInfo), POINTER(Space))
PFN_xrGetReferenceSpaceBoundsRect = CFUNCTYPE(Result.ctype(), Session, ReferenceSpaceType.ctype(), POINTER(Extent2Df))
PFN_xrCreateActionSpace = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionSpaceCreateInfo), POINTER(Space))
PFN_xrLocateSpace = CFUNCTYPE(Result.ctype(), Space, Space, Time, POINTER(SpaceLocation))
PFN_xrDestroySpace = CFUNCTYPE(Result.ctype(), Space)
PFN_xrEnumerateViewConfigurations = CFUNCTYPE(Result.ctype(), Instance, SystemId, c_uint32, POINTER(c_uint32), POINTER(ViewConfigurationType.ctype()))
PFN_xrGetViewConfigurationProperties = CFUNCTYPE(Result.ctype(), Instance, SystemId, ViewConfigurationType.ctype(), POINTER(ViewConfigurationProperties))
PFN_xrEnumerateViewConfigurationViews = CFUNCTYPE(Result.ctype(), Instance, SystemId, ViewConfigurationType.ctype(), c_uint32, POINTER(c_uint32), POINTER(ViewConfigurationView))
PFN_xrEnumerateSwapchainFormats = CFUNCTYPE(Result.ctype(), Session, c_uint32, POINTER(c_uint32), POINTER(c_int64))
PFN_xrCreateSwapchain = CFUNCTYPE(Result.ctype(), Session, POINTER(SwapchainCreateInfo), POINTER(Swapchain))
PFN_xrDestroySwapchain = CFUNCTYPE(Result.ctype(), Swapchain)
PFN_xrEnumerateSwapchainImages = CFUNCTYPE(Result.ctype(), Swapchain, c_uint32, POINTER(c_uint32), POINTER(SwapchainImageBaseHeader))
PFN_xrAcquireSwapchainImage = CFUNCTYPE(Result.ctype(), Swapchain, POINTER(SwapchainImageAcquireInfo), POINTER(c_uint32))
PFN_xrWaitSwapchainImage = CFUNCTYPE(Result.ctype(), Swapchain, POINTER(SwapchainImageWaitInfo))
PFN_xrReleaseSwapchainImage = CFUNCTYPE(Result.ctype(), Swapchain, POINTER(SwapchainImageReleaseInfo))
PFN_xrBeginSession = CFUNCTYPE(Result.ctype(), Session, POINTER(SessionBeginInfo))
PFN_xrEndSession = CFUNCTYPE(Result.ctype(), Session)
PFN_xrRequestExitSession = CFUNCTYPE(Result.ctype(), Session)
PFN_xrWaitFrame = CFUNCTYPE(Result.ctype(), Session, POINTER(FrameWaitInfo), POINTER(FrameState))
PFN_xrBeginFrame = CFUNCTYPE(Result.ctype(), Session, POINTER(FrameBeginInfo))
PFN_xrEndFrame = CFUNCTYPE(Result.ctype(), Session, POINTER(FrameEndInfo))
PFN_xrLocateViews = CFUNCTYPE(Result.ctype(), Session, POINTER(ViewLocateInfo), POINTER(ViewState), c_uint32, POINTER(c_uint32), POINTER(View))
PFN_xrStringToPath = CFUNCTYPE(Result.ctype(), Instance, c_char_p, POINTER(Path))
PFN_xrPathToString = CFUNCTYPE(Result.ctype(), Instance, Path, c_uint32, POINTER(c_uint32), c_char_p)
PFN_xrCreateActionSet = CFUNCTYPE(Result.ctype(), Instance, POINTER(ActionSetCreateInfo), POINTER(ActionSet))
PFN_xrDestroyActionSet = CFUNCTYPE(Result.ctype(), ActionSet)
PFN_xrCreateAction = CFUNCTYPE(Result.ctype(), ActionSet, POINTER(ActionCreateInfo), POINTER(Action))
PFN_xrDestroyAction = CFUNCTYPE(Result.ctype(), Action)
PFN_xrSuggestInteractionProfileBindings = CFUNCTYPE(Result.ctype(), Instance, POINTER(InteractionProfileSuggestedBinding))
PFN_xrAttachSessionActionSets = CFUNCTYPE(Result.ctype(), Session, POINTER(SessionActionSetsAttachInfo))
PFN_xrGetCurrentInteractionProfile = CFUNCTYPE(Result.ctype(), Session, Path, POINTER(InteractionProfileState))
PFN_xrGetActionStateBoolean = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionStateGetInfo), POINTER(ActionStateBoolean))
PFN_xrGetActionStateFloat = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionStateGetInfo), POINTER(ActionStateFloat))
PFN_xrGetActionStateVector2f = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionStateGetInfo), POINTER(ActionStateVector2f))
PFN_xrGetActionStatePose = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionStateGetInfo), POINTER(ActionStatePose))
PFN_xrSyncActions = CFUNCTYPE(Result.ctype(), Session, POINTER(ActionsSyncInfo))
PFN_xrEnumerateBoundSourcesForAction = CFUNCTYPE(Result.ctype(), Session, POINTER(BoundSourcesForActionEnumerateInfo), c_uint32, POINTER(c_uint32), POINTER(Path))
PFN_xrGetInputSourceLocalizedName = CFUNCTYPE(Result.ctype(), Session, POINTER(InputSourceLocalizedNameGetInfo), c_uint32, POINTER(c_uint32), c_char_p)
PFN_xrApplyHapticFeedback = CFUNCTYPE(Result.ctype(), Session, POINTER(HapticActionInfo), POINTER(HapticBaseHeader))
PFN_xrStopHapticFeedback = CFUNCTYPE(Result.ctype(), Session, POINTER(HapticActionInfo))
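# KHR extension structures and prototypes: composition layer cube / depth /
# cylinder / equirect, visibility mask, color scale/bias, loader init, and
# binding modifications.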
class CompositionLayerCubeKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_CUBE_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("eye_visibility", EyeVisibility.ctype()),
("swapchain", Swapchain),
("image_array_index", c_uint32),
("orientation", Quaternionf),
]
class CompositionLayerDepthInfoKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_DEPTH_INFO_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("sub_image", SwapchainSubImage),
("min_depth", c_float),
("max_depth", c_float),
("near_z", c_float),
("far_z", c_float),
]
class CompositionLayerCylinderKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_CYLINDER_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("eye_visibility", EyeVisibility.ctype()),
("sub_image", SwapchainSubImage),
("pose", Posef),
("radius", c_float),
("central_angle", c_float),
("aspect_ratio", c_float),
]
class CompositionLayerEquirectKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_EQUIRECT_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("eye_visibility", EyeVisibility.ctype()),
("sub_image", SwapchainSubImage),
("pose", Posef),
("radius", c_float),
("scale", Vector2f),
("bias", Vector2f),
]
class VisibilityMaskKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VISIBILITY_MASK_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("vertex_capacity_input", c_uint32),
("vertex_count_output", c_uint32),
("vertices", POINTER(Vector2f)),
("index_capacity_input", c_uint32),
("index_count_output", c_uint32),
("indices", POINTER(c_uint32)),
]
class EventDataVisibilityMaskChangedKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_VISIBILITY_MASK_CHANGED_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("session", Session),
("view_configuration_type", ViewConfigurationType.ctype()),
("view_index", c_uint32),
]
PFN_xrGetVisibilityMaskKHR = CFUNCTYPE(Result.ctype(), Session, ViewConfigurationType.ctype(), c_uint32, VisibilityMaskTypeKHR.ctype(), POINTER(VisibilityMaskKHR))
class CompositionLayerColorScaleBiasKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_COLOR_SCALE_BIAS_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("color_scale", Color4f),
("color_bias", Color4f),
]
class LoaderInitInfoBaseHeaderKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.LOADER_INIT_INFO_BASE_HEADER_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
PFN_xrInitializeLoaderKHR = CFUNCTYPE(Result.ctype(), POINTER(LoaderInitInfoBaseHeaderKHR))
class CompositionLayerEquirect2KHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_EQUIRECT2_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerFlags),
("space", Space),
("eye_visibility", EyeVisibility.ctype()),
("sub_image", SwapchainSubImage),
("pose", Posef),
("radius", c_float),
("central_horizontal_angle", c_float),
("upper_vertical_angle", c_float),
("lower_vertical_angle", c_float),
]
class BindingModificationBaseHeaderKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.BINDING_MODIFICATION_BASE_HEADER_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class BindingModificationsKHR(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.BINDING_MODIFICATIONS_KHR.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("binding_modification_count", c_uint32),
("binding_modifications", POINTER(POINTER(BindingModificationBaseHeaderKHR))),
]
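# Vendor and multi-vendor extension types follow (EXT, EXTX, MSFT, EPIC,
# VALVE, FB), grouped by extension.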
class EventDataPerfSettingsEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_PERF_SETTINGS_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("domain", PerfSettingsDomainEXT.ctype()),
("sub_domain", PerfSettingsSubDomainEXT.ctype()),
("from_level", PerfSettingsNotificationLevelEXT.ctype()),
("to_level", PerfSettingsNotificationLevelEXT.ctype()),
]
PFN_xrPerfSettingsSetPerformanceLevelEXT = CFUNCTYPE(Result.ctype(), Session, PerfSettingsDomainEXT.ctype(), PerfSettingsLevelEXT.ctype())
PFN_xrThermalGetTemperatureTrendEXT = CFUNCTYPE(Result.ctype(), Session, PerfSettingsDomainEXT.ctype(), POINTER(PerfSettingsNotificationLevelEXT.ctype()), POINTER(c_float), POINTER(c_float))
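# XR_EXT_debug_utils: messenger handle, flag aliases, label/object-name
# structures, and the callback plumbing.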
class DebugUtilsMessengerEXT_T(Structure):
pass
DebugUtilsMessengerEXT = POINTER(DebugUtilsMessengerEXT_T)
DebugUtilsMessageSeverityFlagsEXT = Flags64
DebugUtilsMessageTypeFlagsEXT = Flags64
class DebugUtilsObjectNameInfoEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.DEBUG_UTILS_OBJECT_NAME_INFO_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("object_type", ObjectType.ctype()),
("object_handle", c_uint64),
("object_name", c_char_p),
]
class DebugUtilsLabelEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.DEBUG_UTILS_LABEL_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("label_name", c_char_p),
]
class DebugUtilsMessengerCallbackDataEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("message_id", c_char_p),
("function_name", c_char_p),
("message", c_char_p),
("object_count", c_uint32),
("objects", POINTER(DebugUtilsObjectNameInfoEXT)),
("session_label_count", c_uint32),
("session_labels", POINTER(DebugUtilsLabelEXT)),
]
PFN_xrDebugUtilsMessengerCallbackEXT = CFUNCTYPE(Bool32, DebugUtilsMessageSeverityFlagsEXT, DebugUtilsMessageTypeFlagsEXT, POINTER(DebugUtilsMessengerCallbackDataEXT), c_void_p)
class DebugUtilsMessengerCreateInfoEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("message_severities", DebugUtilsMessageSeverityFlagsEXT),
("message_types", DebugUtilsMessageTypeFlagsEXT),
("user_callback", PFN_xrDebugUtilsMessengerCallbackEXT),
("user_data", c_void_p),
]
PFN_xrSetDebugUtilsObjectNameEXT = CFUNCTYPE(Result.ctype(), Instance, POINTER(DebugUtilsObjectNameInfoEXT))
PFN_xrCreateDebugUtilsMessengerEXT = CFUNCTYPE(Result.ctype(), Instance, POINTER(DebugUtilsMessengerCreateInfoEXT), POINTER(DebugUtilsMessengerEXT))
PFN_xrDestroyDebugUtilsMessengerEXT = CFUNCTYPE(Result.ctype(), DebugUtilsMessengerEXT)
PFN_xrSubmitDebugUtilsMessageEXT = CFUNCTYPE(Result.ctype(), Instance, DebugUtilsMessageSeverityFlagsEXT, DebugUtilsMessageTypeFlagsEXT, POINTER(DebugUtilsMessengerCallbackDataEXT))
PFN_xrSessionBeginDebugUtilsLabelRegionEXT = CFUNCTYPE(Result.ctype(), Session, POINTER(DebugUtilsLabelEXT))
PFN_xrSessionEndDebugUtilsLabelRegionEXT = CFUNCTYPE(Result.ctype(), Session)
PFN_xrSessionInsertDebugUtilsLabelEXT = CFUNCTYPE(Result.ctype(), Session, POINTER(DebugUtilsLabelEXT))
class SystemEyeGazeInteractionPropertiesEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_EYE_GAZE_INTERACTION_PROPERTIES_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("supports_eye_gaze_interaction", Bool32),
]
class EyeGazeSampleTimeEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EYE_GAZE_SAMPLE_TIME_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("time", Time),
]
OverlaySessionCreateFlagsEXTX = Flags64
OverlayMainSessionFlagsEXTX = Flags64
class SessionCreateInfoOverlayEXTX(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SESSION_CREATE_INFO_OVERLAY_EXTX.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("create_flags", OverlaySessionCreateFlagsEXTX),
("session_layers_placement", c_uint32),
]
class EventDataMainSessionVisibilityChangedEXTX(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_MAIN_SESSION_VISIBILITY_CHANGED_EXTX.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("visible", Bool32),
("flags", OverlayMainSessionFlagsEXTX),
]
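# XR_MSFT_spatial_anchor: anchor handle plus its create-info structures.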
class SpatialAnchorMSFT_T(Structure):
pass
SpatialAnchorMSFT = POINTER(SpatialAnchorMSFT_T)
class SpatialAnchorCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPATIAL_ANCHOR_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("space", Space),
("pose", Posef),
("time", Time),
]
class SpatialAnchorSpaceCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPATIAL_ANCHOR_SPACE_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("anchor", SpatialAnchorMSFT),
("pose_in_anchor_space", Posef),
]
PFN_xrCreateSpatialAnchorMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SpatialAnchorCreateInfoMSFT), POINTER(SpatialAnchorMSFT))
PFN_xrCreateSpatialAnchorSpaceMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SpatialAnchorSpaceCreateInfoMSFT), POINTER(Space))
PFN_xrDestroySpatialAnchorMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorMSFT)
CompositionLayerImageLayoutFlagsFB = Flags64
class CompositionLayerImageLayoutFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_IMAGE_LAYOUT_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("flags", CompositionLayerImageLayoutFlagsFB),
]
class CompositionLayerAlphaBlendFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_ALPHA_BLEND_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("src_factor_color", BlendFactorFB.ctype()),
("dst_factor_color", BlendFactorFB.ctype()),
("src_factor_alpha", BlendFactorFB.ctype()),
("dst_factor_alpha", BlendFactorFB.ctype()),
]
class ViewConfigurationDepthRangeEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_CONFIGURATION_DEPTH_RANGE_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("recommended_near_z", c_float),
("min_near_z", c_float),
("recommended_far_z", c_float),
("max_far_z", c_float),
]
PFN_xrSetInputDeviceActiveEXT = CFUNCTYPE(Result.ctype(), Session, Path, Path, Bool32)
PFN_xrSetInputDeviceStateBoolEXT = CFUNCTYPE(Result.ctype(), Session, Path, Path, Bool32)
PFN_xrSetInputDeviceStateFloatEXT = CFUNCTYPE(Result.ctype(), Session, Path, Path, c_float)
PFN_xrSetInputDeviceStateVector2fEXT = CFUNCTYPE(Result.ctype(), Session, Path, Path, Vector2f)
PFN_xrSetInputDeviceLocationEXT = CFUNCTYPE(Result.ctype(), Session, Path, Path, Space, Posef)
class SpatialGraphNodeSpaceCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("node_type", SpatialGraphNodeTypeMSFT.ctype()),
("node_id", (c_uint8 * 16)),
("pose", Posef),
]
PFN_xrCreateSpatialGraphNodeSpaceMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SpatialGraphNodeSpaceCreateInfoMSFT), POINTER(Space))
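# XR_EXT_hand_tracking: hand tracker handle, joint location/velocity
# structures, and the create/destroy/locate prototypes.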
class HandTrackerEXT_T(Structure):
pass
HandTrackerEXT = POINTER(HandTrackerEXT_T)
class SystemHandTrackingPropertiesEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_HAND_TRACKING_PROPERTIES_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("supports_hand_tracking", Bool32),
]
class HandTrackerCreateInfoEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_TRACKER_CREATE_INFO_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("hand", HandEXT.ctype()),
("hand_joint_set", HandJointSetEXT.ctype()),
]
class HandJointsLocateInfoEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_JOINTS_LOCATE_INFO_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("base_space", Space),
("time", Time),
]
class HandJointLocationEXT(Structure):
_fields_ = [
("location_flags", SpaceLocationFlags),
("pose", Posef),
("radius", c_float),
]
class HandJointVelocityEXT(Structure):
_fields_ = [
("velocity_flags", SpaceVelocityFlags),
("linear_velocity", Vector3f),
("angular_velocity", Vector3f),
]
class HandJointLocationsEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_JOINT_LOCATIONS_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("is_active", Bool32),
("joint_count", c_uint32),
("joint_locations", POINTER(HandJointLocationEXT)),
]
class HandJointVelocitiesEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_JOINT_VELOCITIES_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("joint_count", c_uint32),
("joint_velocities", POINTER(HandJointVelocityEXT)),
]
PFN_xrCreateHandTrackerEXT = CFUNCTYPE(Result.ctype(), Session, POINTER(HandTrackerCreateInfoEXT), POINTER(HandTrackerEXT))
PFN_xrDestroyHandTrackerEXT = CFUNCTYPE(Result.ctype(), HandTrackerEXT)
PFN_xrLocateHandJointsEXT = CFUNCTYPE(Result.ctype(), HandTrackerEXT, POINTER(HandJointsLocateInfoEXT), POINTER(HandJointLocationsEXT))
class SystemHandTrackingMeshPropertiesMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_HAND_TRACKING_MESH_PROPERTIES_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("supports_hand_tracking_mesh", Bool32),
("max_hand_mesh_index_count", c_uint32),
("max_hand_mesh_vertex_count", c_uint32),
]
class HandMeshSpaceCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_MESH_SPACE_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("hand_pose_type", HandPoseTypeMSFT.ctype()),
("pose_in_hand_mesh_space", Posef),
]
class HandMeshUpdateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_MESH_UPDATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("time", Time),
("hand_pose_type", HandPoseTypeMSFT.ctype()),
]
class HandMeshIndexBufferMSFT(Structure):
_fields_ = [
("index_buffer_key", c_uint32),
("index_capacity_input", c_uint32),
("index_count_output", c_uint32),
("indices", POINTER(c_uint32)),
]
class HandMeshVertexMSFT(Structure):
_fields_ = [
("position", Vector3f),
("normal", Vector3f),
]
class HandMeshVertexBufferMSFT(Structure):
_fields_ = [
("vertex_update_time", Time),
("vertex_capacity_input", c_uint32),
("vertex_count_output", c_uint32),
("vertices", POINTER(HandMeshVertexMSFT)),
]
class HandMeshMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_MESH_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("is_active", Bool32),
("index_buffer_changed", Bool32),
("vertex_buffer_changed", Bool32),
("index_buffer", HandMeshIndexBufferMSFT),
("vertex_buffer", HandMeshVertexBufferMSFT),
]
class HandPoseTypeInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_POSE_TYPE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("hand_pose_type", HandPoseTypeMSFT.ctype()),
]
PFN_xrCreateHandMeshSpaceMSFT = CFUNCTYPE(Result.ctype(), HandTrackerEXT, POINTER(HandMeshSpaceCreateInfoMSFT), POINTER(Space))
PFN_xrUpdateHandMeshMSFT = CFUNCTYPE(Result.ctype(), HandTrackerEXT, POINTER(HandMeshUpdateInfoMSFT), POINTER(HandMeshMSFT))
class SecondaryViewConfigurationSessionBeginInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_SESSION_BEGIN_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_count", c_uint32),
("enabled_view_configuration_types", POINTER(c_int)),
]
class SecondaryViewConfigurationStateMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_STATE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_type", ViewConfigurationType.ctype()),
("active", Bool32),
]
class SecondaryViewConfigurationFrameStateMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_FRAME_STATE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_count", c_uint32),
("view_configuration_states", POINTER(SecondaryViewConfigurationStateMSFT)),
]
class SecondaryViewConfigurationLayerInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_LAYER_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_type", ViewConfigurationType.ctype()),
("environment_blend_mode", EnvironmentBlendMode.ctype()),
("layer_count", c_uint32),
("layers", POINTER(POINTER(CompositionLayerBaseHeader))),
]
class SecondaryViewConfigurationFrameEndInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_FRAME_END_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_count", c_uint32),
("view_configuration_layers_info", POINTER(SecondaryViewConfigurationLayerInfoMSFT)),
]
class SecondaryViewConfigurationSwapchainCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SECONDARY_VIEW_CONFIGURATION_SWAPCHAIN_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("view_configuration_type", ViewConfigurationType.ctype()),
]
ControllerModelKeyMSFT = c_uint64
class ControllerModelKeyStateMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.CONTROLLER_MODEL_KEY_STATE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("model_key", ControllerModelKeyMSFT),
]
class ControllerModelNodePropertiesMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.CONTROLLER_MODEL_NODE_PROPERTIES_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("parent_node_name", (c_char * 64)),
("node_name", (c_char * 64)),
]
class ControllerModelPropertiesMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.CONTROLLER_MODEL_PROPERTIES_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("node_capacity_input", c_uint32),
("node_count_output", c_uint32),
("node_properties", POINTER(ControllerModelNodePropertiesMSFT)),
]
class ControllerModelNodeStateMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.CONTROLLER_MODEL_NODE_STATE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("node_pose", Posef),
]
class ControllerModelStateMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.CONTROLLER_MODEL_STATE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("node_capacity_input", c_uint32),
("node_count_output", c_uint32),
("node_states", POINTER(ControllerModelNodeStateMSFT)),
]
PFN_xrGetControllerModelKeyMSFT = CFUNCTYPE(Result.ctype(), Session, Path, POINTER(ControllerModelKeyStateMSFT))
PFN_xrLoadControllerModelMSFT = CFUNCTYPE(Result.ctype(), Session, ControllerModelKeyMSFT, c_uint32, POINTER(c_uint32), POINTER(c_uint8))
PFN_xrGetControllerModelPropertiesMSFT = CFUNCTYPE(Result.ctype(), Session, ControllerModelKeyMSFT, POINTER(ControllerModelPropertiesMSFT))
PFN_xrGetControllerModelStateMSFT = CFUNCTYPE(Result.ctype(), Session, ControllerModelKeyMSFT, POINTER(ControllerModelStateMSFT))
class ViewConfigurationViewFovEPIC(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_CONFIGURATION_VIEW_FOV_EPIC.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("recommended_fov", Fovf),
("max_mutable_fov", Fovf),
]
class CompositionLayerReprojectionInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_REPROJECTION_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("reprojection_mode", ReprojectionModeMSFT.ctype()),
]
class CompositionLayerReprojectionPlaneOverrideMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_REPROJECTION_PLANE_OVERRIDE_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("position", Vector3f),
("normal", Vector3f),
("velocity", Vector3f),
]
PFN_xrEnumerateReprojectionModesMSFT = CFUNCTYPE(Result.ctype(), Instance, SystemId, ViewConfigurationType.ctype(), c_uint32, POINTER(c_uint32), POINTER(ReprojectionModeMSFT.ctype()))
class SwapchainStateBaseHeaderFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_STATE_BASE_HEADER_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
PFN_xrUpdateSwapchainFB = CFUNCTYPE(Result.ctype(), Swapchain, POINTER(SwapchainStateBaseHeaderFB))
PFN_xrGetSwapchainStateFB = CFUNCTYPE(Result.ctype(), Swapchain, POINTER(SwapchainStateBaseHeaderFB))
CompositionLayerSecureContentFlagsFB = Flags64
class CompositionLayerSecureContentFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_SECURE_CONTENT_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("flags", CompositionLayerSecureContentFlagsFB),
]
class InteractionProfileAnalogThresholdVALVE(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.INTERACTION_PROFILE_ANALOG_THRESHOLD_VALVE.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("action", Action),
("binding", Path),
("on_threshold", c_float),
("off_threshold", c_float),
("on_haptic", POINTER(HapticBaseHeader)),
("off_haptic", POINTER(HapticBaseHeader)),
]
class HandJointsMotionRangeInfoEXT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_JOINTS_MOTION_RANGE_INFO_EXT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("hand_joints_motion_range", HandJointsMotionRangeEXT.ctype()),
]
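# XR_MSFT_scene_understanding: scene observer and scene handles, compute
# bounds, component queries, plane/mesh data, and mesh buffer accessors.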
class SceneObserverMSFT_T(Structure):
pass
SceneObserverMSFT = POINTER(SceneObserverMSFT_T)
class SceneMSFT_T(Structure):
pass
SceneMSFT = POINTER(SceneMSFT_T)
class UuidMSFT(Structure):
_fields_ = [
("bytes", (c_uint8 * 16)),
]
class SceneObserverCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_OBSERVER_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SceneCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SceneSphereBoundMSFT(Structure):
_fields_ = [
("center", Vector3f),
("radius", c_float),
]
class SceneOrientedBoxBoundMSFT(Structure):
_fields_ = [
("pose", Posef),
("extents", Vector3f),
]
class SceneFrustumBoundMSFT(Structure):
_fields_ = [
("pose", Posef),
("fov", Fovf),
("far_distance", c_float),
]
class SceneBoundsMSFT(Structure):
_fields_ = [
("space", Space),
("time", Time),
("sphere_count", c_uint32),
("spheres", POINTER(SceneSphereBoundMSFT)),
("box_count", c_uint32),
("boxes", POINTER(SceneOrientedBoxBoundMSFT)),
("frustum_count", c_uint32),
("frustums", POINTER(SceneFrustumBoundMSFT)),
]
class NewSceneComputeInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.NEW_SCENE_COMPUTE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("requested_feature_count", c_uint32),
("requested_features", POINTER(c_int)),
("consistency", SceneComputeConsistencyMSFT.ctype()),
("bounds", SceneBoundsMSFT),
]
class VisualMeshComputeLodInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VISUAL_MESH_COMPUTE_LOD_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("lod", MeshComputeLodMSFT.ctype()),
]
class SceneComponentMSFT(Structure):
_fields_ = [
("component_type", SceneComponentTypeMSFT.ctype()),
("id", UuidMSFT),
("parent_id", UuidMSFT),
("update_time", Time),
]
class SceneComponentsMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_COMPONENTS_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("component_capacity_input", c_uint32),
("component_count_output", c_uint32),
("components", POINTER(SceneComponentMSFT)),
]
class SceneComponentsGetInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_COMPONENTS_GET_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("component_type", SceneComponentTypeMSFT.ctype()),
]
class SceneComponentLocationMSFT(Structure):
_fields_ = [
("flags", SpaceLocationFlags),
("pose", Posef),
]
class SceneComponentLocationsMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_COMPONENT_LOCATIONS_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("location_count", c_uint32),
("locations", POINTER(SceneComponentLocationMSFT)),
]
class SceneComponentsLocateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_COMPONENTS_LOCATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("base_space", Space),
("time", Time),
("component_id_count", c_uint32),
("component_ids", POINTER(UuidMSFT)),
]
class SceneObjectMSFT(Structure):
_fields_ = [
("object_type", SceneObjectTypeMSFT.ctype()),
]
class SceneObjectsMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_OBJECTS_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("scene_object_count", c_uint32),
("scene_objects", POINTER(SceneObjectMSFT)),
]
class SceneComponentParentFilterInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_COMPONENT_PARENT_FILTER_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("parent_id", UuidMSFT),
]
class SceneObjectTypesFilterInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_OBJECT_TYPES_FILTER_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("object_type_count", c_uint32),
("object_types", POINTER(c_int)),
]
class ScenePlaneMSFT(Structure):
_fields_ = [
("alignment", ScenePlaneAlignmentTypeMSFT.ctype()),
("size", Extent2Df),
("mesh_buffer_id", c_uint64),
("supports_indices_uint16", Bool32),
]
class ScenePlanesMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_PLANES_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("scene_plane_count", c_uint32),
("scene_planes", POINTER(ScenePlaneMSFT)),
]
class ScenePlaneAlignmentFilterInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_PLANE_ALIGNMENT_FILTER_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("alignment_count", c_uint32),
("alignments", POINTER(c_int)),
]
class SceneMeshMSFT(Structure):
_fields_ = [
("mesh_buffer_id", c_uint64),
("supports_indices_uint16", Bool32),
]
class SceneMeshesMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESHES_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("scene_mesh_count", c_uint32),
("scene_meshes", POINTER(SceneMeshMSFT)),
]
class SceneMeshBuffersGetInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESH_BUFFERS_GET_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("mesh_buffer_id", c_uint64),
]
class SceneMeshBuffersMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESH_BUFFERS_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SceneMeshVertexBufferMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESH_VERTEX_BUFFER_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("vertex_capacity_input", c_uint32),
("vertex_count_output", c_uint32),
("vertices", POINTER(Vector3f)),
]
class SceneMeshIndicesUint32MSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESH_INDICES_UINT32_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("index_capacity_input", c_uint32),
("index_count_output", c_uint32),
("indices", POINTER(c_uint32)),
]
class SceneMeshIndicesUint16MSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_MESH_INDICES_UINT16_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("index_capacity_input", c_uint32),
("index_count_output", c_uint32),
("indices", POINTER(c_uint16)),
]
PFN_xrEnumerateSceneComputeFeaturesMSFT = CFUNCTYPE(Result.ctype(), Instance, SystemId, c_uint32, POINTER(c_uint32), POINTER(SceneComputeFeatureMSFT.ctype()))
PFN_xrCreateSceneObserverMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SceneObserverCreateInfoMSFT), POINTER(SceneObserverMSFT))
PFN_xrDestroySceneObserverMSFT = CFUNCTYPE(Result.ctype(), SceneObserverMSFT)
PFN_xrCreateSceneMSFT = CFUNCTYPE(Result.ctype(), SceneObserverMSFT, POINTER(SceneCreateInfoMSFT), POINTER(SceneMSFT))
PFN_xrDestroySceneMSFT = CFUNCTYPE(Result.ctype(), SceneMSFT)
PFN_xrComputeNewSceneMSFT = CFUNCTYPE(Result.ctype(), SceneObserverMSFT, POINTER(NewSceneComputeInfoMSFT))
PFN_xrGetSceneComputeStateMSFT = CFUNCTYPE(Result.ctype(), SceneObserverMSFT, POINTER(SceneComputeStateMSFT.ctype()))
PFN_xrGetSceneComponentsMSFT = CFUNCTYPE(Result.ctype(), SceneMSFT, POINTER(SceneComponentsGetInfoMSFT), POINTER(SceneComponentsMSFT))
PFN_xrLocateSceneComponentsMSFT = CFUNCTYPE(Result.ctype(), SceneMSFT, POINTER(SceneComponentsLocateInfoMSFT), POINTER(SceneComponentLocationsMSFT))
PFN_xrGetSceneMeshBuffersMSFT = CFUNCTYPE(Result.ctype(), SceneMSFT, POINTER(SceneMeshBuffersGetInfoMSFT), POINTER(SceneMeshBuffersMSFT))
class SerializedSceneFragmentDataGetInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SERIALIZED_SCENE_FRAGMENT_DATA_GET_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("scene_fragment_id", UuidMSFT),
]
class DeserializeSceneFragmentMSFT(Structure):
_fields_ = [
("buffer_size", c_uint32),
("buffer", POINTER(c_uint8)),
]
class SceneDeserializeInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SCENE_DESERIALIZE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("fragment_count", c_uint32),
("fragments", POINTER(DeserializeSceneFragmentMSFT)),
]
PFN_xrDeserializeSceneMSFT = CFUNCTYPE(Result.ctype(), SceneObserverMSFT, POINTER(SceneDeserializeInfoMSFT))
PFN_xrGetSerializedSceneFragmentDataMSFT = CFUNCTYPE(Result.ctype(), SceneMSFT, POINTER(SerializedSceneFragmentDataGetInfoMSFT), c_uint32, POINTER(c_uint32), POINTER(c_uint8))
class EventDataDisplayRefreshRateChangedFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.EVENT_DATA_DISPLAY_REFRESH_RATE_CHANGED_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("from_display_refresh_rate", c_float),
("to_display_refresh_rate", c_float),
]
PFN_xrEnumerateDisplayRefreshRatesFB = CFUNCTYPE(Result.ctype(), Session, c_uint32, POINTER(c_uint32), POINTER(c_float))
PFN_xrGetDisplayRefreshRateFB = CFUNCTYPE(Result.ctype(), Session, POINTER(c_float))
PFN_xrRequestDisplayRefreshRateFB = CFUNCTYPE(Result.ctype(), Session, c_float)
class SystemColorSpacePropertiesFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_COLOR_SPACE_PROPERTIES_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("color_space", ColorSpaceFB.ctype()),
]
PFN_xrEnumerateColorSpacesFB = CFUNCTYPE(Result.ctype(), Session, c_uint32, POINTER(c_uint32), POINTER(ColorSpaceFB.ctype()))
PFN_xrSetColorSpaceFB = CFUNCTYPE(Result.ctype(), Session, c_int)
class Vector4sFB(Structure):
_fields_ = [
("x", c_int16),
("y", c_int16),
("z", c_int16),
("w", c_int16),
]
class HandTrackingMeshFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_TRACKING_MESH_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("joint_capacity_input", c_uint32),
("joint_count_output", c_uint32),
("joint_bind_poses", POINTER(Posef)),
("joint_radii", POINTER(c_float)),
("joint_parents", POINTER(HandJointEXT.ctype())),
("vertex_capacity_input", c_uint32),
("vertex_count_output", c_uint32),
("vertex_positions", POINTER(Vector3f)),
("vertex_normals", POINTER(Vector3f)),
("vertex_uvs", POINTER(Vector2f)),
("vertex_blend_indices", POINTER(Vector4sFB)),
("vertex_blend_weights", POINTER(Vector4f)),
("index_capacity_input", c_uint32),
("index_count_output", c_uint32),
("indices", POINTER(c_int16)),
]
class HandTrackingScaleFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_TRACKING_SCALE_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("sensor_output", c_float),
("current_output", c_float),
("override_hand_scale", Bool32),
("override_value_input", c_float),
]
PFN_xrGetHandMeshFB = CFUNCTYPE(Result.ctype(), HandTrackerEXT, POINTER(HandTrackingMeshFB))
HandTrackingAimFlagsFB = Flags64
class HandTrackingAimStateFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_TRACKING_AIM_STATE_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("status", HandTrackingAimFlagsFB),
("aim_pose", Posef),
("pinch_strength_index", c_float),
("pinch_strength_middle", c_float),
("pinch_strength_ring", c_float),
("pinch_strength_little", c_float),
]
class HandCapsuleFB(Structure):
_fields_ = [
("points", (Vector3f * 2)),
("radius", c_float),
("joint", HandJointEXT.ctype()),
]
class HandTrackingCapsulesStateFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.HAND_TRACKING_CAPSULES_STATE_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("capsules", (HandCapsuleFB * 19)),
]
class FoveationProfileFB_T(Structure):
pass
FoveationProfileFB = POINTER(FoveationProfileFB_T)
SwapchainCreateFoveationFlagsFB = Flags64
SwapchainStateFoveationFlagsFB = Flags64
class FoveationProfileCreateInfoFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FOVEATION_PROFILE_CREATE_INFO_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
]
class SwapchainCreateInfoFoveationFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_CREATE_INFO_FOVEATION_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("flags", SwapchainCreateFoveationFlagsFB),
]
class SwapchainStateFoveationFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SWAPCHAIN_STATE_FOVEATION_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("flags", SwapchainStateFoveationFlagsFB),
("profile", FoveationProfileFB),
]
PFN_xrCreateFoveationProfileFB = CFUNCTYPE(Result.ctype(), Session, POINTER(FoveationProfileCreateInfoFB), POINTER(FoveationProfileFB))
PFN_xrDestroyFoveationProfileFB = CFUNCTYPE(Result.ctype(), FoveationProfileFB)
class FoveationLevelProfileCreateInfoFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FOVEATION_LEVEL_PROFILE_CREATE_INFO_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("level", FoveationLevelFB.ctype()),
("vertical_offset", c_float),
("dynamic", FoveationDynamicFB.ctype()),
]
class ViewLocateFoveatedRenderingVARJO(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.VIEW_LOCATE_FOVEATED_RENDERING_VARJO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("foveated_rendering_active", Bool32),
]
class FoveatedViewConfigurationViewVARJO(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.FOVEATED_VIEW_CONFIGURATION_VIEW_VARJO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("foveated_rendering_active", Bool32),
]
class SystemFoveatedRenderingPropertiesVARJO(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_FOVEATED_RENDERING_PROPERTIES_VARJO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("supports_foveated_rendering", Bool32),
]
class CompositionLayerDepthTestVARJO(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_DEPTH_TEST_VARJO.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("depth_test_range_near_z", c_float),
("depth_test_range_far_z", c_float),
]
PFN_xrSetEnvironmentDepthEstimationVARJO = CFUNCTYPE(Result.ctype(), Session, Bool32)
class SpatialAnchorStoreConnectionMSFT_T(Structure):
pass
SpatialAnchorStoreConnectionMSFT = POINTER(SpatialAnchorStoreConnectionMSFT_T)
class SpatialAnchorPersistenceNameMSFT(Structure):
_fields_ = [
("name", (c_char * 256)),
]
class SpatialAnchorPersistenceInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPATIAL_ANCHOR_PERSISTENCE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("spatial_anchor_persistence_name", SpatialAnchorPersistenceNameMSFT),
("spatial_anchor", SpatialAnchorMSFT),
]
class SpatialAnchorFromPersistedAnchorCreateInfoMSFT(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SPATIAL_ANCHOR_FROM_PERSISTED_ANCHOR_CREATE_INFO_MSFT.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("spatial_anchor_store", SpatialAnchorStoreConnectionMSFT),
("spatial_anchor_persistence_name", SpatialAnchorPersistenceNameMSFT),
]
PFN_xrCreateSpatialAnchorStoreConnectionMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SpatialAnchorStoreConnectionMSFT))
PFN_xrDestroySpatialAnchorStoreConnectionMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorStoreConnectionMSFT)
PFN_xrPersistSpatialAnchorMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorStoreConnectionMSFT, POINTER(SpatialAnchorPersistenceInfoMSFT))
PFN_xrEnumeratePersistedSpatialAnchorNamesMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorStoreConnectionMSFT, c_uint32, POINTER(c_uint32), POINTER(SpatialAnchorPersistenceNameMSFT))
PFN_xrCreateSpatialAnchorFromPersistedNameMSFT = CFUNCTYPE(Result.ctype(), Session, POINTER(SpatialAnchorFromPersistedAnchorCreateInfoMSFT), POINTER(SpatialAnchorMSFT))
PFN_xrUnpersistSpatialAnchorMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorStoreConnectionMSFT, POINTER(SpatialAnchorPersistenceNameMSFT))
PFN_xrClearSpatialAnchorStoreMSFT = CFUNCTYPE(Result.ctype(), SpatialAnchorStoreConnectionMSFT)
CompositionLayerSpaceWarpInfoFlagsFB = Flags64
class CompositionLayerSpaceWarpInfoFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.COMPOSITION_LAYER_SPACE_WARP_INFO_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("layer_flags", CompositionLayerSpaceWarpInfoFlagsFB),
("motion_vector_sub_image", SwapchainSubImage),
("app_space_delta_pose", Posef),
("depth_sub_image", SwapchainSubImage),
("min_depth", c_float),
("max_depth", c_float),
("near_z", c_float),
("far_z", c_float),
]
class SystemSpaceWarpPropertiesFB(Structure):
def __init__(self, *args, **kwargs):
super().__init__(
StructureType.SYSTEM_SPACE_WARP_PROPERTIES_FB.value,
*args, **kwargs,
)
_fields_ = [
("type", StructureType.ctype()),
("next", c_void_p),
("recommended_motion_vector_image_rect_width", c_uint32),
("recommended_motion_vector_image_rect_height", c_uint32),
]
__all__ = [
"Version",
"Flags64",
"SystemId",
"Bool32",
"Path",
"Time",
"Duration",
"Instance_T",
"Instance",
"Session_T",
"Session",
"Space_T",
"Space",
"Action_T",
"Action",
"Swapchain_T",
"Swapchain",
"ActionSet_T",
"ActionSet",
"InstanceCreateFlags",
"SessionCreateFlags",
"SpaceVelocityFlags",
"SpaceLocationFlags",
"SwapchainCreateFlags",
"SwapchainUsageFlags",
"CompositionLayerFlags",
"ViewStateFlags",
"InputSourceLocalizedNameFlags",
"PFN_xrVoidFunction",
"ApiLayerProperties",
"ExtensionProperties",
"ApplicationInfo",
"InstanceCreateInfo",
"InstanceProperties",
"EventDataBuffer",
"SystemGetInfo",
"SystemGraphicsProperties",
"SystemTrackingProperties",
"SystemProperties",
"SessionCreateInfo",
"Vector3f",
"SpaceVelocity",
"Quaternionf",
"Posef",
"ReferenceSpaceCreateInfo",
"Extent2Df",
"ActionSpaceCreateInfo",
"SpaceLocation",
"ViewConfigurationProperties",
"ViewConfigurationView",
"SwapchainCreateInfo",
"SwapchainImageBaseHeader",
"SwapchainImageAcquireInfo",
"SwapchainImageWaitInfo",
"SwapchainImageReleaseInfo",
"SessionBeginInfo",
"FrameWaitInfo",
"FrameState",
"FrameBeginInfo",
"CompositionLayerBaseHeader",
"FrameEndInfo",
"ViewLocateInfo",
"ViewState",
"Fovf",
"View",
"ActionSetCreateInfo",
"ActionCreateInfo",
"ActionSuggestedBinding",
"InteractionProfileSuggestedBinding",
"SessionActionSetsAttachInfo",
"InteractionProfileState",
"ActionStateGetInfo",
"ActionStateBoolean",
"ActionStateFloat",
"Vector2f",
"ActionStateVector2f",
"ActionStatePose",
"ActiveActionSet",
"ActionsSyncInfo",
"BoundSourcesForActionEnumerateInfo",
"InputSourceLocalizedNameGetInfo",
"HapticActionInfo",
"HapticBaseHeader",
"BaseInStructure",
"BaseOutStructure",
"Offset2Di",
"Extent2Di",
"Rect2Di",
"SwapchainSubImage",
"CompositionLayerProjectionView",
"CompositionLayerProjection",
"CompositionLayerQuad",
"EventDataBaseHeader",
"EventDataEventsLost",
"EventDataInstanceLossPending",
"EventDataSessionStateChanged",
"EventDataReferenceSpaceChangePending",
"EventDataInteractionProfileChanged",
"HapticVibration",
"Offset2Df",
"Rect2Df",
"Vector4f",
"Color4f",
"PFN_xrGetInstanceProcAddr",
"PFN_xrEnumerateApiLayerProperties",
"PFN_xrEnumerateInstanceExtensionProperties",
"PFN_xrCreateInstance",
"PFN_xrDestroyInstance",
"PFN_xrGetInstanceProperties",
"PFN_xrPollEvent",
"PFN_xrResultToString",
"PFN_xrStructureTypeToString",
"PFN_xrGetSystem",
"PFN_xrGetSystemProperties",
"PFN_xrEnumerateEnvironmentBlendModes",
"PFN_xrCreateSession",
"PFN_xrDestroySession",
"PFN_xrEnumerateReferenceSpaces",
"PFN_xrCreateReferenceSpace",
"PFN_xrGetReferenceSpaceBoundsRect",
"PFN_xrCreateActionSpace",
"PFN_xrLocateSpace",
"PFN_xrDestroySpace",
"PFN_xrEnumerateViewConfigurations",
"PFN_xrGetViewConfigurationProperties",
"PFN_xrEnumerateViewConfigurationViews",
"PFN_xrEnumerateSwapchainFormats",
"PFN_xrCreateSwapchain",
"PFN_xrDestroySwapchain",
"PFN_xrEnumerateSwapchainImages",
"PFN_xrAcquireSwapchainImage",
"PFN_xrWaitSwapchainImage",
"PFN_xrReleaseSwapchainImage",
"PFN_xrBeginSession",
"PFN_xrEndSession",
"PFN_xrRequestExitSession",
"PFN_xrWaitFrame",
"PFN_xrBeginFrame",
"PFN_xrEndFrame",
"PFN_xrLocateViews",
"PFN_xrStringToPath",
"PFN_xrPathToString",
"PFN_xrCreateActionSet",
"PFN_xrDestroyActionSet",
"PFN_xrCreateAction",
"PFN_xrDestroyAction",
"PFN_xrSuggestInteractionProfileBindings",
"PFN_xrAttachSessionActionSets",
"PFN_xrGetCurrentInteractionProfile",
"PFN_xrGetActionStateBoolean",
"PFN_xrGetActionStateFloat",
"PFN_xrGetActionStateVector2f",
"PFN_xrGetActionStatePose",
"PFN_xrSyncActions",
"PFN_xrEnumerateBoundSourcesForAction",
"PFN_xrGetInputSourceLocalizedName",
"PFN_xrApplyHapticFeedback",
"PFN_xrStopHapticFeedback",
"CompositionLayerCubeKHR",
"CompositionLayerDepthInfoKHR",
"CompositionLayerCylinderKHR",
"CompositionLayerEquirectKHR",
"VisibilityMaskKHR",
"EventDataVisibilityMaskChangedKHR",
"PFN_xrGetVisibilityMaskKHR",
"CompositionLayerColorScaleBiasKHR",
"LoaderInitInfoBaseHeaderKHR",
"PFN_xrInitializeLoaderKHR",
"CompositionLayerEquirect2KHR",
"BindingModificationBaseHeaderKHR",
"BindingModificationsKHR",
"EventDataPerfSettingsEXT",
"PFN_xrPerfSettingsSetPerformanceLevelEXT",
"PFN_xrThermalGetTemperatureTrendEXT",
"DebugUtilsMessengerEXT_T",
"DebugUtilsMessengerEXT",
"DebugUtilsMessageSeverityFlagsEXT",
"DebugUtilsMessageTypeFlagsEXT",
"DebugUtilsObjectNameInfoEXT",
"DebugUtilsLabelEXT",
"DebugUtilsMessengerCallbackDataEXT",
"PFN_xrDebugUtilsMessengerCallbackEXT",
"DebugUtilsMessengerCreateInfoEXT",
"PFN_xrSetDebugUtilsObjectNameEXT",
"PFN_xrCreateDebugUtilsMessengerEXT",
"PFN_xrDestroyDebugUtilsMessengerEXT",
"PFN_xrSubmitDebugUtilsMessageEXT",
"PFN_xrSessionBeginDebugUtilsLabelRegionEXT",
"PFN_xrSessionEndDebugUtilsLabelRegionEXT",
"PFN_xrSessionInsertDebugUtilsLabelEXT",
"SystemEyeGazeInteractionPropertiesEXT",
"EyeGazeSampleTimeEXT",
"OverlaySessionCreateFlagsEXTX",
"OverlayMainSessionFlagsEXTX",
"SessionCreateInfoOverlayEXTX",
"EventDataMainSessionVisibilityChangedEXTX",
"SpatialAnchorMSFT_T",
"SpatialAnchorMSFT",
"SpatialAnchorCreateInfoMSFT",
"SpatialAnchorSpaceCreateInfoMSFT",
"PFN_xrCreateSpatialAnchorMSFT",
"PFN_xrCreateSpatialAnchorSpaceMSFT",
"PFN_xrDestroySpatialAnchorMSFT",
"CompositionLayerImageLayoutFlagsFB",
"CompositionLayerImageLayoutFB",
"CompositionLayerAlphaBlendFB",
"ViewConfigurationDepthRangeEXT",
"PFN_xrSetInputDeviceActiveEXT",
"PFN_xrSetInputDeviceStateBoolEXT",
"PFN_xrSetInputDeviceStateFloatEXT",
"PFN_xrSetInputDeviceStateVector2fEXT",
"PFN_xrSetInputDeviceLocationEXT",
"SpatialGraphNodeSpaceCreateInfoMSFT",
"PFN_xrCreateSpatialGraphNodeSpaceMSFT",
"HandTrackerEXT_T",
"HandTrackerEXT",
"SystemHandTrackingPropertiesEXT",
"HandTrackerCreateInfoEXT",
"HandJointsLocateInfoEXT",
"HandJointLocationEXT",
"HandJointVelocityEXT",
"HandJointLocationsEXT",
"HandJointVelocitiesEXT",
"PFN_xrCreateHandTrackerEXT",
"PFN_xrDestroyHandTrackerEXT",
"PFN_xrLocateHandJointsEXT",
"SystemHandTrackingMeshPropertiesMSFT",
"HandMeshSpaceCreateInfoMSFT",
"HandMeshUpdateInfoMSFT",
"HandMeshIndexBufferMSFT",
"HandMeshVertexMSFT",
"HandMeshVertexBufferMSFT",
"HandMeshMSFT",
"HandPoseTypeInfoMSFT",
"PFN_xrCreateHandMeshSpaceMSFT",
"PFN_xrUpdateHandMeshMSFT",
"SecondaryViewConfigurationSessionBeginInfoMSFT",
"SecondaryViewConfigurationStateMSFT",
"SecondaryViewConfigurationFrameStateMSFT",
"SecondaryViewConfigurationLayerInfoMSFT",
"SecondaryViewConfigurationFrameEndInfoMSFT",
"SecondaryViewConfigurationSwapchainCreateInfoMSFT",
"ControllerModelKeyMSFT",
"ControllerModelKeyStateMSFT",
"ControllerModelNodePropertiesMSFT",
"ControllerModelPropertiesMSFT",
"ControllerModelNodeStateMSFT",
"ControllerModelStateMSFT",
"PFN_xrGetControllerModelKeyMSFT",
"PFN_xrLoadControllerModelMSFT",
"PFN_xrGetControllerModelPropertiesMSFT",
"PFN_xrGetControllerModelStateMSFT",
"ViewConfigurationViewFovEPIC",
"CompositionLayerReprojectionInfoMSFT",
"CompositionLayerReprojectionPlaneOverrideMSFT",
"PFN_xrEnumerateReprojectionModesMSFT",
"SwapchainStateBaseHeaderFB",
"PFN_xrUpdateSwapchainFB",
"PFN_xrGetSwapchainStateFB",
"CompositionLayerSecureContentFlagsFB",
"CompositionLayerSecureContentFB",
"InteractionProfileAnalogThresholdVALVE",
"HandJointsMotionRangeInfoEXT",
"SceneObserverMSFT_T",
"SceneObserverMSFT",
"SceneMSFT_T",
"SceneMSFT",
"UuidMSFT",
"SceneObserverCreateInfoMSFT",
"SceneCreateInfoMSFT",
"SceneSphereBoundMSFT",
"SceneOrientedBoxBoundMSFT",
"SceneFrustumBoundMSFT",
"SceneBoundsMSFT",
"NewSceneComputeInfoMSFT",
"VisualMeshComputeLodInfoMSFT",
"SceneComponentMSFT",
"SceneComponentsMSFT",
"SceneComponentsGetInfoMSFT",
"SceneComponentLocationMSFT",
"SceneComponentLocationsMSFT",
"SceneComponentsLocateInfoMSFT",
"SceneObjectMSFT",
"SceneObjectsMSFT",
"SceneComponentParentFilterInfoMSFT",
"SceneObjectTypesFilterInfoMSFT",
"ScenePlaneMSFT",
"ScenePlanesMSFT",
"ScenePlaneAlignmentFilterInfoMSFT",
"SceneMeshMSFT",
"SceneMeshesMSFT",
"SceneMeshBuffersGetInfoMSFT",
"SceneMeshBuffersMSFT",
"SceneMeshVertexBufferMSFT",
"SceneMeshIndicesUint32MSFT",
"SceneMeshIndicesUint16MSFT",
"PFN_xrEnumerateSceneComputeFeaturesMSFT",
"PFN_xrCreateSceneObserverMSFT",
"PFN_xrDestroySceneObserverMSFT",
"PFN_xrCreateSceneMSFT",
"PFN_xrDestroySceneMSFT",
"PFN_xrComputeNewSceneMSFT",
"PFN_xrGetSceneComputeStateMSFT",
"PFN_xrGetSceneComponentsMSFT",
"PFN_xrLocateSceneComponentsMSFT",
"PFN_xrGetSceneMeshBuffersMSFT",
"SerializedSceneFragmentDataGetInfoMSFT",
"DeserializeSceneFragmentMSFT",
"SceneDeserializeInfoMSFT",
"PFN_xrDeserializeSceneMSFT",
"PFN_xrGetSerializedSceneFragmentDataMSFT",
"EventDataDisplayRefreshRateChangedFB",
"PFN_xrEnumerateDisplayRefreshRatesFB",
"PFN_xrGetDisplayRefreshRateFB",
"PFN_xrRequestDisplayRefreshRateFB",
"SystemColorSpacePropertiesFB",
"PFN_xrEnumerateColorSpacesFB",
"PFN_xrSetColorSpaceFB",
"Vector4sFB",
"HandTrackingMeshFB",
"HandTrackingScaleFB",
"PFN_xrGetHandMeshFB",
"HandTrackingAimFlagsFB",
"HandTrackingAimStateFB",
"HandCapsuleFB",
"HandTrackingCapsulesStateFB",
"FoveationProfileFB_T",
"FoveationProfileFB",
"SwapchainCreateFoveationFlagsFB",
"SwapchainStateFoveationFlagsFB",
"FoveationProfileCreateInfoFB",
"SwapchainCreateInfoFoveationFB",
"SwapchainStateFoveationFB",
"PFN_xrCreateFoveationProfileFB",
"PFN_xrDestroyFoveationProfileFB",
"FoveationLevelProfileCreateInfoFB",
"ViewLocateFoveatedRenderingVARJO",
"FoveatedViewConfigurationViewVARJO",
"SystemFoveatedRenderingPropertiesVARJO",
"CompositionLayerDepthTestVARJO",
"PFN_xrSetEnvironmentDepthEstimationVARJO",
"SpatialAnchorStoreConnectionMSFT_T",
"SpatialAnchorStoreConnectionMSFT",
"SpatialAnchorPersistenceNameMSFT",
"SpatialAnchorPersistenceInfoMSFT",
"SpatialAnchorFromPersistedAnchorCreateInfoMSFT",
"PFN_xrCreateSpatialAnchorStoreConnectionMSFT",
"PFN_xrDestroySpatialAnchorStoreConnectionMSFT",
"PFN_xrPersistSpatialAnchorMSFT",
"PFN_xrEnumeratePersistedSpatialAnchorNamesMSFT",
"PFN_xrCreateSpatialAnchorFromPersistedNameMSFT",
"PFN_xrUnpersistSpatialAnchorMSFT",
"PFN_xrClearSpatialAnchorStoreMSFT",
"CompositionLayerSpaceWarpInfoFlagsFB",
"CompositionLayerSpaceWarpInfoFB",
"SystemSpaceWarpPropertiesFB",
]
``` |
{
"source": "jherland/asyncjobs",
"score": 3
} |
#### File: asyncjobs/asyncjobs/basic.py
```python
import asyncio
import concurrent.futures
import logging
import time
logger = logging.getLogger(__name__)
class Context:
"""Helper class instantiated per job and passed to the job coroutine.
This is the interface available to the job coroutine for communicating
with the Scheduler, and other parts of the surrounding scheduler
environment.
"""
def __init__(self, name, scheduler):
self.name = name
self.deps = None # set by the coroutine wrapper in Scheduler.add_job()
self._scheduler = scheduler
def event(self, event, **kwargs):
"""Emit a scheduling event from this job."""
self._scheduler.event(event, job=self.name, **kwargs)
async def results(self, *job_names):
"""Wait until the given jobs are finished, and return their results.
Returns a dictionary mapping job names to their results.
Raise KeyError if any of the given jobs have not been added to the
scheduler. If any of the given jobs fail (either by raising an
exception or by being cancelled) then raise CancelledError here to
cancel this job as a consequence.
"""
assert self._scheduler.running
tasks = [self._scheduler.tasks[n] for n in job_names]
pending = [n for n, t in zip(job_names, tasks) if not t.done()]
self.event('await results', jobs=list(job_names), pending=pending)
if pending:
logger.debug(f'{self.name} waiting for {", ".join(pending)}…')
try:
results = dict(zip(job_names, await asyncio.gather(*tasks)))
except Exception:
logger.info(f'{self.name} cancelled due to failed dependency')
raise asyncio.CancelledError
self.event('awaited results')
logger.debug(f'{self.name} returning {results} from .results()')
return results
def add_job(self, name, coro, deps=None):
"""Add a job to be run.
Schedule the job to start immediately. Raise ValueError if a job with
the same name has already been added.
"""
self._scheduler.add_job(name, coro, deps)
class Scheduler:
"""Async job scheduler. Run job coroutines as concurrent tasks.
Jobs are added by passing their names and coroutines to .add_job(). The
coroutines will be scheduled for execution when .run() is called. While
running, jobs may use the Context object passed to the coroutine to await
each other's results or spawn new jobs. When all jobs are completed
(successfully or otherwise), .run() will return a dictionary with all the
job results.
"""
def __init__(self, *, event_handler=None, context_class=Context):
self.jobs = {} # job name -> coroutine
self.tasks = {} # job name -> Task object, aka. (future) job result
self.running = False
self.event_handler = event_handler
assert issubclass(context_class, Context)
self.context_class = context_class
def __contains__(self, job_name):
return job_name in self.jobs
def event(self, event, **kwargs):
if self.event_handler is None:
return
d = {'timestamp': time.time(), 'event': event}
d.update(kwargs)
logger.debug(f'Posting event: {d}')
self.event_handler(d)
@staticmethod
def _fate(future):
"""Return a word describing the state of the given future."""
if not future.done():
return 'unfinished'
elif future.cancelled():
return 'cancelled'
elif future.exception() is not None:
return 'failed'
else:
return 'success'
def _start_job(self, name):
assert name in self.jobs
assert name not in self.tasks
ctx = self.context_class(name, self)
task = asyncio.create_task(self.jobs[name](ctx), name=name)
self.tasks[name] = task
self.event('start', job=name)
task.add_done_callback(
lambda task: self.event('finish', job=name, fate=self._fate(task))
)
def add_job(self, name, coro, deps=None):
"""Add a job (aka. named coroutine) to be run.
If 'deps' is given, it must be a set of names of other jobs that will
be awaited before coro is started. The results of those jobs are made
available in the ctx.deps dict.
If we're already running (i.e. inside .run()) schedule the job
immediately, otherwise the job will be scheduled when .run() is called.
Raise ValueError if a job with the same name has already been added.
"""
if name in self.jobs:
raise ValueError(f'Cannot add job named {name}. Already added!')
logger.debug(f'Adding job named {name} with deps={deps!r}')
if deps is not None:
async def deps_before_coro(ctx, wrapped_coro=coro):
assert ctx.deps is None
logger.debug(f'Awaiting dependencies: {deps}…')
ctx.deps = await ctx.results(*deps)
return await wrapped_coro(ctx)
coro = deps_before_coro
self.jobs[name] = coro
self.event('add', job=name)
if self.running:
self._start_job(name)
async def _run_tasks(self, return_when):
logger.info('Waiting for all jobs to complete…')
self.event('await tasks', jobs=list(self.tasks.keys()))
shutting_down = False
while any(not t.done() for t in self.tasks.values()):
# NEVER exit this loop while there are tasks still running.
try:
# Await the tasks that are currently running. More tasks may be
# spawned while we're waiting, and those are not awaited here.
await asyncio.wait(
self.tasks.values(), return_when=return_when
)
# We know _some_ task completed, successfully or not
assert any(t.done() for t in self.tasks.values())
if not shutting_down:
# Is it time to shut down yet?
if return_when == concurrent.futures.FIRST_COMPLETED:
shutting_down = True
elif return_when == concurrent.futures.FIRST_EXCEPTION:
shutting_down = any(
t.done() and (t.cancelled() or t.exception())
for t in self.tasks.values()
)
else:
assert return_when == concurrent.futures.ALL_COMPLETED
shutting_down = all(
t.done() for t in self.tasks.values()
)
except BaseException as e:
logger.warning(f'{self.__class__.__name__} aborted by {e!r}')
shutting_down = True
finally:
if shutting_down:
# Keep cancelling tasks until all are finished
logger.info('Shutting down…')
self.event('cancelling tasks')
return_when = concurrent.futures.ALL_COMPLETED
for name, task in self.tasks.items():
if not task.done():
logger.info(f'Cancelling {name}…')
task.cancel()
self.event('awaited tasks')
async def run(self, *, keep_going=False):
"""Run until all jobs are finished.
If keep_going is disabled (the default), the first failing job (i.e.
a job that raises an exception) will cause us to cancel all other
concurrent and remaining jobs and return as soon as possible.
If keep_going is enabled, we will keep running as long as there are
jobs to do. Only the jobs that depend (directly or indirectly) on the
failing job(s) will be cancelled.
Return a dictionary mapping job names to the corresponding
asyncio.Future objects that encapsulate their result (return value,
exception, or cancellation).
"""
logger.debug('Running…')
self.event('start', keep_going=keep_going, num_jobs=len(self.jobs))
if keep_going:
return_when = concurrent.futures.ALL_COMPLETED
else:
return_when = concurrent.futures.FIRST_EXCEPTION
if self.jobs:
for name in self.jobs.keys():
self._start_job(name)
self.running = True
await self._run_tasks(return_when)
self.running = False
else:
logger.warning('Nothing to do!')
self.event('finish', num_tasks=len(self.tasks))
return self.tasks
```
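A minimal usage sketch of the `Scheduler`/`Context` API above, based only on the code and docstrings shown here; the job names, return values, and the `asyncjobs.basic` import path are illustrative assumptions rather than documented behaviour.
```python
import asyncio

from asyncjobs.basic import Scheduler  # assumed import path for the module above


async def fetch(ctx):
    ctx.event('working')  # emit a custom scheduling event tagged with this job's name
    return 42


async def report(ctx):
    # ctx.deps holds the results of the jobs listed in deps= below
    return ctx.deps['fetch'] * 2


async def main():
    scheduler = Scheduler(event_handler=print)  # print every scheduling event dict
    scheduler.add_job('fetch', fetch)
    scheduler.add_job('report', report, deps={'fetch'})
    tasks = await scheduler.run()  # job name -> finished asyncio.Task
    return {name: task.result() for name, task in tasks.items()}


print(asyncio.run(main()))  # expected: {'fetch': 42, 'report': 84}
```
With `keep_going=True`, a failing job would only cancel the jobs that depend on it instead of aborting the whole run.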
#### File: asyncjobs/tests/subprocess_helper.py
```python
import os
import logging
from pathlib import Path
import signal
import sys
import time
logger = logging.getLogger('subprocess_helper')
def main(args):
out = sys.stdout
for arg in args:
if arg == 'err:':
out = sys.stderr
elif arg == 'out:':
out = sys.stdout
elif arg == 'in:':
print(sys.stdin.readline().rstrip(), file=out)
elif arg == 'cwd:':
print(Path.cwd(), file=out)
elif arg.startswith('env:'):
print(os.environ[arg[4:]], file=out)
elif arg.startswith('sleep:'):
time.sleep(float(arg[6:]))
elif arg.startswith('touch:'):
Path(arg[6:]).touch()
elif arg.startswith('log:'):
logger.error(arg[4:])
elif arg.startswith('ignore:'):
signal.signal(getattr(signal, arg[7:]), signal.SIG_IGN)
elif arg.startswith('exit:'):
return int(arg[5:])
else:
print(arg, file=out)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
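The helper above is driven entirely by its argument mini-language (`out:`, `err:`, `in:`, `sleep:`, `exit:`, …). A hypothetical invocation sketch, assuming it is run from the directory containing `subprocess_helper.py`:
```python
import subprocess
import sys

proc = subprocess.run(
    [sys.executable, 'subprocess_helper.py',  # relative path is an assumption
     'hello', 'err:', 'oops', 'out:', 'sleep:0.1', 'done', 'exit:3'],
    capture_output=True, text=True,
)
assert proc.stdout == 'hello\ndone\n'  # plain args are echoed to the current stream
assert proc.stderr == 'oops\n'         # 'err:' switched output to stderr
assert proc.returncode == 3            # 'exit:3' becomes the exit status
```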
#### File: asyncjobs/tests/test_stream_mux.py
```python
import asyncio
import pytest
from asyncjobs.stream_mux import StreamMux
pytestmark = pytest.mark.asyncio
async def test_no_output(verify_output):
with StreamMux():
pass
assert verify_output([])
async def test_one_stream_undecorated(verify_output):
with StreamMux() as mux, mux.new_stream() as f:
print('This is the first line', file=f)
print('This is the second line', file=f)
assert verify_output(
[['This is the first line', 'This is the second line']]
)
async def test_two_streams_undecorated(verify_output):
with StreamMux() as mux, mux.new_stream() as f:
print('This is stream 1 line 1', file=f)
with mux.new_stream() as g:
print('This is stream 2 line 1', file=g)
print('This is stream 2 line 2', file=g)
print('This is stream 1 line 2', file=f)
assert verify_output(
[
['This is stream 1 line 1', 'This is stream 1 line 2'],
['This is stream 2 line 1', 'This is stream 2 line 2'],
],
)
async def test_one_stream_decorated(verify_output):
with StreamMux() as mux:
decorator = StreamMux.simple_decorator('[pre]{}[post]') # long-winded
with mux.new_stream(decorator) as f:
print('This is the first line', file=f)
print('This is the second line', file=f)
assert verify_output(
[
[
'[pre]This is the first line[post]',
'[pre]This is the second line[post]',
]
]
)
async def test_two_streams_decorated(verify_output):
with StreamMux() as mux:
with mux.new_stream(b'1>>{}<<1') as f: # shorter version
print('This is stream 1 line 1', file=f)
with mux.new_stream('2>>{}<<2') as g:
print('This is stream 2 line 1', file=g)
print('This is stream 2 line 2', file=g)
print('This is stream 1 line 2', file=f)
assert verify_output(
[
['1>>This is stream 1 line 1<<1', '1>>This is stream 1 line 2<<1'],
['2>>This is stream 2 line 1<<2', '2>>This is stream 2 line 2<<2'],
],
)
async def test_one_charwise_stream_decorated(verify_output):
s = 'foo\nbar\nbaz'
with StreamMux() as mux, mux.new_stream('<{}>') as f:
for c in s:
f.write(c)
assert verify_output([['<foo>', '<bar>', '<baz>']])
async def test_one_charwise_interrupted_stream_decorated(verify_output):
s = 'foo\nbar\nbaz'
with StreamMux() as mux, mux.new_stream('<{}>') as f:
for c in s:
f.write(c)
f.flush()
await asyncio.sleep(0.001)
assert verify_output([['<foo>', '<bar>', '<baz>']])
async def test_two_charwise_streams_decorated(verify_output):
s = 'foo\nbar\nbaz'
t = '123\n456\n789'
with StreamMux() as mux:
with mux.new_stream(b'<{}>') as f, mux.new_stream('[{}]') as g:
for c, d in zip(s, t):
f.write(c)
g.write(d)
assert verify_output(
[['<foo>', '<bar>', '<baz>'], ['[123]', '[456]', '[789]']]
)
async def test_two_charwise_interrupted_streams_decorated(verify_output):
s = 'foo\nbar\nbaz'
t = '123\n456\n789'
with StreamMux() as mux:
with mux.new_stream(b'<{}>') as f, mux.new_stream('[{}]') as g:
for c, d in zip(s, t):
f.write(c)
g.write(d)
f.flush()
g.flush()
await asyncio.sleep(0.001)
assert verify_output(
[['<foo>', '<bar>', '<baz>'], ['[123]', '[456]', '[789]']]
)
async def test_one_bytewise_stream_with_garbage(capfdbinary):
lines = [
b'first line...',
b'latin-1: \xc6\xd8\xc5...',
b'utf-8: \xe2\x9c\x94\xe2\x88\x80\xe2\x9c\x98...',
b'f8 - ff: \xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff...',
b'last line without newline',
]
bytestring = b'\n'.join(lines)
prefix, suffix = '❰'.encode('utf-8'), '❱\n'.encode('utf-8')
expect_bytestring = b''.join(prefix + line + suffix for line in lines)
with StreamMux() as mux, mux.new_stream('❰{}❱') as f:
f.buffer.write(bytestring)
actual = capfdbinary.readouterr()
assert actual.out == expect_bytestring
assert actual.err == b''
async def test_one_bytewise_stream_in_binary_mode_with_garbage(capfdbinary):
lines = [
b'first line...',
b'latin-1: \xc6\xd8\xc5...',
b'utf-8: \xe2\x9c\x94\xe2\x88\x80\xe2\x9c\x98...',
b'f8 - ff: \xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff...',
b'last line without newline',
]
bytestring = b'\n'.join(lines)
prefix = b'>>> ' # test passing bytes w/o placeholder to simple_decorator
expect_bytestring = b''.join(prefix + line + b'\n' for line in lines)
with StreamMux() as mux, mux.new_stream(prefix, mode='wb') as f:
f.write(bytestring)
actual = capfdbinary.readouterr()
assert actual.out == expect_bytestring
assert actual.err == b''
async def test_write_to_file(tmp_path):
path = tmp_path / 'file'
with path.open('w') as f, StreamMux(f) as mux, mux.new_stream() as g:
g.write('first line\n')
await asyncio.sleep(0.001)
g.write('second line\n')
assert path.read_text() == 'first line\nsecond line\n'
async def test_follow_file_reads_from_beginning(tmp_path, verify_output):
path = tmp_path / 'file'
with path.open('w') as f, StreamMux() as mux:
print('first line', file=f, flush=True)
with mux.follow_file(path, '<{}>'):
print('second line', file=f, flush=True)
print('third line', file=f, flush=True) # not watched
assert verify_output([['<first line>', '<second line>']])
async def test_follow_file_first_read_is_immediate(tmp_path, verify_output):
path = tmp_path / 'file'
with path.open('w') as f, StreamMux() as mux:
print('first line', file=f, flush=True)
with mux.follow_file(path, '<{}>'):
await asyncio.sleep(0.001)
assert verify_output([['<first line>']])
assert verify_output([])
async def test_follow_file_second_read_after_period(tmp_path, verify_output):
path = tmp_path / 'file'
with path.open('w') as f, StreamMux() as mux:
print('first line', file=f, flush=True)
with mux.follow_file(path, '<{}>', period=0.01):
await asyncio.sleep(0.001)
assert verify_output([['<first line>']])
print('second line', file=f, flush=True)
await asyncio.sleep(0.001)
assert verify_output([])
await asyncio.sleep(0.01)
assert verify_output([['<second line>']])
assert verify_output([])
async def test_follow_file_read_after_writer_close(tmp_path, verify_output):
path = tmp_path / 'file'
f = path.open('w')
print('first line', file=f, flush=True)
with StreamMux() as mux, mux.follow_file(path, '<{}>', period=0.01):
await asyncio.sleep(0.001)
assert verify_output([['<first line>']])
print('second line', file=f, flush=True)
f.close()
await asyncio.sleep(0.01)
assert verify_output([['<second line>']])
assert verify_output([])
async def test_follow_file_ignores_short_rewrites(tmp_path, verify_output):
path = tmp_path / 'file'
f = path.open('w')
print('this is the first line', file=f, flush=True)
with StreamMux() as mux, mux.follow_file(path, '<{}>', period=0.01):
await asyncio.sleep(0.001)
assert verify_output([['<this is the first line>']])
print('second line', file=f, flush=True)
f.close()
f = path.open('w')
print('short rewrite', file=f, flush=True) # not seen by reader
f.close()
assert verify_output([]) # misses both 'second line' and 'short rewrite'
async def test_follow_file_reads_appends(tmp_path, verify_output):
path = tmp_path / 'file'
f = path.open('w')
print('first line', file=f, flush=True)
with StreamMux() as mux, mux.follow_file(path, '<{}>', period=0.01):
await asyncio.sleep(0.001)
assert verify_output([['<first line>']])
print('second line', file=f, flush=True)
f.close()
f = path.open('a')
print('third line', file=f, flush=True)
f.close()
assert verify_output([['<second line>', '<third line>']])
async def test_internal_errors_are_propagated(tmp_path):
path = tmp_path / 'file'
f = path.open('w')
with pytest.raises(ValueError):
with StreamMux(f) as mux, mux.new_stream() as g:
g.write('first line\n')
g.flush()
await asyncio.sleep(0.001)
f.flush()
f.close()
g.write('second line\n') # raises ValueError: write to closed file
# context exit raises ValueError: flush of closed file
assert path.read_text() == 'first line\n'
``` |
{
"source": "jherland/browson",
"score": 2
} |
#### File: browson/browson/__main__.py
```python
from contextlib import suppress
from functools import partial
import json
import logging
import signal
import sys
from blessed import Terminal
from . import style
from .lineinput import LineInput
from .nodeview import NodeView
from .utils import debug_time, signal_handler
__version__ = "0.1"
logger = logging.getLogger("browson")
class ResizeEvent(Exception):
pass
class UI:
def __init__(self, root, term, style):
self.term = term
self.view = NodeView(root, term, *self.view_size, style)
self.input = None
# Misc. state/communication variables
self._resized = True # must handle recent window resize
self._quit = False # time to quit
self._status = None # custom status line
self._status_color = self.term.normal # status line color
self._timeout = None # used to reset status line
@property
def view_size(self):
return self.term.width, self.term.height - 2
@property
def status_y(self):
return self.term.height - 1
# Input handlers
def redraw(self):
self.view.need_redraw = True
def quit(self):
self._quit = True
def warning(self, message):
logger.warning(message)
self._status = message
self._status_color = self.term.bright_red
self._timeout = 3
self.draw_status()
def search(self):
self.input = LineInput(self.term, "Search: ", self.view.search)
self._status_color = self.term.bright_yellow
self.draw_status()
# Keyboard input
@debug_time
def handle_key(self, keystroke):
logger.debug(f"Got keystroke: {(str(keystroke), keystroke.name)!r}")
keymap = {
# focus movement
"k": partial(self.view.move_focus, -1),
"j": partial(self.view.move_focus, 1),
"KEY_UP": partial(self.view.move_focus, -1),
"KEY_DOWN": partial(self.view.move_focus, 1),
"KEY_SUP": partial(self.view.move_focus, -5),
"KEY_SDOWN": partial(self.view.move_focus, 5),
"KEY_PGUP": partial(self.view.move_focus, -(self.view.height)),
"KEY_PGDOWN": partial(self.view.move_focus, self.view.height),
"KEY_HOME": partial(self.view.move_focus, -sys.maxsize),
"KEY_END": partial(self.view.move_focus, +sys.maxsize),
"[": partial(self.view.jump_node, forwards=False),
"]": partial(self.view.jump_node, forwards=True),
# collapse/expand
"KEY_LEFT": self.view.collapse_current,
"c": self.view.collapse_other,
"C": self.view.collapse_all,
"KEY_RIGHT": self.view.expand_current,
"x": self.view.expand_below,
"X": self.view.expand_all,
# search
"/": self.search,
"n": partial(self.view.jump_match, forwards=True),
"p": partial(self.view.jump_match, forwards=False),
# re-render/re-draw
"KEY_F5": self.view.rerender_all,
"\x0c": self.redraw, # Ctrl+L
# quitting
"q": self.quit,
"\x04": self.quit, # EOF, Ctrl+D
None: partial(self.warning, f"Unknown key {keystroke!r}!"),
}
for key in [str(keystroke), keystroke.name, None]:
with suppress(KeyError):
return keymap[key]
# Resize handling
def handle_resize(self):
self.view.resize(*self.view_size)
self._resized = False
def on_resize(self):
self._resized = True
raise ResizeEvent() # trigger exit from keyboard polling
# Status bar
def reset_status(self):
self._status = None
self._status_color = self.term.normal
self.input = None
self._timeout = None
self.draw_status()
def status_text(self):
if self._status is not None:
return self._status
else: # use default
node = self.view.current_node()
cur, total = self.view.current_position()
message = f"{node.name} - ({cur}/{total} lines) -"
return message
def draw_status(self):
pre = self.term.move_xy(0, self.status_y) + self._status_color
if self.input: # show line input + cursor
text = self.input.draw()
cursor = (self.term.length(text), self.status_y)
post = self.term.move_xy(*cursor) + self.term.normal_cursor
else: # show status text
text = self.status_text()
post = self.term.hide_cursor
line = self.term.reverse(self.term.ljust(text))
print(pre + line + post, end="", flush=True)
@debug_time
def draw(self):
print(self.term.home + self.term.clear, end="")
for line in self.view.draw():
print(line)
self.draw_status()
# Main UI loop
def run(self):
term = self.term
with term.fullscreen(), term.cbreak(), term.hidden_cursor():
with signal_handler(signal.SIGWINCH, self.on_resize):
while not self._quit: # main loop
try:
if self._resized:
self.handle_resize()
if self.view.need_redraw:
self.draw()
keystroke = term.inkey(timeout=self._timeout)
if keystroke and self.input: # redirect to line input
self.input.handle_key(keystroke)()
self.view.search = self.input.value
self.draw_status()
if self.input.done:
self.reset_status()
else:
self.reset_status()
if keystroke:
self.handle_key(keystroke)()
except ResizeEvent:
self._resized = True
logger.info("Bye!")
class MyStyle(style.TruncateLines, style.SyntaxColor, style.JSONStyle):
pass
# TODO:
# - We need to _pull_ rendered strings on demand. Too expensive to render
# everything up-front.
# - help window with keymap
# - filter
# - expand only nodes that match the current search
def dump(root, style):
for line, _ in root.render(style):
print(line)
def main():
# TODO: argparse
logging.basicConfig(
level=logging.DEBUG,
filename="./browson.log",
format="%(asctime)s %(name)s:%(levelname)s %(message)s",
)
logger.info(f"Loading data structure from {sys.argv[1]}...")
with open(sys.argv[1], "rb") as f:
data = json.load(f)
logger.info("Building nodes...")
root = style.DrawableNode.build(data)
term = Terminal()
mystyle = MyStyle(term=term)
if term.is_a_tty:
logger.info("Running app...")
ui = UI(root, term, mystyle)
ui.run()
else:
dump(root, mystyle)
if __name__ == "__main__":
main()
```
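For a quick non-interactive check of the same rendering path that `dump()` uses above, a sketch along these lines should work; the JSON payload is made up, and it assumes the `browson` package and `blessed` are installed as imported in this file.
```python
import json

from blessed import Terminal

from browson import style


class MyStyle(style.TruncateLines, style.SyntaxColor, style.JSONStyle):
    pass  # same mixin composition as in __main__ above


data = json.loads('{"name": "browson", "tags": ["tui", "json"]}')
root = style.DrawableNode.build(data)
for line, _ in root.render(MyStyle(term=Terminal())):  # same loop as dump()
    print(line)
```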
#### File: browson/browson/node.py
```python
from typing import Any, List, Optional
class Node:
"""Encapsulate the tree of a Python (or JSON) data structure."""
@staticmethod
def is_scalar(value):
"""Return True iff 'value' should be represented by a leaf node."""
return not isinstance(value, (dict, list, tuple, set))
@classmethod
def build(cls, obj, name="", parent=None, **kwargs):
if cls.is_scalar(obj):
return cls(name, type(obj), obj, parent=parent, **kwargs)
else:
children = []
ret = cls(
name,
type(obj),
obj,
parent=parent,
_children=children,
**kwargs,
)
if isinstance(obj, dict):
children.extend(
cls.build(v, f"{name}.{k}", parent=ret, key=k)
for k, v in obj.items()
)
else:
children.extend(
cls.build(v, f"{name}[{i}]", parent=ret)
for i, v in enumerate(obj)
)
return ret
def __init__(self, name, kind, value, **kwargs):
self.name: str = name
self.kind: type = kind
self.value: Any = value
self.parent: "Optional[Node]" = None
self._children: "Optional[List[Node]]" = None
self.__dict__.update(kwargs)
def __str__(self):
num_children = "*" if self.is_leaf else len(self._children)
return f"{self.name}/{self.kind.__name__}/{num_children}"
def __repr__(self):
args = [f"{k}={v!r}" for k, v in self.__dict__.items()]
return f"{self.__class__.__name__}({', '.join(args)})"
def __eq__(self, other):
assert isinstance(other, Node), repr(other)
result = self.name == other.name and self.value == other.value
if result:
assert self.kind is other.kind, f"{self} != {other}"
return result
@property
def is_leaf(self):
"""Return True iff this is a leaf node (i.e. cannot have any children).
This is different from an empty container, i.e. an "internal" node
whose list of children is empty."""
return self._children is None
@property
def children(self):
"""Return this node's children.
Return an empty list for leaf nodes, as a convenience for callers that
typically iterate over this method's return value."""
return [] if self._children is None else self._children
@property
def is_child(self):
return self.parent is not None
@property
def is_first_child(self):
return self.is_child and self is self.parent.children[0]
@property
def is_last_child(self):
return self.is_child and self is self.parent.children[-1]
@property
def level(self):
return 0 if self.parent is None else (self.parent.level + 1)
@property
def has_key(self):
return hasattr(self, "key")
def ancestors(self, include_self=False):
"""Yield transitive parents of this node."""
if include_self:
yield self
if self.parent is not None:
yield from self.parent.ancestors(include_self=True)
def yield_node(node):
yield node
def dfwalk(self, preorder=yield_node, postorder=None):
"""Depth-first walk, yields values yielded from visitor function."""
if preorder is not None:
yield from preorder(self)
for child in self.children:
yield from child.dfwalk(preorder, postorder)
if postorder is not None:
yield from postorder(self)
```
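A small worked example of `Node.build()` and `dfwalk()` from the class above, assuming the module is importable as `browson.node`; the sample dict is arbitrary.
```python
from browson.node import Node

root = Node.build({'a': 1, 'b': [2, 3]})
print(root)  # "/dict/2" - empty name, dict kind, two children
for node in root.dfwalk():
    print(str(node), node.level, node.is_leaf)
# /dict/2 0 False
# .a/int/* 1 True
# .b/list/2 1 False
# .b[0]/int/* 2 True
# .b[1]/int/* 2 True
```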
#### File: jherland/browson/noxfile.py
```python
import nox
# Run everything but 'dist' by default
nox.options.keywords = "not dist"
def _install_this_editable(session, *, extras=None):
extras = [] if extras is None else extras
session.install("flit")
session.run(
"flit", "install", "-s", "--extras", ",".join(extras), silent=True
)
@nox.session(python=["3.6", "3.7", "3.8", "3.9"], reuse_venv=True)
def test(session):
_install_this_editable(session, extras=["test"])
session.run("pytest", "-x", "--log-level=debug", *session.posargs)
@nox.session(reuse_venv=True)
def format(session):
_install_this_editable(session, extras=["dev"])
session.run("black", ".")
@nox.session(reuse_venv=True)
def lint(session):
_install_this_editable(session, extras=["dev"])
session.run("flake8")
@nox.session(reuse_venv=True)
def dist(session):
_install_this_editable(session)
session.run("flit", "publish", *session.posargs)
print("*** Don't forget to tag and push!")
``` |
{
"source": "jhermann/cobblestones",
"score": 3
} |
#### File: src/cobblestones/tools.py
```python
from paver.options import Bunch
def bunchify(obj, _seen=None):
""" Recursively convert all dicts found in `obj` to Paver bunches.
That includes `obj` itself; if it's already a `Bunch`, the original
object is returned. Replacement of inner dicts by `Bunch` objects
happens in-place. Other dict-like objects are scanned, but their
type is retained.
"""
_seen = _seen or {}
if id(obj) in _seen:
return _seen[id(obj)]
_seen[id(obj)] = obj
if type(obj) is dict:
_seen[id(obj)] = Bunch(obj)
obj = _seen[id(obj)]
try:
items = obj.iteritems  # Python 2 dict-like objects
except AttributeError:
items = getattr(obj, 'items', None)  # Python 3 dict-like objects
if items is not None:  # otherwise obj is not a dict-like object
for key, val in items():
obj[key] = bunchify(val, _seen)
return obj
``` |
{
"source": "jhermann/cookiecutter",
"score": 2
} |
#### File: cookiecutter/cookiecutter/config.py
```python
from __future__ import unicode_literals
import copy
import logging
import os
import io
import yaml
from .exceptions import ConfigDoesNotExistException
from .exceptions import InvalidConfiguration
logger = logging.getLogger(__name__)
DEFAULT_CONFIG = {
'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
'default_context': {}
}
# TODO: test on windows...
USER_CONFIG_PATH = '~/.cookiecutterrc'
def get_config(config_path):
"""
Retrieve the config from the specified path, returning it as a config dict.
"""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException
logger.debug('config_path is {0}'.format(config_path))
with io.open(config_path, encoding='utf-8') as file_handle:
try:
yaml_dict = yaml.safe_load(file_handle)
except yaml.scanner.ScannerError:
raise InvalidConfiguration(
'{0} is not a valid YAML file'.format(config_path))
config_dict = copy.copy(DEFAULT_CONFIG)
config_dict.update(yaml_dict)
return config_dict
def get_user_config(rc_file=USER_CONFIG_PATH):
"""
Retrieve config from the given path, if it exists.
Otherwise, return a deep copy of the defaults.
:param rc_file: Path to the user configuration file
"""
rc_file = os.path.expanduser(rc_file or '')
if rc_file and os.path.exists(rc_file):
return get_config(rc_file)
else:
return copy.deepcopy(DEFAULT_CONFIG)  # deep copy, so callers cannot mutate the defaults
```
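A hedged sketch of `get_user_config()` with a throw-away rc file; the path and context values are made up, and the `cookiecutter.config` import path is assumed from the repo layout above.
```python
import os
import tempfile

from cookiecutter.config import get_user_config

rc_path = os.path.join(tempfile.mkdtemp(), 'cookiecutterrc')
with open(rc_path, 'w', encoding='utf-8') as handle:
    handle.write('default_context:\n  full_name: "Jane Doe"\n')

config = get_user_config(rc_file=rc_path)
assert config['default_context'] == {'full_name': 'Jane Doe'}
assert 'cookiecutters_dir' in config  # defaults are merged in, not replaced
```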
#### File: cookiecutter/tests/test_compat.py
```python
from cookiecutter.compat import which
def test_existing_command():
assert which('cookiecutter')
def test_non_existing_command():
assert not which('stringthatisntashellcommand')
```
#### File: cookiecutter/tests/test_is_vcs_installed.py
```python
from cookiecutter import vcs
def test_existing_repo_type():
assert vcs.is_vcs_installed("git")
def test_non_existing_repo_type():
assert not vcs.is_vcs_installed("stringthatisntashellcommand")
``` |
{
"source": "jhermann/dephell_archive",
"score": 2
} |
#### File: dephell_archive/dephell_archive/_stream.py
```python
from contextlib import suppress
from pathlib import Path, PurePath
from typing import List, Optional, Set
# external
import attr
# app
from ._cached_property import cached_property
def _dir_list(filelist: List[str]) -> Set[str]:
# paths starting with '/' or containing '.' are not supported
dir_list = set() # type: Set[str]
for path in filelist:
while path:
path, _, _ = path.rpartition('/')
if not path or path in dir_list:
break
dir_list.add(path)
return dir_list
@attr.s()
class ArchiveStream:
descriptor = attr.ib()
cache_path = attr.ib(type=Path)
member_path = attr.ib(type=PurePath)
mode = attr.ib(type=str, default='r')
encoding = attr.ib(type=Optional[str], default=None)
# private
@cached_property
def _is_tar(self) -> bool:
return hasattr(self.descriptor, 'getmember')
@cached_property
def _dir_list(self) -> Set[str]:
return _dir_list(self.descriptor.namelist())
@cached_property
def _info(self):
path = self.member_path.as_posix()
with suppress(KeyError):
if self._is_tar:
return self.descriptor.getmember(path)
try:
return self.descriptor.getinfo(path) # zip file
except KeyError:
return self.descriptor.getinfo(path + '/') # zip dir
return None
@cached_property
def _is_implicit_dir(self) -> bool:
# Only zip archives have implicit dirs
if self._is_tar:
return False
path = self.member_path.as_posix()
return path in self._dir_list
# used from ArchivePath
def exists(self) -> bool:
return self.is_file() or self.is_dir()
def is_file(self) -> bool:
if self._info is None:
return False
if self._is_tar:
return self._info.isfile()
# zip
return self._info.filename[-1] != '/'
def is_dir(self) -> bool:
if self._info is None:
return self._is_implicit_dir
if self._is_tar:
return self._info.isdir()
# zip explicit dir entry
return self._info.filename[-1] == '/'
# public interface
def read(self):
if not self.member_path.name:
raise NotImplementedError
path = self.cache_path / self.member_path
if path.exists():
raise FileExistsError('file in cache created between open and read')
# extract to cache
self.descriptor.extract(member=self._info, path=str(self.cache_path))
# read from cache
with path.open(self.mode, encoding=self.encoding) as stream:
return stream.read()
```
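A tiny worked example of the `_dir_list()` helper above, which derives the set of implicit directories from an archive's member list (the `dephell_archive._stream` import path is assumed from the repo layout):
```python
from dephell_archive._stream import _dir_list

members = ['pkg/sub/mod.py', 'pkg/__init__.py', 'setup.py']
assert _dir_list(members) == {'pkg', 'pkg/sub'}  # every intermediate directory, deduplicated
```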
#### File: dephell_archive/tests/test_path_zip.py
```python
from pathlib import Path
# project
from dephell_archive import ArchivePath
wheel_path = Path(__file__).parent / 'requirements' / 'wheel.whl'
def test_open_zip(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dephell' / '__init__.py'
with subpath.open() as stream:
content = stream.read()
assert 'from .controllers' in content
def test_glob_zip(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
paths = list(path.glob('*/__init__.py'))
assert len(paths) == 1
assert paths[0].member_path.as_posix() == 'dephell/__init__.py'
def test_exists(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dephell' / '__init__.py'
assert subpath.exists() is True
subpath = path / 'dephell' / 'some_junk.py'
assert subpath.exists() is False
def test_is_file(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dephell' / '__init__.py'
assert subpath.is_file() is True
subpath = path / 'dephell'
assert subpath.is_file() is False
def test_is_dir(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dephell' / '__init__.py'
assert subpath.is_dir() is False
subpath = path / 'dephell'
assert subpath.exists() is True
assert subpath.is_dir() is True
def test_is_dir_explicit_entry(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
cache_path=Path(str(tmpdir)),
)
subpath = path / 'graphviz-0.13.2'
assert subpath.is_dir() is True
subpath = subpath / 'graphviz'
assert subpath.exists() is True
assert subpath.is_dir() is True
subpath = subpath / '__init__.py'
assert subpath.is_dir() is False
def test_iterdir_non_recursive(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
assert paths == ['dnspython-1.16.0']
def test_iterdir_recursive(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
assert 'dnspython-1.16.0' in paths
assert str(Path('dnspython-1.16.0', 'setup.py')) in paths
assert str(Path('dnspython-1.16.0', 'dns', '__init__.py')) in paths
assert str(Path('dnspython-1.16.0', 'dns', 'rdtypes')) in paths
assert str(Path('dnspython-1.16.0', 'dns', 'rdtypes', 'ANY')) in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
def test_iterdir_subpath_non_recursive(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dnspython-1.16.0'
paths = [str(item) for item in subpath.iterdir(_recursive=False)]
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
assert 'dns' in paths
assert 'dnspython.egg-info' in paths
assert 'setup.py' in paths
subpath = subpath / 'dns'
paths = [str(item) for item in subpath.iterdir(_recursive=False)]
assert 'rdtypes' in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
def test_iterdir_subpath_recursive(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'dnspython-1.16.0.zip'),
cache_path=Path(str(tmpdir)),
)
subpath = path / 'dnspython-1.16.0'
paths = [str(item) for item in subpath.iterdir(_recursive=True)]
assert 'setup.py' in paths
assert Path('dnspython-1.16.0', 'dns') not in paths
assert 'dns' in paths
assert str(Path('dns', '__init__.py')) in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
def test_iterdir_non_recursive_with_dirs(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
assert paths == ['graphviz-0.13.2']
def test_iterdir_recursive_with_dirs(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
assert 'graphviz-0.13.2' in paths
assert str(Path('graphviz-0.13.2', 'setup.py')) in paths
assert str(Path('graphviz-0.13.2', 'graphviz', '__init__.py')) in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
def test_iterdir_subpath_non_recursive_with_dirs(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
cache_path=Path(str(tmpdir)),
)
subpath = path / 'graphviz-0.13.2'
paths = [str(item) for item in subpath.iterdir(_recursive=False)]
assert 'graphviz' in paths
assert 'graphviz.egg-info' in paths
assert 'setup.py' in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
subpath = subpath / 'graphviz.egg-info'
paths = [str(item) for item in subpath.iterdir(_recursive=False)]
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
assert set(paths) == {
'dependency_links.txt',
'PKG-INFO',
'requires.txt',
'SOURCES.txt',
'top_level.txt',
}
def test_iterdir_subpath_recursive_with_dirs(tmpdir):
path = ArchivePath(
archive_path=Path('tests', 'requirements', 'graphviz-0.13.2.zip'),
cache_path=Path(str(tmpdir)),
)
subpath = path / 'graphviz-0.13.2'
paths = [str(item) for item in subpath.iterdir(_recursive=True)]
assert 'graphviz' in paths
assert str(Path('graphviz', '__init__.py')) in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
def test_iterdir_non_recursive_wheel(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=False)]
assert len(paths) == 2
assert 'dephell' in paths
assert 'dephell-0.2.0.dist-info' in paths
def test_iterdir_recursive_wheel(tmpdir):
path = ArchivePath(
archive_path=wheel_path,
cache_path=Path(str(tmpdir)),
)
paths = [str(subpath) for subpath in path.iterdir(_recursive=True)]
assert 'dephell' in paths
assert str(Path('dephell', '__init__.py')) in paths
assert 'dephell-0.2.0.dist-info' in paths
assert str(Path('dephell-0.2.0.dist-info', 'WHEEL')) in paths
for path in paths:
assert paths.count(path) == 1, 'duplicate dir: ' + path
``` |
{
"source": "jhermann/dephell_argparse",
"score": 3
} |
#### File: dephell_argparse/dephell_argparse/_command.py
```python
from collections import Counter, defaultdict
from typing import DefaultDict, FrozenSet, Iterable, List, Optional, Tuple
# app
from ._cached_property import cached_property
class Command:
def __init__(self, argv: Iterable[str], commands: Iterable[str]):
self._argv = tuple(argv)
self.commands = {c.lower() for c in commands}
@cached_property
def groups(self) -> FrozenSet[str]:
groups = set()
for command in self.commands:
group, _, _ = command.rpartition(' ')
if group:
groups.add(group)
return frozenset(groups)
@cached_property
def words(self) -> int:
# no match means no command words were consumed from argv
return 0 if self.match is None else len(self.match.split())
@property
def argv(self) -> Tuple[str, ...]:
return self._argv[self.words:]
@property
def group(self) -> Optional[str]:
group = self._argv[0]
if group in self.groups:
return group
return None
@staticmethod
def _similar(cmd1: str, cmd2: str, threshold: int = 1) -> bool:
given = Counter(cmd1)
guess = Counter(cmd2)
counter_diff = (given - guess) + (guess - given)
diff = sum(counter_diff.values())
return diff <= threshold
@cached_property
def match(self) -> Optional[str]:
if not self._argv:
return None
for size, direction in ((2, 1), (1, 1), (2, -1)):
command_name = ' '.join(self._argv[:size][::direction])
if command_name in self.commands:
return command_name
# only a single word of a multi-word command was specified
commands_by_parts = defaultdict(list) # type: DefaultDict[str, List[str]]
for command_name in self.commands:
for part in command_name.split():
commands_by_parts[part].append(command_name)
command_names = commands_by_parts[self._argv[0]]
if len(command_names) == 1:
return command_names[0]
# typo in command name
for size in (1, 2):
command_name = ' '.join(self._argv[:size])
for command_guess in self.commands:
if self._similar(command_name, command_guess):
return command_guess
return None
@cached_property
def guesses(self) -> FrozenSet[str]:
guesses = set()
if self.group:
for command in self.commands:
if command.startswith(self.group + ' '):
guesses.add(command)
return frozenset(guesses)
# typed only one of the command's two words
for name in self.argv[:2]:
for command_name in self.commands:
_, _, subcommand_name = command_name.rpartition(' ')
if name == subcommand_name:
guesses.add(command_name)
if guesses:
return frozenset(guesses)
# typed fully but with too many mistakes
for size in 1, 2:
name = ' '.join(self._argv[:size])
for command_name in self.commands:
if self._similar(name, command_name, threshold=3):
guesses.add(command_name)
if guesses:
return frozenset(guesses)
# typed only one of the two words, and it contains typos
for name in self.argv[:2]:
for command_name in self.commands:
for part in command_name.split():
if self._similar(name, part):
guesses.add(command_name)
if guesses:
return frozenset(guesses)
return frozenset()
```
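A short illustration of `Command.match` and `Command.guesses` from the class above, in the same spirit as the tests further down (the `dephell_argparse._command` import path matches the test imports):
```python
from dephell_argparse._command import Command

# a unique sub-word is enough to resolve the full command name
cmd = Command(argv=['prod'], commands={'math sum', 'math prod', 'hello'})
assert cmd.match == 'math prod'

# a bare group name does not match, but it narrows down the guesses
cmd = Command(argv=['math'], commands={'math sum', 'math prod', 'hello'})
assert cmd.match is None
assert cmd.guesses == frozenset({'math sum', 'math prod'})
```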
#### File: dephell_argparse/examples/hello.py
```python
from dephell_argparse import Parser
def hello(args) -> int:
"""Say "hello"!
"""
print('hello!')
return 0
parser = Parser()
parser.add_command(hello)
if __name__ == '__main__':
exit(parser.handle())
```
#### File: dephell_argparse/tests/test_command.py
```python
import pytest
# project
from dephell_argparse._command import Command
def test_groups():
commands = ('math sum', 'math prod', 'http get', 'hello')
groups = ('math', 'http')
assert Command(argv=[], commands=commands).groups == frozenset(groups)
@pytest.mark.parametrize('left, right, result', [
('twilight', 'twilight', True),
('twilight', 'twiligth', True),
('twilight', 'twiight', True),
('twilight', 'sparkle', False),
('twilight', 'ghttw', False),
])
def test_similar(left, right, result):
assert Command._similar(left, right) == result
TEST_COMMANDS = (
'math prod',
'math sum',
'http get',
'http post',
'http patch',
'http delete',
'do something',
'auth',
'hello',
'hello there',
'bye there',
)
@pytest.mark.parametrize('argv, match', [
# full match
(['math', 'prod'], 'math prod'),
(['math', 'prod', 'some', 'junk'], 'math prod'),
# args don't affect
(['junk', 'math', 'prod'], None),
(['math', '--prod'], None),
# partial match
(['prod'], 'math prod'),
(['prod', 'junk'], 'math prod'),
(['do'], 'do something'),
(['something'], 'do something'),
(['something', 'else'], 'do something'),
(['do', 'else'], 'do something'),
(['hello'], 'hello'),
(['hello', 'there'], 'hello there'),
(['hello', 'not', 'there'], 'hello'),
(['math'], None),
(['there'], None),
])
def test_match(argv, match):
cmd = Command(argv=argv, commands=TEST_COMMANDS)
assert cmd.match == match
```
#### File: dephell_argparse/tests/test_handler.py
```python
import pytest
# project
from dephell_argparse import CommandHandler
@pytest.mark.parametrize('given, expected', [
('hello', 'hello'),
('Hello', 'hello'),
('HelloCommand', 'hello'),
('MathSum', 'math sum'),
('MathSumCommand', 'math sum'),
('MathSumHandler', 'math sum'),
('MathSumCommandHandler', 'math sum'),
('MathCommand', 'math'),
('URL', 'url'),
('ParseURL', 'parse url'),
('URLParse', 'urlparse'),
])
def test_name_class(given: str, expected: str):
cls = type(given, (CommandHandler, ), {})
assert cls().name == expected
@pytest.mark.parametrize('given, expected', [
('hello', 'hello'),
('math_sum', 'math sum'),
('math_sum_command', 'math sum'),
])
def test_name_func(given: str, expected: str):
cls = type(given, (), {})
assert CommandHandler(handler=cls).name == expected
def test_description():
def handler():
"""test me!
"""
assert CommandHandler(handler=handler).description == 'test me!'
class Handler(CommandHandler):
"""test me!
"""
assert Handler().description == 'test me!'
class Handler:
"""test me!
"""
assert CommandHandler(handler=Handler()).description == 'test me!'
``` |
{
"source": "jhermann/ezpy",
"score": 2
} |
#### File: ezpy/tests/conftest.py
```python
import logging
import pytest
# Globally available fixtures
@pytest.fixture(scope='session')
def logger():
"""Test logger instance as a fixture."""
logging.basicConfig(level=logging.DEBUG)
return logging.getLogger('tests')
``` |
{
"source": "jhermann/gh-commander",
"score": 2
} |
#### File: gh_commander/commands/label.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import click
from click.exceptions import UsageError
import tablib
# TODO: clear up license situation before a final release, or switch to something else
import qstatpretty.ttyutil.color as ttycolor
import qstatpretty.ttyutil.table as ttytable
import qstatpretty.ttyutil.resize as ttyresize
# import qstatpretty.ttyutil.size as ttysize
from .. import config, github
from .._compat import text_type, string_types
from ..util import dclick
DESERIALIZERS = ('json', 'yaml', 'csv', 'tsv')
SERIALIZERS_NEED_NL = ('dict', 'json', 'html')
SERIALIZERS_TEXT = SERIALIZERS_NEED_NL + ('yaml', 'csv', 'tsv')
SERIALIZERS_BINARY = ('ods', 'xls') # this just doesn't work right (Unicode issues): , 'xlsx')
SERIALIZERS = SERIALIZERS_TEXT + SERIALIZERS_BINARY # TODO: export to 'tty'
HEADERS = ('Name', 'Color')
DEFAULT_TABLE_FORMAT = [
{
'key': 'name',
'title': 'name',
'color': lambda x: ttycolor.COLOR_GREEN,
'ellipsis': ttyresize.simple_ellipsis(),
'fval': ttyresize.simple_value(factor=10, overflow=2),
},
{
'key': 'color',
        'title': 'color',
'color': lambda x: ttycolor.COLOR_YELLOW,
'ellipsis': ttyresize.simple_ellipsis(),
'fval': ttyresize.simple_value(factor=3),
},
]
def get_repo(api, repo):
"""Get account name, repo name, and repository object from name ``repo``."""
if '/' in repo:
user, repo = repo.split('/', 1)
else:
user = api.gh_config.user
return user, repo, api.repository(user, repo)
def get_labels(api, repo):
"""Get label dataset for a repo."""
user, repo, gh_repo = get_repo(api, repo)
data = sorted((label.name, '#' + label.color) for label in gh_repo.labels())
return user, repo, data
def dump_labels(api, repo):
"""Dump labels of a repo."""
def padded(rows):
"Helper"
for row in rows:
yield tuple(' {} '.format(cell) for cell in row)
user, repo, data = get_labels(api, repo)
data = padded([HEADERS] + list(data))
# terminal_width = ttysize.terminal_size()[0]
table_format = DEFAULT_TABLE_FORMAT
delimiters = ttytable.DELIMITERS_FULL
table = list(data)
# table = ttyresize.grow_table(data, terminal_width, table_format, delimiters)
click.secho('⎇ {}/{}'.format(user, repo), fg='white', bg='blue', bold=True)
click.echo(ttytable.pretty_table(table, table_format, delimiters=delimiters))
class LabelAliases(dclick.AliasedGroup):
"""Alias mapping for 'label' commands."""
MAP = dict(
ls='list',
)
@config.cli.group(cls=LabelAliases)
def label():
"""Managing issue labels."""
@label.command(name='list')
@click.argument('repo', nargs=-1)
def label_list(repo=None):
"""Dump labels within the given repo(s)."""
api = github.api(config=None) # TODO: config object
for idx, reponame in enumerate(repo or []):
if idx:
click.echo('')
dump_labels(api, reponame)
@label.command()
@click.option('-f', '--format', 'serializer', default=None, type=click.Choice(SERIALIZERS),
help="Output format (defaults to extension of OUTFILE).",
)
@click.argument('repo', nargs=-1)
@click.argument('outfile', type=click.File('wb'))
@click.pass_context
def export(ctx, repo, outfile, serializer):
"""Export labels of the given repo(s) to a file."""
api = github.api(config=None) # TODO: config object
tabdata = tablib.Dataset()
if repo and repo[-1].lower() == 'to':
repo = repo[:-1]
if not repo:
raise UsageError("You provided no repository names!", ctx=ctx)
outname = getattr(outfile, 'name', None)
if serializer is None:
_, ext = os.path.splitext(outname or '<stream>')
ext = ext.lstrip('.')
if ext in SERIALIZERS:
serializer = ext
else:
raise UsageError('No --format given, and extension of "{}" is not one of {}.'
.format(outname or '<stream>', ', '.join(SERIALIZERS)), ctx=ctx)
for idx, reponame in enumerate(repo):
user, repo, data = get_labels(api, reponame)
if not idx:
tabdata.headers = HEADERS
tabdata.append_separator('⎇ {}/{}'.format(user, repo))
tabdata.extend(data)
text = getattr(tabdata, serializer)
if not isinstance(text, string_types):
text = repr(text)
if serializer in SERIALIZERS_NEED_NL:
text += '\n'
if isinstance(text, text_type):
text = text.encode('utf-8')
try:
outfile.write(text)
except EnvironmentError as cause:
raise dclick.LoggedFailure('Error while writing "{}" ({})'
.format(outname or '<stream>', cause))
@label.command(name='import')
@click.option('-f', '--format', 'serializer', default=None, type=click.Choice(DESERIALIZERS),
help="Input format (defaults to extension of INFILE).",
)
@click.argument('repo', nargs=-1)
@click.argument('infile', type=click.File('r'))
@click.pass_context
def label_import(ctx, repo, infile, serializer):
"""Import labels to the given repo(s) out of a file."""
# TODO: refactor prep code to function, see export for dupe code
api = github.api(config=None) # TODO: config object
tabdata = tablib.Dataset()
if repo and repo[-1].lower() == 'from':
repo = repo[:-1]
if not repo:
raise UsageError("You provided no repository names!", ctx=ctx)
inname = getattr(infile, 'name', None)
if serializer is None:
_, ext = os.path.splitext(inname or '<stream>')
ext = ext.lstrip('.')
if ext in DESERIALIZERS:
serializer = ext
else:
raise UsageError('No --format given, and extension of "{}" is not one of {}.'
.format(inname, ', '.join(DESERIALIZERS)), ctx=ctx)
try:
data = infile.read()
except EnvironmentError as cause:
raise dclick.LoggedFailure('Error while reading "{}" ({})'.format(inname, cause))
# Read label data, and make it unique
setattr(tabdata, serializer, data)
import_labels = {}
for import_label in tabdata.dict:
name, color = import_label[HEADERS[0]], import_label[HEADERS[1]].lstrip('#').lower()
if not re.match("[0-9a-f]{6}", color):
raise dclick.LoggedFailure('Bad color <{}> for label "{}"'.format(color, name))
if name in import_labels and color != import_labels[name]:
click.echo('INFO Changing color from #{} to #{} for duplicate import label "{}"'
.format(import_labels[name], color, name))
import_labels[name] = color
# Update given repos
for reponame in repo:
user, repo, gh_repo = get_repo(api, reponame)
if not gh_repo:
click.secho('ERR Non-existing repo "{}"!'.format(reponame), fg='black', bg='yellow', bold=True)
continue
labels = import_labels.copy()
changed = False
unique = {}
click.secho('⎇ {}/{}'.format(user, repo), fg='white', bg='blue', bold=True)
# Check if existing labels need updating
for existing in gh_repo.labels():
if existing.name in labels:
if existing.color != labels[existing.name]:
status = 'OK' if existing.update(existing.name, labels[existing.name]) else 'ERR'
click.echo('{:4s} Updated label "{}" with color #{}'
.format(status, existing.name, labels[existing.name]))
changed = True
del labels[existing.name]
else:
unique[existing.name] = existing
# Create any remaining labels
if labels:
for name, color in sorted(labels.items()):
status = 'OK' if gh_repo.create_label(name, color) else 'ERR'
click.echo('{:4s} Created label "{}" with color #{}'.format(status, name, color))
elif not changed:
click.echo('INFO No changes.')
# Show info on labels not in the import set
if unique:
click.echo("INFO Unique labels in this repo: {}".format(', '.join(sorted(unique.keys()))))
``` |
{
"source": "jhermann/HPI",
"score": 3
} |
#### File: HPI/my/common.py
```python
from pathlib import Path
import functools
from typing import Union, Callable, Dict, List, Iterable, TypeVar, Sequence
# some helper functions
def import_file(p: Union[str, Path], name=None):
p = Path(p)
if name is None:
name = p.stem
import importlib.util
spec = importlib.util.spec_from_file_location(name, p) # type: ignore
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo) # type: ignore
return foo
def import_from(path, name):
path = str(path)
import sys
try:
sys.path.append(path)
import importlib
return importlib.import_module(name)
finally:
sys.path.remove(path)
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
def the(l: Iterable[T]) -> T:
it = iter(l)
try:
first = next(it)
except StopIteration as ee:
raise RuntimeError('Empty iterator?')
assert all(e == first for e in it)
return first
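# Illustrative note (not part of the original module): `the` asserts that an
# iterable holds exactly one distinct value and returns it, e.g.
# the([3, 3, 3]) -> 3, while the([]) raises RuntimeError.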
def group_by_key(l: Iterable[T], key: Callable[[T], K]) -> Dict[K, List[T]]:
res: Dict[K, List[T]] = {}
for i in l:
kk = key(i)
lst = res.get(kk, [])
lst.append(i)
res[kk] = lst
return res
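# Hedged usage sketch (not part of the original module):
#   group_by_key([1, 2, 3, 4], key=lambda x: x % 2)  ->  {1: [1, 3], 0: [2, 4]}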
Cl = TypeVar('Cl')
R = TypeVar('R')
def cproperty(f: Callable[[Cl], R]) -> R:
return property(functools.lru_cache(maxsize=1)(f)) # type: ignore
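# Illustrative note (not part of the original module): cproperty exposes the
# getter as a property wrapped in functools.lru_cache(maxsize=1), so repeated
# access reuses a single cached value (keyed on the instance) instead of
# recomputing it.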
# https://stackoverflow.com/a/12377059/706389
def listify(fn=None, wrapper=list):
"""
Wraps a function's return value in wrapper (e.g. list)
Useful when an algorithm can be expressed more cleanly as a generator
"""
def listify_return(fn):
@functools.wraps(fn)
def listify_helper(*args, **kw):
return wrapper(fn(*args, **kw))
return listify_helper
if fn is None:
return listify_return
return listify_return(fn)
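# Hedged usage sketch (not part of the original module): wrap a generator-style
# function so callers get a concrete list (or any other wrapper, e.g. set).
#
#   @listify
#   def squares(n):
#       yield from (i * i for i in range(n))
#
#   squares(3)  # -> [0, 1, 4]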
# TODO FIXME use in bluemaestro
# def dictify(fn=None, key=None, value=None):
# def md(it):
# return make_dict(it, key=key, value=value)
# return listify(fn=fn, wrapper=md)
from .kython.klogging import setup_logger, LazyLogger
PathIsh = Union[Path, str]
Paths = Union[Sequence[PathIsh], PathIsh]
def get_files(pp: Paths, glob: str, sort=True) -> List[Path]:
"""
Helper function to avoid boilerplate.
"""
# TODO FIXME mm, some wrapper to assert iterator isn't empty?
sources: List[Path] = []
if isinstance(pp, (str, Path)):
sources.append(Path(pp))
else:
sources.extend(map(Path, pp))
paths: List[Path] = []
for src in sources:
if src.is_dir():
gp: Iterable[Path] = src.glob(glob)
paths.extend(gp)
else:
assert src.is_file(), src
# TODO FIXME assert matches glob??
paths.append(src)
if sort:
paths = list(sorted(paths))
return paths
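# Hedged usage sketch (not part of the original module); the paths below are
# purely hypothetical:
#   get_files('/path/to/exports', glob='*.json')                # directory -> sorted matches
#   get_files(['/tmp/a.json', '/tmp/b.json'], glob='*.json')    # explicit files pass through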
def mcachew(*args, **kwargs):
"""
Stands for 'Maybe cachew'.
Defensive wrapper around @cachew to make it an optional dependency.
"""
try:
import cachew
except ModuleNotFoundError:
import warnings
warnings.warn('cachew library not found. You might want to install it to speed things up. See https://github.com/karlicoss/cachew')
return lambda orig_func: orig_func
else:
import cachew.experimental
cachew.experimental.enable_exceptions() # TODO do it only once?
return cachew.cachew(*args, **kwargs)
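# Hedged usage sketch (not part of the original module): mcachew is applied as
# a decorator exactly like cachew, but degrades to a no-op when the cachew
# library is missing (see its use with hashf= elsewhere in this codebase).
#
#   @mcachew(hashf=lambda pdfs: [(p, p.stat().st_mtime) for p in pdfs])
#   def parse_all(pdfs): ...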
@functools.lru_cache(1)
def _magic():
import magic # type: ignore
return magic.Magic(mime=True)
# TODO could reuse in pdf module?
import mimetypes # TODO do I need init()?
def fastermime(path: str) -> str:
# mimetypes is faster
(mime, _) = mimetypes.guess_type(path)
if mime is not None:
return mime
# magic is slower but returns more stuff
# TODO FIXME Result type; it's inherently racey
return _magic().from_file(path)
```
#### File: my/jawbone/__init__.py
```python
from typing import Dict, Any, List
import json
from functools import lru_cache
from datetime import datetime, date, time, timedelta
from pathlib import Path
import logging
import pytz
from mycfg import paths
BDIR = paths.jawbone.export_dir
PHASES_FILE = BDIR / 'phases.json'
SLEEPS_FILE = BDIR / 'sleeps.json'
GRAPHS_DIR = BDIR / 'graphs'
def get_logger():
return logging.getLogger('jawbone-provider')
XID = str # TODO how to shared with backup thing?
Phases = Dict[XID, Any]
@lru_cache(1)
def get_phases() -> Phases:
return json.loads(PHASES_FILE.read_text())
# TODO use awakenings and quality
class SleepEntry:
def __init__(self, js) -> None:
self.js = js
# TODO @memoize decorator?
@property
def date_(self) -> date:
return self.sleep_end.date()
def _fromts(self, ts: int) -> datetime:
        return pytz.utc.localize(datetime.utcfromtimestamp(ts)).astimezone(self._tz)
@property
def _tz(self):
return pytz.timezone(self._details['tz'])
@property
def title(self) -> str:
return self.js['title']
@property
def xid(self) -> XID:
return self.js['xid']
@property
def _details(self):
return self.js['details']
# TODO figure out timezones..
# not sure how.. I guess by the american ones
@property
def created(self) -> datetime:
return self._fromts(self.js['time_created'])
@property
def completed(self) -> datetime:
return self._fromts(self.js['time_completed'])
@property
def asleep(self) -> datetime:
return self._fromts(self._details['asleep_time'])
@property
def sleep_start(self) -> datetime:
return self.asleep # TODO careful, maybe use same logic as emfit
@property
def bed_time(self) -> int:
return int((self.sleep_end - self.sleep_start).total_seconds()) // 60
@property
def sleep_end(self) -> datetime:
return self._fromts(self._details['awake_time'])
@property
def graph(self) -> Path:
return GRAPHS_DIR / (self.xid + ".png")
# TODO might be useful to cache these??
@property
def phases(self) -> List[datetime]:
# TODO make sure they are consistent with emfit?
return [self._fromts(i['time']) for i in get_phases()[self.xid]]
def __str__(self) -> str:
return f"{self.date_.strftime('%a %d %b')} {self.title}"
def __repr__(self) -> str:
return str(self)
def load_sleeps() -> List[SleepEntry]:
sleeps = json.loads(SLEEPS_FILE.read_text())
return [SleepEntry(js) for js in sleeps]
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
from matplotlib.figure import Figure # type: ignore
from matplotlib.axes import Axes # type: ignore
# pip install imageio
from imageio import imread # type: ignore
def hhmm(time: datetime):
return time.strftime("%H:%M")
# def xpos(time: datetime) -> float:
# tick = span / width
# fromstart = time - sleep.created
# return fromstart / tick
import matplotlib.dates as mdates # type: ignore
from matplotlib.ticker import MultipleLocator, FixedLocator # type: ignore
def plot_one(sleep: SleepEntry, fig: Figure, axes: Axes, xlims=None, showtext=True):
span = sleep.completed - sleep.created
print(f"{sleep.xid} span: {span}")
img = imread(sleep.graph)
# all of them are 300x300 images apparently
# span for image
xspan = [sleep.created, sleep.completed]
xspan = [mdates.date2num(i) for i in xspan]
if xlims is None:
tt = sleep.created
hour = tt.hour
# TODO maybe assert that hour is somewhere between 20 and 8 or something
start: datetime
starttime = time(23, 00)
if hour >= 20:
# went to bed before midnight
start = datetime.combine(tt.date(), starttime)
elif hour <= 8:
# went to bed after midnight
start = datetime.combine(tt.date() - timedelta(days=1), starttime)
else:
print("wtf??? weird time for sleep...")
# choosing at random
start = datetime.combine(tt.date(), starttime)
end = start + timedelta(hours=10)
xlims = [start, end]
# axes.figure(figsize=(10, 5))
axes.set_xlim(xlims)
hhmm_fmt = mdates.DateFormatter('%H:%M')
axes.xaxis.set_major_formatter(hhmm_fmt)
ticks = sleep.phases if showtext else []
axes.xaxis.set_ticks(ticks)
axes.yaxis.set_ticks([])
axes.tick_params(
axis='both',
which='major',
length=0,
labelsize=7,
rotation=30,
pad=-14, # err... hacky
)
ylims = [0, 50]
axes.set_ylim(ylims)
axes.imshow(
img,
zorder=0,
extent=[
xspan[0], xspan[1],
ylims[0], ylims[1],
],
aspect='auto',
)
# axes.set_title(str(sleep))
# axes.title.set_size(10)
if showtext:
axes.text(xlims[1] - timedelta(hours=1.5), 20, str(sleep),)
# plt.text(sleep.asleep(), 0, hhmm(sleep.asleep()))
from kython import make_dict, group_by_key
def sleeps_by_date() -> Dict[date, SleepEntry]:
logger = get_logger()
sleeps = load_sleeps()
sleeps = [s for s in sleeps if s.graph.exists()] # TODO careful..
res = {}
for dd, group in group_by_key(sleeps, key=lambda s: s.date_).items():
if len(group) == 1:
res[dd] = group[0]
else:
# TODO short ones I can ignore I guess. but won't bother now
logger.error('multiple sleeps on %s: %s', dd, group)
return res
# sleeps_count = 35 # len(sleeps) # apparently MPL fails at 298 with outofmemory or something
# start = 40
# 65 is around 1 july
# sleeps = sleeps[start: start + sleeps_count]
# sleeps = sleeps[:sleeps_count]
# dt = {k: v for k, v in dt.items() if v is not None}
# TODO not really sure it belongs here...
# import melatonin
# dt = melatonin.get_data()
def predicate(sleep: SleepEntry):
"""
    Filter for comparing similar sleep sessions
"""
start = sleep.created.time()
end = sleep.completed.time()
if (time(23, 0) <= start <= time(23, 30)) and (time(5, 30) <= end <= time(6, 30)):
return True
return False
def plot():
# TODO FIXME melatonin data
melatonin_data = {} # type: ignore[var-annotated]
# TODO ??
sleeps = list(filter(predicate, load_sleeps()))
sleeps_count = len(sleeps)
print(sleeps_count)
fig: Figure = plt.figure(figsize=(15, sleeps_count * 1))
axarr = fig.subplots(nrows=len(sleeps))
for i, (sleep, axes) in enumerate(zip(sleeps, axarr)):
plot_one(sleep, fig, axes, showtext=True)
used = melatonin_data.get(sleep.date_, None)
sused: str
color: str
# used = True if used is None else False # TODO?
if used is True:
sused = "YES"
color = 'green'
elif used is False:
sused = "NO"
color = 'red'
else:
sused = "??"
color = 'white'
axes.text(axes.get_xlim()[0], 20, sused)
axes.patch.set_alpha(0.5)
axes.set_facecolor(color)
plt.tight_layout()
plt.subplots_adjust(hspace=0.0)
# er... this saves with a different aspect ratio for some reason.
# tap 'ctrl-s' on mpl plot window to save..
# plt.savefig('res.png', asp)
plt.show()
import pandas as pd # type: ignore
def get_dataframe():
sleeps = sleeps_by_date()
items = []
for dd, s in sleeps.items():
items.append({
'date' : dd, # TODO not sure... # TODO would also be great to sync column names...
'sleep_start': s.sleep_start,
'sleep_end' : s.sleep_end,
'bed_time' : s.bed_time,
})
# TODO tz is in sleeps json
res = pd.DataFrame(items)
return res
def test_tz():
sleeps = sleeps_by_date()
for s in sleeps.values():
assert s.sleep_start.tzinfo is not None
assert s.sleep_end.tzinfo is not None
for dd, exp in [
(date(year=2015, month=8 , day=28), time(hour=7, minute=20)),
(date(year=2015, month=9 , day=15), time(hour=6, minute=10)),
]:
sleep = sleeps[dd]
end = sleep.sleep_end
assert end.time() == exp
# TODO fuck. on 0909 I woke up at around 6 according to google timeline
# but according to jawbone, it was on 0910?? eh. I guess it's just shitty tracking.
def main():
# TODO eh. vendorize klogging already?
from kython.klogging import setup_logzero
setup_logzero(get_logger())
test_tz()
# print(get_dataframe())
if __name__ == '__main__':
main()
```
#### File: my/lastfm/__init__.py
```python
from functools import lru_cache
from typing import NamedTuple, Dict, Any
from datetime import datetime
from pathlib import Path
import json
import pytz
from mycfg import paths
# TODO Json type?
# TODO memoised properties?
# TODO lazy mode and eager mode?
# lazy is a bit nicer in terms of more flexibility and less processing?
# eager is a bit more explicit for error handling
class Scrobble(NamedTuple):
raw: Dict[str, Any]
@property
def dt(self) -> datetime:
ts = int(self.raw['date'])
return datetime.fromtimestamp(ts, tz=pytz.utc)
@property
def artist(self) -> str:
return self.raw['artist']
@property
def name(self) -> str:
return self.raw['name']
@property
def track(self) -> str:
return f'{self.artist} — {self.name}'
# TODO __repr__, __str__
# TODO could also be nice to make generic? maybe even depending on eagerness
# TODO memoise...?
# TODO watch out, if we keep the app running it might expire
def _iter_scrobbles():
last = max(Path(paths.lastfm.export_path).glob('*.json'))
    # TODO mm, no timezone? hopefully it's UTC
j = json.loads(last.read_text())
for raw in j:
yield Scrobble(raw=raw)
@lru_cache(1)
def get_scrobbles():
return list(sorted(_iter_scrobbles(), key=lambda s: s.dt))
def test():
assert len(get_scrobbles()) > 1000
```
#### File: my/notes/orgmode.py
```python
from glob import glob
from typing import List, Sequence, Iterator
from pathlib import Path
from ..common import PathIsh
from mycfg import orgmode as config
from porg import Org
# TODO not sure about symlinks?
def _org_files_in(ppp: Path, archived: bool=False) -> Iterator[Path]:
assert ppp.exists(), ppp
# TODO reuse get_files somehow?
    if ppp.is_file():
        yield ppp
        return
yield from ppp.rglob('*.org')
if archived:
yield from ppp.rglob('*.org_archive')
def org_files(roots=config.roots, archived: bool=False) -> Iterator[Path]:
    for p in roots:
yield from _org_files_in(Path(p), archived=archived)
# TODO move to porg?
class PorgAll:
# TODO *roots?
def __init__(self, roots: Sequence[PathIsh]) -> None:
self.files = org_files(roots=roots)
def xpath_all(self, query: str):
return self.query_all(lambda x: x.xpath_all(query))
def get_all(self):
return self.xpath_all('/')
def query_all(self, query):
res: List[Org] = []
for of in self.files:
org = Org.from_file(str(of))
res.extend(query(org))
return res
def query():
return PorgAll(roots=config.roots)
```
#### File: HPI/my/pdfs.py
```python
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
import re
import sys
import io
import logging
from pathlib import Path
from typing import NamedTuple, List, Optional, Iterator
from contextlib import redirect_stderr
from .common import import_file, mcachew, group_by_key
from .error import Res, split_errors
# path to pdfannots (https://github.com/0xabu/pdfannots)
import mycfg.repos.pdfannots.pdfannots as pdfannots
from mycfg import paths
def get_logger():
return logging.getLogger('my.pdfs')
def is_ignored(p: Path) -> bool:
# ignore some extremely heavy files
return paths.pdfs.is_ignored(p)
def candidates(roots=None) -> Iterator[Path]:
if roots is None:
roots = paths.pdfs.roots
for r in roots:
for p in Path(r).rglob('*.pdf'):
if not is_ignored(p):
yield p
# TODO canonical names
# TODO defensive if pdf was removed, also cachew key needs to be defensive
class Annotation(NamedTuple):
path: str
author: Optional[str]
page: int
highlight: Optional[str]
comment: Optional[str]
date: Optional[datetime]
def as_annotation(*, raw_ann, path: str) -> Annotation:
d = vars(raw_ann)
d['page'] = raw_ann.page.pageno
for a in ('boxes', 'rect'):
if a in d:
del d[a]
dates = d['date']
date: Optional[datetime] = None
if dates is not None:
dates = dates.replace("'", "")
# 20190630213504+0100
dates = re.sub('Z0000$', '+0000', dates)
FMT = '%Y%m%d%H%M%S'
    # TODO is it utc if there is no timestamp?
for fmt in [FMT, FMT + '%z']:
try:
date = datetime.strptime(dates, fmt)
break
except ValueError:
pass
else:
# TODO defensive?
raise RuntimeError(dates)
return Annotation(
path = path,
author = d['author'],
page = d['page'],
highlight = d['text'],
comment = d['contents'],
date = date,
)
def get_annots(p: Path) -> List[Annotation]:
with p.open('rb') as fo:
f = io.StringIO()
with redirect_stderr(f):
(annots, outlines) = pdfannots.process_file(fo, emit_progress=False)
# outlines are kinda like TOC, I don't really need them
return [as_annotation(raw_ann=a, path=str(p)) for a in annots]
# TODO stderr?
def hash_files(pdfs: List[Path]):
# if mtime hasn't changed then the file hasn't changed either
return [(pdf, pdf.stat().st_mtime) for pdf in pdfs]
# TODO might make more sense to be more fine grained here, e.g. cache annotations for individual files
@mcachew(hashf=hash_files)
def _iter_annotations(pdfs: List[Path]) -> Iterator[Res[Annotation]]:
logger = get_logger()
logger.info('processing %d pdfs', len(pdfs))
# TODO how to print to stdout synchronously?
with ProcessPoolExecutor() as pool:
futures = [
pool.submit(get_annots, pdf)
for pdf in pdfs
]
for f, pdf in zip(futures, pdfs):
try:
yield from f.result()
except Exception as e:
logger.error('While processing %s:', pdf)
logger.exception(e)
# TODO not sure if should attach pdf as well; it's a bit annoying to pass around?
# also really have to think about interaction with cachew...
yield e
def iter_annotations(roots=None) -> Iterator[Res[Annotation]]:
pdfs = list(sorted(candidates(roots=roots)))
yield from _iter_annotations(pdfs=pdfs)
class Pdf(NamedTuple):
path: Path
annotations: List[Annotation]
@property
def date(self):
return self.annotations[-1].date
def annotated_pdfs(roots=None) -> Iterator[Res[Pdf]]:
it = iter_annotations(roots=roots)
vit, eit = split_errors(it, ET=Exception)
for k, g in group_by_key(vit, key=lambda a: a.path).items():
yield Pdf(path=Path(k), annotations=g)
yield from eit
def test():
res = get_annots(Path('/L/zzz_syncthing/TODO/TOREAD/done/mature-optimization_wtf.pdf'))
assert len(res) > 3
def test2():
res = get_annots(Path('/L/zzz_borg/downloads/nonlinear2.pdf'))
print(res)
def test_with_error():
# TODO need example of pdf file...
import tempfile
with tempfile.TemporaryDirectory() as td:
root = Path(td)
g = root / 'garbage.pdf'
g.write_text('garbage')
roots = [
root,
# '/usr/share/doc/texlive-doc/latex/amsrefs/',
]
# TODO find some pdfs that actually has annotations...
annots = list(iter_annotations(roots=roots))
assert len(annots) == 1
assert isinstance(annots[0], Exception)
def main():
from pprint import pprint
logger = get_logger()
from .common import setup_logger
setup_logger(logger, level=logging.DEBUG)
collected = list(annotated_pdfs())
if len(collected) > 0:
for r in collected:
if isinstance(r, Exception):
logger.exception(r)
else:
logger.info('collected annotations in: %s', r.path)
for a in r.annotations:
pprint(a)
```
#### File: HPI/my/reddit.py
```python
from pathlib import Path
from typing import List, Sequence, Mapping, Iterator
from .kython.kompress import CPath
from .common import mcachew, get_files, LazyLogger
from mycfg import paths
import mycfg.repos.rexport.dal as rexport
def get_sources() -> Sequence[Path]:
# TODO use zstd?
# TODO maybe add assert to get_files? (and allow to suppress it)
files = get_files(paths.rexport.export_dir, glob='*.json.xz')
res = list(map(CPath, files)); assert len(res) > 0
return tuple(res)
logger = LazyLogger('my.reddit', level='debug')
Sid = rexport.Sid
Save = rexport.Save
Comment = rexport.Comment
Submission = rexport.Submission
Upvote = rexport.Upvote
def dal():
# TODO lru cache? but be careful when it runs continuously
return rexport.DAL(get_sources())
@mcachew(hashf=lambda: get_sources())
def saved() -> Iterator[Save]:
return dal().saved()
@mcachew(hashf=lambda: get_sources())
def comments() -> Iterator[Comment]:
return dal().comments()
@mcachew(hashf=lambda: get_sources())
def submissions() -> Iterator[Submission]:
return dal().submissions()
@mcachew(hashf=lambda: get_sources())
def upvoted() -> Iterator[Upvote]:
return dal().upvoted()
from typing import Dict, Union, Iterable, Iterator, NamedTuple, Any
from functools import lru_cache
import pytz
import re
from datetime import datetime
from multiprocessing import Pool
# TODO hmm. apparently decompressing takes quite a bit of time...
def reddit(suffix: str) -> str:
return 'https://reddit.com' + suffix
class SaveWithDt(NamedTuple):
save: Save
backup_dt: datetime
def __getattr__(self, x):
return getattr(self.save, x)
# TODO for future events?
EventKind = SaveWithDt
class Event(NamedTuple):
dt: datetime
text: str
kind: EventKind
eid: str
title: str
url: str
@property
def cmp_key(self):
return (self.dt, (1 if 'unfavorited' in self.text else 0))
Url = str
def _get_bdate(bfile: Path) -> datetime:
RE = re.compile(r'reddit-(\d{14})')
match = RE.search(bfile.stem)
assert match is not None
bdt = pytz.utc.localize(datetime.strptime(match.group(1), "%Y%m%d%H%M%S"))
return bdt
def _get_state(bfile: Path) -> Dict[Sid, SaveWithDt]:
logger.debug('handling %s', bfile)
bdt = _get_bdate(bfile)
saves = [SaveWithDt(save, bdt) for save in rexport.DAL([bfile]).saved()]
# TODO FIXME remove kython?
from kython import make_dict
return make_dict(
sorted(saves, key=lambda p: p.save.created),
key=lambda s: s.save.sid,
)
@mcachew('/L/data/.cache/reddit-events.cache')
def _get_events(backups: Sequence[Path]=get_sources(), parallel: bool=True) -> Iterator[Event]:
# TODO cachew: let it transform return type? so you don't have to write a wrapper for lists?
# parallel = False # NOTE: eh, not sure if still necessary? I think glumov didn't like it?
prev_saves: Mapping[Sid, SaveWithDt] = {}
# TODO suppress first batch??
# TODO for initial batch, treat event time as creation time
states: Iterable[Mapping[Sid, SaveWithDt]]
if parallel:
with Pool() as p:
states = p.map(_get_state, backups)
else:
# also make it lazy...
states = map(_get_state, backups)
# TODO mm, need to make that iterative too?
for i, (bfile, saves) in enumerate(zip(backups, states)):
bdt = _get_bdate(bfile)
first = i == 0
for key in set(prev_saves.keys()).symmetric_difference(set(saves.keys())):
ps = prev_saves.get(key, None)
if ps is not None:
# TODO use backup date, that is more precise...
# eh. I guess just take max and it will always be correct?
assert not first
yield Event(
dt=bdt, # TODO average wit ps.save_dt?
text=f"unfavorited",
kind=ps,
eid=f'unf-{ps.sid}',
url=ps.url,
title=ps.title,
)
else: # already in saves
s = saves[key]
last_saved = s.backup_dt
yield Event(
dt=s.created if first else last_saved,
text=f"favorited{' [initial]' if first else ''}",
kind=s,
eid=f'fav-{s.sid}',
url=s.url,
title=s.title,
)
prev_saves = saves
# TODO a bit awkward, favorited should compare lower than unfavorited?
@lru_cache(1)
def get_events(*args, **kwargs) -> List[Event]:
evit = _get_events(*args, **kwargs)
return list(sorted(evit, key=lambda e: e.cmp_key))
def test():
get_events(backups=get_sources()[-1:])
list(saved())
def test_unfav():
events = get_events()
url = 'https://reddit.com/r/QuantifiedSelf/comments/acxy1v/personal_dashboard/'
uevents = [e for e in events if e.url == url]
assert len(uevents) == 2
ff = uevents[0]
assert ff.text == 'favorited'
uf = uevents[1]
assert uf.text == 'unfavorited'
def test_get_all_saves():
    # TODO not sure if this is necessary anymore?
saves = list(saved())
# just check that they are unique..
from kython import make_dict
make_dict(saves, key=lambda s: s.sid)
def test_disappearing():
# eh. so for instance, 'metro line colors' is missing from reddit-20190402005024.json for no reason
# but I guess it was just a short glitch... so whatever
saves = get_events()
favs = [s.kind for s in saves if s.text == 'favorited']
[deal_with_it] = [f for f in favs if f.title == '"Deal with it!"']
assert deal_with_it.backup_dt == datetime(2019, 4, 1, 23, 10, 25, tzinfo=pytz.utc)
def test_unfavorite():
events = get_events()
unfavs = [s for s in events if s.text == 'unfavorited']
[xxx] = [u for u in unfavs if u.eid == 'unf-19ifop']
assert xxx.dt == datetime(2019, 1, 28, 8, 10, 20, tzinfo=pytz.utc)
def main():
# TODO eh. not sure why but parallel on seems to mess glumov up and cause OOM...
events = get_events(parallel=False)
print(len(events))
for e in events:
print(e.text, e.url)
# for e in get_
# 509 with urls..
# for e in get_events():
# print(e)
if __name__ == '__main__':
main()
```
#### File: HPI/my/stackexchange.py
```python
import mycfg.repos.stexport.model as stexport
from mycfg import paths
def get_data():
sources = [max(paths.stexport.export_dir.glob('*.json'))]
return stexport.Model(sources).site_model('stackoverflow')
```
#### File: HPI/my/vk.py
```python
from datetime import datetime
import json
from typing import NamedTuple, Iterator, Dict, Union, Sequence, Optional
from mycfg import paths
class Favorite(NamedTuple):
dt: datetime
title: str
url: Optional[str]
text: str
Res = Union[Favorite, Exception]
skip = (
'graffiti',
'poll',
# TODO could be useful..
'note',
'doc',
'audio',
'photo',
'album',
'video',
'page',
)
def parse_fav(j: Dict) -> Favorite:
# TODO copy_history??
url = None
title = '' # TODO ???
atts = j.get('attachments', [])
for a in atts:
if any(k in a for k in skip):
continue
link = a['link']
title = link['title']
url = link['url']
        # TODO link['description'] ?
# TODO would be nice to include user
return Favorite(
dt=datetime.utcfromtimestamp(j['date']),
title=title,
url=url,
text=j['text'],
)
def _iter_favs() -> Iterator[Res]:
jj = json.loads(paths.vk.favs_file.read_text())
for j in jj:
try:
yield parse_fav(j)
except Exception as e:
ex = RuntimeError(f"Error while processing\n{j}")
ex.__cause__ = e
yield ex
def favorites() -> Sequence[Res]:
it = _iter_favs()
# trick to sort errors along with the actual objects
# TODO wonder if there is a shorter way?
# TODO add to the error handling post?
favs = list(it)
prev = datetime.min
keys = []
for i, f in enumerate(favs):
if not isinstance(f, Exception):
prev = f.dt
keys.append((prev, i)) # include index to resolve ties
sorted_items = [p[1] for p in sorted(zip(keys, favs))]
#
return sorted_items
``` |
{
"source": "jhermann/jmx4py",
"score": 2
} |
#### File: src/tests/test_jolokia_client.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import logging
import unittest
import urllib2
from conftest import JmxMockedConnection, JvmTestCase
from jmx4py.jolokia.connection import JmxHttpConnection
from jmx4py.jolokia.client import * #@UnusedWildImport
log = logging.getLogger(__name__)
class JmxEscapingTest(unittest.TestCase):
# Unescaped and escaped test data
DATA = (
(None, None),
("", ""),
("a", "a"),
("!", "!!"),
("a/b", "a!/b"),
("a/b/c", "a!/b!/c"),
("a!b/c", "a!!b!/c"),
)
def test_quote(self):
for text, quoted in self.DATA:
self.assertEqual(quote(text), quoted)
def test_unquote(self):
for text, quoted in self.DATA:
self.assertEqual(text, unquote(quoted))
def test_unquote_extra(self):
self.assertEqual("ab!/z", unquote("!a!b!!!/!z"))
def test_unquote_trail(self):
self.failUnlessRaises(ValueError, unquote, "!")
self.failUnlessRaises(ValueError, unquote, "!!!")
class JmxClientTest(unittest.TestCase):
def test_client_connection(self):
proxy = JmxClient("mock:")
self.failUnless(isinstance(proxy.connection, JmxMockedConnection))
def test_host_port(self):
proxy = JmxClient(("localhost", "8080"))
self.failUnless(isinstance(proxy.connection, JmxHttpConnection))
self.failUnlessEqual(proxy.connection.url, "http://localhost:8080/jolokia/")
def test_bad_scheme(self):
self.failUnlessRaises(urllib2.URLError, JmxClient, "foobar:")
def test_bad_port(self):
self.failUnlessRaises(urllib2.URLError, JmxClient, ("localhost", "x"))
class JmxClientBasicsTest(JvmTestCase):
def test_repr(self):
self.failUnless("localhost" in repr(self.proxy))
def test_bad_type(self):
self.failUnlessRaises(JmxResponseError, self.proxy._execute, type="foo bar baz")
self.failUnlessEqual(self.proxy.connection.calls, 1)
self.failUnlessEqual(self.proxy.connection.errors, 1)
class JmxClientReadTest(JvmTestCase):
def test_read(self):
resp = self.proxy.read("java.lang:type=Memory")
self.failUnless(all(i in resp.value for i in ["HeapMemoryUsage", "NonHeapMemoryUsage"]))
def test_read_with_path(self):
resp = self.proxy.read("java.lang:type=Memory", "HeapMemoryUsage", "used")
self.failUnless(isinstance(resp.value, int))
def test_multi_read(self):
resp = self.proxy.read("java.lang:type=Memory", ["HeapMemoryUsage", "NonHeapMemoryUsage"])
self.failUnlessEqual(set(resp.value.keys()), set(["HeapMemoryUsage", "NonHeapMemoryUsage"]))
def test_multi_read_with_path(self):
self.failUnlessRaises(JmxResponseError, self.proxy.read,
"java.lang:type=Memory", ["HeapMemoryUsage", "NonHeapMemoryUsage"], "used")
class JmxClientWriteTest(JvmTestCase):
def test_write(self):
pass
#TODO: resp = self.proxy.write("java.lang:type=...", ...)
class JmxClientInvokeTest(JvmTestCase):
def test_invoke(self):
pass # TODO: write test
class JmxClientSearchTest(JvmTestCase):
def test_search(self):
pass # TODO: write test
class JmxClientVersionTest(JvmTestCase):
def test_version(self):
version = self.proxy.version()
self.failUnlessEqual(self.proxy.connection.calls, 1)
self.failUnlessEqual(self.proxy.connection.errors, 0)
self.failUnlessEqual(version["status"], 200)
self.failUnless(isinstance(version["timestamp"], int))
self.failUnlessEqual(version["request"]["type"], "version")
self.failUnless(version.protocol.startswith("6."))
``` |
{
"source": "jhermann/pex",
"score": 2
} |
#### File: pex/pex/interpreter.py
```python
from __future__ import absolute_import
import hashlib
import json
import os
import platform
import re
import subprocess
import sys
from textwrap import dedent
from pex import third_party
from pex.common import safe_rmtree
from pex.compatibility import string
from pex.executor import Executor
from pex.jobs import Job, SpawnedJob, execute_parallel
from pex.third_party.packaging import markers, tags
from pex.third_party.pkg_resources import Distribution, Requirement
from pex.tracer import TRACER
from pex.util import CacheHelper
from pex.variables import ENV
class PythonIdentity(object):
class Error(Exception): pass
class InvalidError(Error): pass
class UnknownRequirement(Error): pass
# TODO(wickman) Support interpreter-specific versions, e.g. PyPy-2.2.1
INTERPRETER_NAME_TO_HASHBANG = {
'CPython': 'python%(major)d.%(minor)d',
'Jython': 'jython',
'PyPy': 'pypy',
'IronPython': 'ipy',
}
ABBR_TO_INTERPRETER_NAME = {
'pp': 'PyPy',
'jy': 'Jython',
'ip': 'IronPython',
'cp': 'CPython',
}
@classmethod
def get(cls):
supported_tags = tuple(tags.sys_tags())
preferred_tag = supported_tags[0]
return cls(
binary=sys.executable,
python_tag=preferred_tag.interpreter,
abi_tag=preferred_tag.abi,
platform_tag=preferred_tag.platform,
version=sys.version_info[:3],
supported_tags=supported_tags,
env_markers=markers.default_environment()
)
@classmethod
def decode(cls, encoded):
TRACER.log('creating PythonIdentity from encoded: %s' % encoded, V=9)
values = json.loads(encoded)
if len(values) != 7:
raise cls.InvalidError("Invalid interpreter identity: %s" % encoded)
supported_tags = values.pop('supported_tags')
def iter_tags():
for (interpreter, abi, platform) in supported_tags:
yield tags.Tag(interpreter=interpreter, abi=abi, platform=platform)
return cls(supported_tags=iter_tags(), **values)
@classmethod
def _find_interpreter_name(cls, python_tag):
for abbr, interpreter in cls.ABBR_TO_INTERPRETER_NAME.items():
if python_tag.startswith(abbr):
return interpreter
raise ValueError('Unknown interpreter: {}'.format(python_tag))
def __init__(
self,
binary,
python_tag,
abi_tag,
platform_tag,
version,
supported_tags,
env_markers
):
# N.B.: We keep this mapping to support historical values for `distribution` and `requirement`
# properties.
self._interpreter_name = self._find_interpreter_name(python_tag)
self._binary = binary
self._python_tag = python_tag
self._abi_tag = abi_tag
self._platform_tag = platform_tag
self._version = tuple(version)
self._supported_tags = tuple(supported_tags)
self._env_markers = dict(env_markers)
def encode(self):
values = dict(
binary=self._binary,
python_tag=self._python_tag,
abi_tag=self._abi_tag,
platform_tag=self._platform_tag,
version=self._version,
supported_tags=[(tag.interpreter, tag.abi, tag.platform) for tag in self._supported_tags],
env_markers=self._env_markers
)
return json.dumps(values)
@property
def binary(self):
return self._binary
@property
def python_tag(self):
return self._python_tag
@property
def abi_tag(self):
return self._abi_tag
@property
def platform_tag(self):
return self._platform_tag
@property
def version(self):
return self._version
@property
def version_str(self):
return '.'.join(map(str, self.version))
@property
def supported_tags(self):
return self._supported_tags
@property
def env_markers(self):
return dict(self._env_markers)
@property
def interpreter(self):
return self._interpreter_name
@property
def requirement(self):
return self.distribution.as_requirement()
@property
def distribution(self):
return Distribution(project_name=self.interpreter, version=self.version_str)
@classmethod
def parse_requirement(cls, requirement, default_interpreter='CPython'):
if isinstance(requirement, Requirement):
return requirement
elif isinstance(requirement, string):
try:
requirement = Requirement.parse(requirement)
except ValueError:
try:
requirement = Requirement.parse('%s%s' % (default_interpreter, requirement))
except ValueError:
raise ValueError('Unknown requirement string: %s' % requirement)
return requirement
else:
raise ValueError('Unknown requirement type: %r' % (requirement,))
def matches(self, requirement):
"""Given a Requirement, check if this interpreter matches."""
try:
requirement = self.parse_requirement(requirement, self._interpreter_name)
except ValueError as e:
raise self.UnknownRequirement(str(e))
return self.distribution in requirement
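  # Hedged usage sketch (not part of the original source):
  #   PythonIdentity.get().matches('CPython>=2.7')  # True on a CPython 2.7+ interpreter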
def hashbang(self):
hashbang_string = self.INTERPRETER_NAME_TO_HASHBANG.get(self._interpreter_name, 'CPython') % {
'major': self._version[0],
'minor': self._version[1],
'patch': self._version[2],
}
return '#!/usr/bin/env %s' % hashbang_string
@property
def python(self):
# return the python version in the format of the 'python' key for distributions
# specifically, '2.7', '3.2', etc.
return '%d.%d' % (self.version[0:2])
def __str__(self):
# N.B.: Kept as distinct from __repr__ to support legacy str(identity) used by Pants v1 when
# forming cache locations.
return '{interpreter_name}-{major}.{minor}.{patch}'.format(
interpreter_name=self._interpreter_name,
major=self._version[0],
minor=self._version[1],
patch=self._version[2]
)
def __repr__(self):
return '{type}({binary!r}, {python_tag!r}, {abi_tag!r}, {platform_tag!r}, {version!r})'.format(
type=self.__class__.__name__,
binary=self._binary,
python_tag=self._python_tag,
abi_tag=self._abi_tag,
platform_tag=self._platform_tag,
version=self._version
)
def _tup(self):
return self._binary, self._python_tag, self._abi_tag, self._platform_tag, self._version
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
return self._tup() == other._tup()
def __hash__(self):
return hash(self._tup())
class PythonInterpreter(object):
_REGEXEN = (
re.compile(r'jython$'),
# NB: OSX ships python binaries named Python so we allow for capital-P.
re.compile(r'[Pp]ython$'),
re.compile(r'python[23]$'),
re.compile(r'python[23].[0-9]$'),
    # Some distributions include a suffix in the interpreter name, similar to PEP-3149
# E.g. Gentoo has /usr/bin/python3.6m to indicate it was built with pymalloc
re.compile(r'python[23].[0-9][a-z]$'),
re.compile(r'pypy$'),
re.compile(r'pypy-1.[0-9]$'),
)
_PYTHON_INTERPRETER_BY_NORMALIZED_PATH = {}
@staticmethod
def _normalize_path(path):
return os.path.realpath(path)
class Error(Exception): pass
class IdentificationError(Error): pass
class InterpreterNotFound(Error): pass
@classmethod
def get(cls):
return cls.from_binary(sys.executable)
@staticmethod
def _paths(paths=None):
return paths or os.getenv('PATH', '').split(os.pathsep)
@classmethod
def iter(cls, paths=None):
"""Iterate all interpreters found in `paths`.
NB: The paths can either be directories to search for python binaries or the paths of python
binaries themselves.
:param paths: The paths to look for python interpreters; by default the `PATH`.
:type paths: list str
"""
return cls._filter(cls._find(cls._paths(paths=paths)))
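  # Hedged usage sketch (not part of the original source):
  #   for interp in PythonInterpreter.iter(paths=['/usr/bin']):
  #       print(interp.binary, interp.version)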
@classmethod
def all(cls, paths=None):
return list(cls.iter(paths=paths))
@classmethod
def _create_isolated_cmd(cls, binary, args=None, pythonpath=None, env=None):
cmd = [binary]
# Don't add the user site directory to `sys.path`.
#
# Additionally, it would be nice to pass `-S` to disable adding site-packages but unfortunately
# some python distributions include portions of the standard library there.
cmd.append('-s')
env = cls._sanitized_environment(env=env)
pythonpath = list(pythonpath or ())
if pythonpath:
env['PYTHONPATH'] = os.pathsep.join(pythonpath)
else:
# Turn off reading of PYTHON* environment variables.
cmd.append('-E')
if args:
cmd.extend(args)
rendered_command = ' '.join(cmd)
if pythonpath:
rendered_command = 'PYTHONPATH={} {}'.format(env['PYTHONPATH'], rendered_command)
TRACER.log('Executing: {}'.format(rendered_command), V=3)
return cmd, env
@classmethod
def _execute(cls, binary, args=None, pythonpath=None, env=None, stdin_payload=None, **kwargs):
cmd, env = cls._create_isolated_cmd(binary, args=args, pythonpath=pythonpath, env=env)
stdout, stderr = Executor.execute(cmd, stdin_payload=stdin_payload, env=env, **kwargs)
return cmd, stdout, stderr
INTERP_INFO_FILE = 'INTERP-INFO'
@classmethod
def clear_cache(cls):
interpreter_cache_dir = os.path.join(ENV.PEX_ROOT, 'interpreters')
safe_rmtree(interpreter_cache_dir)
@classmethod
def _spawn_from_binary_external(cls, binary):
def create_interpreter(stdout):
identity = stdout.decode('utf-8').strip()
if not identity:
raise cls.IdentificationError('Could not establish identity of %s' % binary)
return cls(PythonIdentity.decode(identity))
# Part of the PythonInterpreter data are environment markers that depend on the current OS
# release. That data can change when the OS is upgraded but (some of) the installed interpreters
# remain the same. As such, include the OS in the hash structure for cached interpreters.
os_digest = hashlib.sha1()
for os_identifier in platform.release(), platform.version():
os_digest.update(os_identifier.encode('utf-8'))
os_hash = os_digest.hexdigest()
interpreter_cache_dir = os.path.join(ENV.PEX_ROOT, 'interpreters')
os_cache_dir = os.path.join(interpreter_cache_dir, os_hash)
if os.path.isdir(interpreter_cache_dir) and not os.path.isdir(os_cache_dir):
with TRACER.timed('GCing interpreter cache from prior OS version'):
safe_rmtree(interpreter_cache_dir)
interpreter_hash = CacheHelper.hash(binary)
cache_dir = os.path.join(os_cache_dir, interpreter_hash)
cache_file = os.path.join(cache_dir, cls.INTERP_INFO_FILE)
if os.path.isfile(cache_file):
try:
with open(cache_file, 'rb') as fp:
return SpawnedJob.completed(create_interpreter(fp.read()))
except (IOError, OSError, cls.Error, PythonIdentity.Error):
safe_rmtree(cache_dir)
return cls._spawn_from_binary_external(binary)
else:
pythonpath = third_party.expose(['pex'])
cmd, env = cls._create_isolated_cmd(
binary,
args=[
'-c',
dedent("""\
import os
import sys
from pex.common import atomic_directory, safe_open
from pex.interpreter import PythonIdentity
encoded_identity = PythonIdentity.get().encode()
sys.stdout.write(encoded_identity)
with atomic_directory({cache_dir!r}) as cache_dir:
if cache_dir:
with safe_open(os.path.join(cache_dir, {info_file!r}), 'w') as fp:
fp.write(encoded_identity)
""".format(cache_dir=cache_dir, info_file=cls.INTERP_INFO_FILE))
],
pythonpath=pythonpath
)
process = Executor.open_process(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
job = Job(command=cmd, process=process)
return SpawnedJob.stdout(job, result_func=create_interpreter)
@classmethod
def _expand_path(cls, path):
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
return sorted(os.path.join(path, fn) for fn in os.listdir(path))
return []
@classmethod
def from_env(cls, hashbang):
"""Resolve a PythonInterpreter as /usr/bin/env would.
:param hashbang: A string, e.g. "python3.3" representing some binary on the $PATH.
:return: the first matching interpreter found or `None`.
:rtype: :class:`PythonInterpreter`
"""
def hashbang_matches(fn):
basefile = os.path.basename(fn)
return hashbang == basefile
for interpreter in cls._identify_interpreters(filter=hashbang_matches):
return interpreter
@classmethod
def _spawn_from_binary(cls, binary):
normalized_binary = cls._normalize_path(binary)
if not os.path.exists(normalized_binary):
raise cls.InterpreterNotFound(normalized_binary)
# N.B.: The cache is written as the last step in PythonInterpreter instance initialization.
cached_interpreter = cls._PYTHON_INTERPRETER_BY_NORMALIZED_PATH.get(normalized_binary)
if cached_interpreter is not None:
return SpawnedJob.completed(cached_interpreter)
if normalized_binary == cls._normalize_path(sys.executable):
current_interpreter = cls(PythonIdentity.get())
return SpawnedJob.completed(current_interpreter)
return cls._spawn_from_binary_external(normalized_binary)
@classmethod
def from_binary(cls, binary):
"""Create an interpreter from the given `binary`.
:param str binary: The path to the python interpreter binary.
:return: an interpreter created from the given `binary`.
:rtype: :class:`PythonInterpreter`
"""
return cls._spawn_from_binary(binary).await_result()
@classmethod
def _matches_binary_name(cls, path):
basefile = os.path.basename(path)
return any(matcher.match(basefile) is not None for matcher in cls._REGEXEN)
@classmethod
def _find(cls, paths):
"""Given a list of files or directories, try to detect python interpreters amongst them.
Returns an iterator over PythonInterpreter objects.
"""
return cls._identify_interpreters(filter=cls._matches_binary_name, paths=paths)
@classmethod
def _identify_interpreters(cls, filter, paths=None):
def iter_candidates():
for path in cls._paths(paths=paths):
for fn in cls._expand_path(path):
if filter(fn):
yield fn
return execute_parallel(inputs=list(iter_candidates()), spawn_func=cls._spawn_from_binary)
@classmethod
def _filter(cls, pythons):
"""Filters duplicate python interpreters and versions we don't support.
Returns an iterator over PythonInterpreters.
"""
MAJOR, MINOR, SUBMINOR = range(3)
def version_filter(version):
return (version[MAJOR] == 2 and version[MINOR] >= 7 or
version[MAJOR] == 3 and version[MINOR] >= 5)
seen = set()
for interp in pythons:
version = interp.identity.version
if version not in seen and version_filter(version):
seen.add(version)
yield interp
@classmethod
def _sanitized_environment(cls, env=None):
# N.B. This is merely a hack because sysconfig.py on the default OS X
# installation of 2.7 breaks.
env_copy = (env or os.environ).copy()
env_copy.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env_copy
def __init__(self, identity):
"""Construct a PythonInterpreter.
You should probably use `PythonInterpreter.from_binary` instead.
:param identity: The :class:`PythonIdentity` of the PythonInterpreter.
"""
self._identity = identity
self._binary = self._normalize_path(self.identity.binary)
self._PYTHON_INTERPRETER_BY_NORMALIZED_PATH[self._binary] = self
@property
def binary(self):
return self._binary
@property
def identity(self):
return self._identity
@property
def python(self):
return self._identity.python
@property
def version(self):
return self._identity.version
@property
def version_string(self):
return str(self._identity)
def execute(self, args=None, stdin_payload=None, pythonpath=None, env=None, **kwargs):
return self._execute(self.binary,
args=args,
stdin_payload=stdin_payload,
pythonpath=pythonpath,
env=env,
**kwargs)
def open_process(self, args=None, pythonpath=None, env=None, **kwargs):
cmd, env = self._create_isolated_cmd(self.binary, args=args, pythonpath=pythonpath, env=env)
process = Executor.open_process(cmd, env=env, **kwargs)
return cmd, process
def _tup(self):
return self._binary, self._identity
def __hash__(self):
return hash(self._tup())
def __eq__(self, other):
if type(other) is not type(self):
return NotImplemented
return self._tup() == other._tup()
def __lt__(self, other):
if type(other) is not type(self):
return NotImplemented
return self.version < other.version
def __repr__(self):
return '{type}({binary!r}, {identity!r})'.format(
type=self.__class__.__name__,
binary=self._binary,
identity=self._identity
)
def spawn_python_job(
args,
env=None,
interpreter=None,
expose=None,
pythonpath=None,
**subprocess_kwargs
):
"""Spawns a python job.
:param args: The arguments to pass to the python interpreter.
:type args: list of str
:param env: The environment to spawn the python interpreter process in. Defaults to the ambient
environment.
:type env: dict of (str, str)
:param interpreter: The interpreter to use to spawn the python job. Defaults to the current
interpreter.
:type interpreter: :class:`PythonInterpreter`
:param expose: The names of any vendored distributions to expose to the spawned python process.
These will be appended to `pythonpath` if passed.
:type expose: list of str
:param pythonpath: The PYTHONPATH to expose to the spawned python process. These will be
pre-pended to the `expose` path if passed.
:type pythonpath: list of str
:param subprocess_kwargs: Any additional :class:`subprocess.Popen` kwargs to pass through.
:returns: A job handle to the spawned python process.
:rtype: :class:`Job`
"""
pythonpath = list(pythonpath or ())
if expose:
subprocess_env = (env or os.environ).copy()
# In order to expose vendored distributions with their un-vendored import paths in-tact, we
# need to set `__PEX_UNVENDORED__`. See: vendor.__main__.ImportRewriter._modify_import.
subprocess_env['__PEX_UNVENDORED__'] = '1'
pythonpath.extend(third_party.expose(expose))
else:
subprocess_env = env
interpreter = interpreter or PythonInterpreter.get()
cmd, process = interpreter.open_process(
args=args,
pythonpath=pythonpath,
env=subprocess_env,
**subprocess_kwargs
)
return Job(command=cmd, process=process)
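# Hedged usage sketch (not part of the original module); 'pip' is only an
# example of a vendored distribution name to expose:
#   job = spawn_python_job(args=['-c', 'print("hello")'], expose=['pip'])
#   # `job` wraps the spawned subprocess; see pex.jobs.Job for waiting on it.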
``` |
{
"source": "jhermann/pip-upgrader",
"score": 2
} |
#### File: pip-upgrader/tests/test_cli.py
```python
from __future__ import unicode_literals
from subprocess import PIPE, Popen as popen
from unittest import TestCase
import responses
from packaging.utils import canonicalize_name
from pip_upgrader import __version__ as VERSION
from pip_upgrader import cli
from pip_upgrader.packages_status_detector import PackagesStatusDetector
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from io import StringIO
except ImportError: # pragma: nocover
from cStringIO import StringIO
class TestHelp(TestCase):
def test_returns_usage_information(self):
output = popen(['pip-upgrade', '-h'], stdout=PIPE).communicate()[0]
self.assertTrue('Usage:' in output.decode('utf-8'))
output = popen(['pip-upgrade', '--help'], stdout=PIPE).communicate()[0]
self.assertTrue('Usage:' in output.decode('utf-8'))
class TestVersion(TestCase):
def test_returns_version_information(self):
output = popen(['pip-upgrade', '--version'], stdout=PIPE).communicate()[0]
self.assertEqual(output.strip().decode('utf-8'), VERSION)
@patch('pip_upgrader.packages_interactive_selector.user_input', return_value='all')
@patch('pip_upgrader.virtualenv_checker.is_virtualenv', return_value=True)
class TestCommand(TestCase):
def _add_responses_mocks(self):
for package in ['Django', 'celery', 'django-rest-auth', 'ipython']:
with open('tests/fixtures/{}.json'.format(package)) as fh:
body = fh.read()
responses.add(responses.GET,
"https://pypi.python.org/pypi/{}/json".format(package),
body=body,
content_type="application/json")
with open('tests/fixtures/{}.html'.format(canonicalize_name(package))) as fh:
body_html = fh.read()
responses.add(responses.GET,
"https://pypi.python.org/simple/{}".format(canonicalize_name(package)),
body=body_html)
def setUp(self):
self._add_responses_mocks()
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
def test_command_basic_usage(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertTrue(user_input_mock.called)
self.assertIn('Available upgrades', output)
self.assertIn('ipython ... up to date', output)
self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
self.assertNotIn('ipdb', output)
self.assertIn('Successfully upgraded', output)
self.assertIn('this was a simulation using --dry-run', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
def test_command_simple_html_index_url(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock, \
patch('pip_upgrader.packages_status_detector.PackagesStatusDetector.pip_config_locations',
new=PackagesStatusDetector.pip_config_locations + ['pip.test.conf']):
cli.main()
output = stdout_mock.getvalue()
self.assertTrue(user_input_mock.called)
# checks if new index-url was discovered from config file
self.assertIn('Setting API url', output)
self.assertIn('https://pypi.python.org/simple/{package}', output)
self.assertIn('Available upgrades', output)
self.assertIn('ipython ... up to date', output)
self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
self.assertNotIn('ipdb', output)
self.assertIn('Successfully upgraded', output)
self.assertIn('this was a simulation using --dry-run', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
@patch.dict('os.environ', {'PIP_INDEX_URL': 'https://pypi.python.org/simple/'})
def test_command_pip_index_url_environ(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertTrue(user_input_mock.called)
        # checks if the index-url was picked up from the PIP_INDEX_URL environment variable
self.assertIn('Setting API url', output)
self.assertIn('https://pypi.python.org/simple/{package}', output)
self.assertIn('Available upgrades', output)
self.assertIn('ipython ... up to date', output)
self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
self.assertNotIn('ipdb', output)
self.assertIn('Successfully upgraded', output)
self.assertIn('this was a simulation using --dry-run', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': [], '--use-default-index': True})
def test_command__use_default_index(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock, \
patch('pip_upgrader.packages_status_detector.PackagesStatusDetector.pip_config_locations',
new=PackagesStatusDetector.pip_config_locations + ['pip.test.conf']):
cli.main()
output = stdout_mock.getvalue()
        # the index-url from the config file should be ignored when --use-default-index is set
self.assertNotIn('Setting API url', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': []})
def test_command_interactive_bad_choices(self, options_mock, is_virtualenv_mock, user_input_mock):
user_input_mock.return_value = ''
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertTrue(user_input_mock.called)
self.assertIn('No choice selected', output)
self.assertNotIn('Setting API url', output)
user_input_mock.return_value = '5 6 7'
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertTrue(user_input_mock.called)
self.assertIn('No valid choice selected.', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all']})
def test_command_not_interactive_all_packages(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertNotIn('Available upgrades', output)
self.assertIn('Django ... upgrade available: 1.10 ==>', output)
self.assertIn('django-rest-auth ... upgrade available: 0.9.0 ==>', output)
self.assertIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
self.assertIn('this was a simulation using --dry-run', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django', 'bad_package']})
def test_command_not_interactive_specific_package(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertIn('Django ... upgrade available: 1.10 ==>', output)
self.assertNotIn('django-rest-auth', output)
self.assertNotIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['ipython']})
def test_command_not_interactive_all_packages_up_to_date(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertIn('All packages are up-to-date.', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all'],
'<requirements_file>': ['requirements/production.txt']})
def test_command_not_interactive_explicit_requirements(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertNotIn('Django ... upgrade available: 1.10 ==>', output)
self.assertNotIn('django-rest-auth', output)
self.assertNotIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['all'],
'<requirements_file>': ['requirements/local.txt']})
def test_command_not_recursive_requirements_include(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('requirements/local.txt', output)
self.assertIn('requirements/production.txt', output)
self.assertIn('requirements/extra/debug.txt', output)
self.assertIn('requirements/extra/debug2.txt', output)
self.assertNotIn('requirements/extra/bad_file.txt', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django'], '--prerelease': True})
def test_command_not_specific_package_prerelease(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertIn('Django ... upgrade available: 1.10 ==> 1.11rc1', output)
self.assertNotIn('django-rest-auth', output)
self.assertNotIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '-p': ['django'], '--prerelease': True})
def test_command_not_specific_package_prerelease_html_api(self, options_mock, is_virtualenv_mock, user_input_mock):
with patch('sys.stdout', new_callable=StringIO) as stdout_mock, \
patch('pip_upgrader.packages_status_detector.PackagesStatusDetector.pip_config_locations',
new=PackagesStatusDetector.pip_config_locations + ['pip.test.conf']):
cli.main()
output = stdout_mock.getvalue()
# no user_input should be called
self.assertFalse(user_input_mock.called)
self.assertIn('Setting API url', output)
self.assertIn('Django ... upgrade available: 1.10 ==> 1.11rc1', output)
self.assertNotIn('django-rest-auth', output)
self.assertNotIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '--skip-virtualenv-check': False,
'-p': ['django']})
def test_command_not_interactive_not_virtualenv(self, options_mock, is_virtualenv_mock, user_input_mock):
is_virtualenv_mock.return_value = False
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertNotIn('Setting API url', output)
self.assertIn("It seems you haven't activated a virtualenv", output)
self.assertNotIn('Successfully upgraded', output)
@responses.activate
@patch('pip_upgrader.cli.get_options', return_value={'--dry-run': True, '--skip-virtualenv-check': True,
'-p': ['django']})
def test_command_not_interactive_not_virtualenv_skip(self, options_mock, is_virtualenv_mock, user_input_mock):
is_virtualenv_mock.return_value = False
with patch('sys.stdout', new_callable=StringIO) as stdout_mock:
cli.main()
output = stdout_mock.getvalue()
self.assertFalse(user_input_mock.called)
self.assertNotIn('Setting API url', output)
self.assertIn('Django ... upgrade available: 1.10 ==>', output)
self.assertNotIn('django-rest-auth', output)
self.assertNotIn('ipython ... up to date', output)
self.assertNotIn('ipdb', output)
self.assertNotIn('celery ... upgrade available: 3.1.1 ==>', output)
self.assertIn('Successfully upgraded', output)
``` |
{
"source": "jhermann/pipx",
"score": 2
} |
#### File: jhermann/pipx/setup.py
```python
from setuptools import setup, find_packages
import sys, os
import json
# https://docs.python.org/2/distutils/setupscript.html
def read_file(filename):
f = open(filename)
data = f.read()
    f.close()
return data
kwargs = {
"keywords": "pip",
"version": "0.1.1",
"packages": find_packages(exclude=["tests"]),
"url": "https://github.com/janakitech/pipx",
"entry_points": {
"console_scripts": [
"pipx=pipx:main",
"px=pipx:main",
]
},
"description": "pip extended",
"install_requires": json.loads(read_file("project.json"))["dependencies"],
"long_description": read_file("readme.md"),
"name": "pipx",
"license": read_file("license.txt"),
"author_email": "<EMAIL>",
"author": "ludbek",
"include_package_data": True
}
setup(**kwargs)
``` |
{
"source": "jhermann/rudiments",
"score": 2
} |
#### File: src/tests/test_security.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import getpass
# TODO: use mocker.call from upcoming "pytest-mock" release
try:
from unittest.mock import call
except ImportError:
from mock import call
import pytest
from rudiments.security import *
NETRC_TEST_CASES = [
('http://foo.example.com', 'john', 'doe'),
('http://[email protected]', 'jane', 'snafu'),
('https://foo.example.com', 'john', 'doe'),
('ftp://foo.example.com', 'john', 'doe'),
('ftps://foo.example.com', 'john', 'doe'),
]
def test_credentials_lookup_from_netrc_is_optional(mocker):
mocker.patch('getpass.getpass', return_value='sesame')
netrc_call = mocker.patch('rudiments.security.netrc')
netrc_call.side_effect = IOError(2, "not found", "netrc")
access = Credentials('http://[email protected]')
auth = access.auth_pair()
assert access.source == 'console'
with pytest.raises(IOError):
netrc_call.side_effect = IOError(13, "cannot open", "netrc")
access = Credentials('http://[email protected]')
auth = access.auth_pair()
@pytest.mark.parametrize('target', (None, ''))
def test_credentials_lookup_for_empty_target(target):
access = Credentials(target)
with pytest.raises(ValueError):
auth = access.auth_pair()
def test_credentials_lookup_for_non_url_target(mocker):
console_input = mocker.patch(__name__ + '.Credentials._raw_input', return_value='')
mocker.patch('getpass.getpass', return_value='sesame')
access = Credentials('some custom target')
auth = access.auth_pair()
console_input.assert_called_once()
assert auth == (getpass.getuser(), 'sesame')
assert access.source == 'console'
def test_credentials_lookup_from_console(mocker):
console_input = mocker.patch(__name__ + '.Credentials._raw_input', return_value='')
mocker.patch('getpass.getpass', return_value='sesame')
access = Credentials('http://console.example.com')
console_input.assert_not_called()
auth = access.auth_pair()
console_input.assert_called_once()
assert auth == (getpass.getuser(), 'sesame')
assert Credentials.AUTH_MEMOIZE_INPUT['http://console.example.com'] == access.auth_pair()
# test memoization explicitly
access = Credentials('http://console.example.com')
auth = access.auth_pair()
console_input.assert_called_once()
# test with other realm
access = Credentials('http://terminal.example.com')
auth = access.auth_pair()
assert console_input.call_count == 2
assert 'http://terminal.example.com' in Credentials.AUTH_MEMOIZE_INPUT
assert access.source == 'console'
def test_credentials_lookup_from_url():
access = Credentials('http://jane:<EMAIL>@<EMAIL>')
assert access.auth_pair() == ('jane', 'bar')
assert access.source == 'url'
@pytest.mark.parametrize('url, name, pwd', NETRC_TEST_CASES)
def test_credentials_lookup_from_netrc(datadir, url, name, pwd):
Credentials.NETRC_FILE = os.path.join(datadir, 'netrc')
try:
access = Credentials(url)
assert not access.auth_valid(), "Should be False"
pair = access.auth_pair()
assert name == pair[0], "Wrong username"
assert pwd == pair[1], "Wrong password"
assert access.auth_valid(), "Should be True"
assert access.source == 'netrc'
finally:
Credentials.NETRC_FILE = None
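# --- Hedged illustration (not part of the original test module) ---
# The 'netrc' fixture referenced above is assumed to follow the standard
# ~/.netrc format; entries matching NETRC_TEST_CASES might look like:
#
#   machine foo.example.com login john password doe
#   machine bar.example.com login jane password snafu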
def test_credentials_lookup_from_keyring(mocker):
from rudiments import security
if security.keyring:
url = 'http://<EMAIL>'
get_pwd = mocker.patch('keyring.get_password', return_value='round')
access = Credentials(url)
assert access.auth_pair() == ('jane', 'round')
get_pwd.assert_called_once_with(url, 'jane')
assert access.source == 'keyring'
```
#### File: src/tests/test_www.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
from contextlib import contextmanager
# import pytest
import responses
from rudiments.www import *
class URLAsFileTests():
URL = 'http://example.com/index.html'
BODY = 'Hi there!'
@contextmanager
def index_html(self):
mock = responses.RequestsMock()
mock.start()
try:
mock.add(mock.GET, self.URL, status=200, content_type='text/plain', body=self.BODY)
yield mock
finally:
mock.stop()
mock.reset()
def test_url_as_file_works(self):
with self.index_html():
with url_as_file(self.URL, ext='html') as filename:
assert os.path.getsize(filename) == len(self.BODY)
assert 'example.com' in filename
assert filename.endswith('.html')
def test_url_as_file_cleanup_survives_file_deletion(self):
with self.index_html():
with url_as_file(self.URL) as filename:
os.remove(filename)
# if the context manager now raises, pytest will fail this
def test_url_as_file_without_extension(self):
with self.index_html():
with url_as_file(self.URL) as filename:
assert '.' not in os.path.basename(filename).replace('.com', '')
``` |
{
"source": "jhermann/scanapi",
"score": 3
} |
#### File: scanapi/tree/testing_node.py
```python
import logging
from scanapi.session import session
from scanapi.test_status import TestStatus
from scanapi.tree.tree_keys import ASSERT_KEY, NAME_KEY
from scanapi.utils import validate_keys
logger = logging.getLogger(__name__)
class TestingNode:
__test__ = False
SCOPE = "test"
ALLOWED_KEYS = (ASSERT_KEY, NAME_KEY)
REQUIRED_KEYS = (NAME_KEY, ASSERT_KEY)
def __init__(self, spec, request):
self.spec = spec
self.request = request
self._validate()
def __getitem__(self, item):
return self.spec[item]
@property
def name(self):
return self[NAME_KEY]
@property
def assertion(self):
return self[ASSERT_KEY]
@property
def full_name(self):
return f"{self.request.endpoint.name}::{self.request.name}::{self.name}"
def run(self):
try:
passed, failure = self.request.endpoint.vars.evaluate_assertion(
self.assertion
)
status = TestStatus.PASSED if passed else TestStatus.FAILED
error = None
except Exception as e:
status = TestStatus.ERROR
failure = None
error = str(e)
self._process_result(status)
self._log_result(status, failure)
return {
"name": self.full_name,
"status": status,
"failure": failure,
"error": error,
}
@staticmethod
def _process_result(status):
"""Increment the number of session errors/failures/successes
depending on the test status.
Args:
status [string]: the status of the test: passed, failed or error.
"""
if status == TestStatus.ERROR:
session.increment_errors()
return
if status == TestStatus.FAILED:
session.increment_failures()
return
if status == TestStatus.PASSED:
session.increment_successes()
def _log_result(self, status, failure):
logger.debug("\a [%s] %s", status.upper(), self.full_name)
if failure:
logger.debug("\t %s is false", failure)
def _validate(self):
validate_keys(
self.spec.keys(), self.ALLOWED_KEYS, self.REQUIRED_KEYS, self.SCOPE
)
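# --- Hedged usage sketch (not part of the original module) ---
# A TestingNode is built from a small spec dict carrying the two required keys
# plus the request node that owns it; the assertion syntax below is only an
# illustrative assumption about what evaluate_assertion accepts:
#
#   node = TestingNode(
#       {"name": "status_is_ok", "assert": "${{ response.status_code == 200 }}"},
#       request,
#   )
#   result = node.run()  # -> {"name": ..., "status": ..., "failure": ..., "error": ...}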
``` |
{
"source": "jhermann/shiv",
"score": 2
} |
#### File: shiv/test/conftest.py
```python
import os
from pathlib import Path
import pytest
from shiv.bootstrap.environment import Environment
@pytest.fixture
def zip_location():
return Path(__file__).absolute().parent / "test.zip"
@pytest.fixture(params=[True, False], ids=[".", "absolute-path"])
def package_location(request):
package_location = Path(__file__).absolute().parent / "package"
if request.param is True:
# test building from the current directory
cwd = os.getcwd()
os.chdir(package_location)
yield Path(".")
os.chdir(cwd)
else:
# test building an absolute path
yield package_location
@pytest.fixture
def sp():
return [Path(__file__).absolute().parent / 'sp' / 'site-packages']
@pytest.fixture
def env():
return Environment(
built_at=str("2019-01-01 12:12:12"),
build_id=str("test_id"),
entry_point="test_entry_point",
script="test_console_script",
compile_pyc=False,
extend_pythonpath=False,
shiv_version="0.0.1",
)
``` |
{
"source": "jhermann/tablemate",
"score": 2
} |
#### File: tablemate/util/dclick.py
```python
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import click
def pretty_path(path, _home_re=re.compile('^' + re.escape(os.path.expanduser('~') + os.sep))):
"""Prettify path for humans, and make it Unicode."""
path = click.format_filename(path)
path = _home_re.sub('~' + os.sep, path)
return path
def serror(message, *args, **kwargs):
"""Print a styled error message."""
if args or kwargs:
message = message.format(*args, **kwargs)
return click.secho(message, fg='white', bg='red', bold=True)
class LoggedFailure(click.UsageError):
"""Report a failure condition to the user."""
def __init__(self, message):
message = click.style(message, fg='white', bg='red', bold=True)
click.UsageError.__init__(self, message)
class AliasedGroup(click.Group):
""" A command group with alias names.
Inherit from this class and define a ``MAP`` class variable,
which is a mapping from alias names to canonical command names.
Then use that derived class as the ``cls`` parameter for a
``click.group`` decorator.
"""
MAP = {}
def get_command(self, ctx, cmd_name):
"""Map some aliases to their 'real' names."""
cmd_name = self.MAP.get(cmd_name, cmd_name)
return click.Group.get_command(self, ctx, cmd_name)
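# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the pattern described in the AliasedGroup docstring; the group and
# alias names below are invented for the example:
#
#   class MateGroup(AliasedGroup):
#       MAP = {'ls': 'list', 'rm': 'remove'}
#
#   @click.group(cls=MateGroup)
#   def cli():
#       """Command group where 'ls' resolves to the 'list' subcommand."""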
``` |
{
"source": "jhermann/xmlschema",
"score": 2
} |
#### File: xmlschema/tests/test_helpers.py
```python
from __future__ import unicode_literals
import unittest
import os
import sys
try:
import xmlschema
except ImportError:
# Adds the package base dir path as first search path for imports
pkg_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, pkg_base_dir)
import xmlschema
from xmlschema.etree import etree_element
from xmlschema.namespaces import XSD_NAMESPACE, XSI_NAMESPACE
from xmlschema.helpers import ISO_TIMEZONE_PATTERN, DURATION_PATTERN, HEX_BINARY_PATTERN, \
NOT_BASE64_BINARY_PATTERN, get_xsd_annotation, iter_xsd_components, get_namespace, \
get_qname, local_name, prefixed_to_qname, qname_to_prefixed, has_xsd_components, \
get_xsd_component, get_xml_bool_attribute, get_xsd_derivation_attribute, get_xpath_default_namespace
from xmlschema.qnames import XSI_TYPE, XSD_SCHEMA, XSD_ELEMENT, XSD_SIMPLE_TYPE, XSD_ANNOTATION
from xmlschema.tests import XMLSchemaTestCase
class TestHelpers(XMLSchemaTestCase):
def test_iso_timezone_pattern(self):
self.assertEqual(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00-05:00").group(0), '-05:00')
self.assertEqual(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00+05:00").group(0), '+05:00')
self.assertEqual(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00+13:59").group(0), '+13:59')
self.assertEqual(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00-14:00").group(0), '-14:00')
self.assertEqual(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00Z").group(0), 'Z')
self.assertIsNone(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00-14:01"))
self.assertIsNone(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00-15:00"))
self.assertIsNone(ISO_TIMEZONE_PATTERN.search("2002-10-10T12:00:00"))
def test_duration_pattern(self):
self.assertEqual(DURATION_PATTERN.search("P5Y7M20DT3H5M5S").group(0), 'P5Y7M20DT3H5M5S')
self.assertIsNone(ISO_TIMEZONE_PATTERN.search("P1YM7D"))
def test_hex_binary_pattern(self):
self.assertEqual(HEX_BINARY_PATTERN.search("aff1c").group(0), 'aff1c')
self.assertEqual(HEX_BINARY_PATTERN.search("aF3Bc").group(0), 'aF3Bc')
self.assertIsNone(ISO_TIMEZONE_PATTERN.search("34FG"))
def test_not_base64_pattern(self):
self.assertIsNone(NOT_BASE64_BINARY_PATTERN.search("YWVpb3U="))
self.assertEqual(NOT_BASE64_BINARY_PATTERN.search("YWVpb3U!=").group(0), '!')
def test_get_namespace_function(self):
self.assertEqual(get_namespace(XSD_SIMPLE_TYPE), XSD_NAMESPACE)
self.assertEqual(get_namespace(''), '')
self.assertEqual(get_namespace(None), '')
def test_get_qname_functions(self):
self.assertEqual(get_qname(XSD_NAMESPACE, 'element'), XSD_ELEMENT)
self.assertEqual(get_qname(XSI_NAMESPACE, 'type'), XSI_TYPE)
self.assertEqual(get_qname(XSI_NAMESPACE, ''), '')
self.assertEqual(get_qname(XSI_NAMESPACE, None), None)
self.assertEqual(get_qname(XSI_NAMESPACE, 0), 0)
self.assertEqual(get_qname(XSI_NAMESPACE, False), False)
self.assertRaises(TypeError, get_qname, XSI_NAMESPACE, True)
self.assertEqual(get_qname(None, True), True)
self.assertEqual(get_qname(None, 'element'), 'element')
self.assertEqual(get_qname(None, ''), '')
self.assertEqual(get_qname('', 'element'), 'element')
def test_local_name_functions(self):
self.assertEqual(local_name(XSD_SCHEMA), 'schema')
self.assertEqual(local_name('schema'), 'schema')
self.assertEqual(local_name(''), '')
self.assertEqual(local_name(None), None)
self.assertRaises(ValueError, local_name, '{ns name')
self.assertRaises(TypeError, local_name, 1.0)
self.assertRaises(TypeError, local_name, 0)
def test_prefixed_to_qname_functions(self):
namespaces = {'xs': XSD_NAMESPACE, 'xsi': XSI_NAMESPACE}
self.assertEqual(prefixed_to_qname('xs:element', namespaces), XSD_ELEMENT)
self.assertEqual(prefixed_to_qname('xsi:type', namespaces), XSI_TYPE)
self.assertEqual(prefixed_to_qname(XSI_TYPE, namespaces), XSI_TYPE)
self.assertEqual(prefixed_to_qname('element', namespaces), 'element')
self.assertEqual(prefixed_to_qname('', namespaces), '')
self.assertEqual(prefixed_to_qname(None, namespaces), None)
self.assertRaises(ValueError, prefixed_to_qname, 'xsi:type', {})
self.assertRaises(ValueError, prefixed_to_qname, 'xml:lang', namespaces)
def test_qname_to_prefixed_functions(self):
namespaces = {'xs': XSD_NAMESPACE, 'xsi': XSI_NAMESPACE}
self.assertEqual(qname_to_prefixed(XSD_ELEMENT, namespaces), 'xs:element')
self.assertEqual(qname_to_prefixed('xs:element', namespaces), 'xs:element')
self.assertEqual(qname_to_prefixed('element', namespaces), 'element')
self.assertEqual(qname_to_prefixed('', namespaces), '')
self.assertEqual(qname_to_prefixed(None, namespaces), None)
self.assertEqual(qname_to_prefixed(0, namespaces), 0)
self.assertEqual(qname_to_prefixed(XSI_TYPE, {}), XSI_TYPE)
self.assertEqual(qname_to_prefixed(None, {}), None)
self.assertEqual(qname_to_prefixed('', {}), '')
self.assertEqual(qname_to_prefixed('type', {'': XSI_NAMESPACE}), 'type')
self.assertEqual(qname_to_prefixed('type', {'ns': ''}), 'ns:type')
self.assertEqual(qname_to_prefixed('type', {'': ''}), 'type')
def test_get_xsd_annotation(self):
elem = etree_element(XSD_SCHEMA)
self.assertIsNone(get_xsd_annotation(elem))
elem.append(etree_element(XSD_ANNOTATION))
self.assertEqual(get_xsd_annotation(elem), elem[0])
elem.append(etree_element(XSD_ELEMENT))
self.assertEqual(get_xsd_annotation(elem), elem[0])
elem.clear()
elem.append(etree_element(XSD_ELEMENT))
self.assertIsNone(get_xsd_annotation(elem))
elem.append(etree_element(XSD_ANNOTATION))
self.assertIsNone(get_xsd_annotation(elem))
def test_iter_xsd_components(self):
elem = etree_element(XSD_SCHEMA)
self.assertFalse(list(iter_xsd_components(elem)))
self.assertFalse(list(iter_xsd_components(elem, start=1)))
elem.append(etree_element(XSD_ANNOTATION))
self.assertFalse(list(iter_xsd_components(elem)))
self.assertFalse(list(iter_xsd_components(elem, start=1)))
elem.append(etree_element(XSD_ELEMENT))
self.assertEqual(list(iter_xsd_components(elem)), [elem[1]])
elem.append(etree_element(XSD_SIMPLE_TYPE))
self.assertEqual(list(iter_xsd_components(elem)), elem[1:])
self.assertEqual(list(iter_xsd_components(elem, start=1)), [elem[2]])
elem.append(etree_element(XSD_ANNOTATION))
self.assertRaises(ValueError, list, iter_xsd_components(elem))
def test_has_xsd_components(self):
elem = etree_element(XSD_SCHEMA)
elem.append(etree_element(XSD_ELEMENT))
self.assertTrue(has_xsd_components(elem))
elem.clear()
self.assertFalse(has_xsd_components(elem))
elem.append(etree_element(XSD_ANNOTATION))
self.assertFalse(has_xsd_components(elem))
elem.append(etree_element(XSD_ELEMENT))
self.assertTrue(has_xsd_components(elem))
self.assertFalse(has_xsd_components(elem, start=1))
elem.append(etree_element(XSD_ANNOTATION))
self.assertRaises(ValueError, list, iter_xsd_components(elem))
def test_get_xsd_component(self):
elem = etree_element(XSD_SCHEMA)
self.assertRaises(ValueError, get_xsd_component, elem)
self.assertIsNone(get_xsd_component(elem, required=False))
elem.append(etree_element(XSD_ELEMENT))
self.assertEqual(get_xsd_component(elem), elem[0])
elem.append(etree_element(XSD_SIMPLE_TYPE))
self.assertRaises(ValueError, get_xsd_component, elem)
self.assertEqual(get_xsd_component(elem, strict=False), elem[0])
elem.clear()
elem.append(etree_element(XSD_ANNOTATION))
self.assertRaises(ValueError, get_xsd_component, elem)
self.assertIsNone(get_xsd_component(elem, required=False))
elem.append(etree_element(XSD_SIMPLE_TYPE))
self.assertEqual(get_xsd_component(elem), elem[1])
elem.append(etree_element(XSD_ELEMENT))
self.assertRaises(ValueError, get_xsd_component, elem)
self.assertEqual(get_xsd_component(elem, strict=False), elem[1])
elem.clear()
elem.append(etree_element(XSD_ANNOTATION))
elem.append(etree_element(XSD_ANNOTATION))
self.assertRaises(ValueError, get_xsd_component, elem, True, False)
def test_get_xml_bool_attribute(self):
elem = etree_element(XSD_ELEMENT, attrib={'a1': 'true', 'a2': '1', 'a3': 'false', 'a4': '0', 'a5': 'x'})
self.assertEqual(get_xml_bool_attribute(elem, 'a1'), True)
self.assertEqual(get_xml_bool_attribute(elem, 'a2'), True)
self.assertEqual(get_xml_bool_attribute(elem, 'a3'), False)
self.assertEqual(get_xml_bool_attribute(elem, 'a4'), False)
self.assertRaises(TypeError, get_xml_bool_attribute, elem, 'a5')
self.assertRaises(KeyError, get_xml_bool_attribute, elem, 'a6')
self.assertEqual(get_xml_bool_attribute(elem, 'a6', True), True)
self.assertEqual(get_xml_bool_attribute(elem, 'a6', 'true'), True)
self.assertEqual(get_xml_bool_attribute(elem, 'a6', '1'), True)
self.assertEqual(get_xml_bool_attribute(elem, 'a6', False), False)
self.assertEqual(get_xml_bool_attribute(elem, 'a6', 'false'), False)
self.assertEqual(get_xml_bool_attribute(elem, 'a6', '0'), False)
self.assertRaises(TypeError, get_xml_bool_attribute, elem, 'a6', 1)
self.assertRaises(TypeError, get_xml_bool_attribute, elem, 'a6', 0)
self.assertRaises(TypeError, get_xml_bool_attribute, elem, 'a6', 'True')
def test_get_xsd_derivation_attribute(self):
elem = etree_element(XSD_ELEMENT, attrib={
'a1': 'extension', 'a2': ' restriction', 'a3': '#all', 'a4': 'other',
'a5': 'restriction extension restriction ', 'a6': 'other restriction'
})
values = ('extension', 'restriction')
self.assertEqual(get_xsd_derivation_attribute(elem, 'a1', values), 'extension')
self.assertEqual(get_xsd_derivation_attribute(elem, 'a2', values), ' restriction')
self.assertEqual(get_xsd_derivation_attribute(elem, 'a3', values), 'extension restriction')
self.assertRaises(ValueError, get_xsd_derivation_attribute, elem, 'a4', values)
self.assertEqual(get_xsd_derivation_attribute(elem, 'a5', values), 'restriction extension restriction ')
self.assertRaises(ValueError, get_xsd_derivation_attribute, elem, 'a6', values)
self.assertEqual(get_xsd_derivation_attribute(elem, 'a7', values), '')
def test_get_xpath_default_namespace(self):
elem = etree_element(XSD_ELEMENT, attrib={'xpathDefaultNamespace': '##local '})
self.assertEqual(get_xpath_default_namespace(elem, 'ns0', 'ns1', 'ns2'), '')
elem = etree_element(XSD_ELEMENT, attrib={'xpathDefaultNamespace': ' ##defaultNamespace'})
self.assertEqual(get_xpath_default_namespace(elem, 'ns0', 'ns1', 'ns2'), 'ns0')
elem = etree_element(XSD_ELEMENT, attrib={'xpathDefaultNamespace': ' ##targetNamespace'})
self.assertEqual(get_xpath_default_namespace(elem, 'ns0', 'ns1', 'ns2'), 'ns1')
elem = etree_element(XSD_ELEMENT)
self.assertIsNone(get_xpath_default_namespace(elem, 'ns0', 'ns1'))
self.assertEqual(get_xpath_default_namespace(elem, 'ns0', 'ns1', 'ns2'), 'ns2')
elem = etree_element(XSD_ELEMENT, attrib={'xpathDefaultNamespace': 'ns3'})
self.assertEqual(get_xpath_default_namespace(elem, 'ns0', 'ns1', 'ns2'), 'ns3')
elem = etree_element(XSD_ELEMENT, attrib={'xpathDefaultNamespace': 'ns3 ns4'})
self.assertRaises(ValueError, get_xpath_default_namespace, elem, 'ns0', 'ns1', 'ns2')
if __name__ == '__main__':
from xmlschema.tests import print_test_header
print_test_header()
unittest.main()
```
#### File: xmlschema/tests/test_package.py
```python
import unittest
import importlib
import glob
import fileinput
import os
import re
import sys
try:
import xmlschema
except ImportError:
# Adds the package base dir path as first search path for imports
pkg_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, pkg_base_dir)
import xmlschema
from xmlschema.etree import defused_etree, etree_tostring
# Import ElementTree and defusedxml.ElementTree
import xml.etree.ElementTree as ElementTree # Original module with C extensions
defused_etree.fromstring('<A/>') # Lazy import of defusedxml.ElementTree
import xml.etree.ElementTree as PyElementTree # Pure Python import
class TestEnvironment(unittest.TestCase):
def test_element_tree(self):
self.assertNotEqual(ElementTree.Element, ElementTree._Element_Py, msg="cElementTree not available!")
elem = PyElementTree.Element('element')
self.assertEqual(etree_tostring(elem), '<element />')
self.assertEqual(importlib.import_module('xml.etree.ElementTree'), ElementTree)
def test_pure_python_element_tree(self):
if sys.version_info >= (3,):
self.assertEqual(PyElementTree.Element, PyElementTree._Element_Py) # C extensions disabled by defusedxml
self.assertNotEqual(ElementTree.Element, PyElementTree.Element)
else:
self.assertNotEqual(PyElementTree.Element, PyElementTree._Element_Py)
elem = PyElementTree.Element('element')
self.assertEqual(etree_tostring(elem), '<element />')
def test_defused_etree(self):
self.assertEqual(defused_etree.element_tree, PyElementTree)
self.assertEqual(defused_etree.etree_element, PyElementTree.Element)
class TestPackaging(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_dir = os.path.dirname(os.path.abspath(__file__))
cls.source_dir = os.path.dirname(cls.test_dir)
cls.package_dir = os.path.dirname(cls.source_dir)
if not cls.package_dir.endswith('/xmlschema'):
cls.package_dir = None
cls.missing_debug = re.compile(r"(\bimport\s+pdb\b|\bpdb\s*\.\s*set_trace\(\s*\)|\bprint\s*\()")
cls.get_version = re.compile(r"(?:\brelease|__version__)(?:\s*=\s*)(\'[^\']*\'|\"[^\"]*\")")
def test_missing_debug_statements(self):
# Exclude explicit debug statements written in the code
exclude = {
'regex.py': [240, 241],
}
message = "\nFound a debug missing statement at line %d or file %r: %r"
filename = None
file_excluded = []
files = (
glob.glob(os.path.join(self.source_dir, '*.py')) +
glob.glob(os.path.join(self.source_dir, 'validators/*.py'))
)
for line in fileinput.input(files):
if fileinput.isfirstline():
filename = fileinput.filename()
file_excluded = exclude.get(os.path.basename(filename), [])
lineno = fileinput.filelineno()
if lineno in file_excluded:
continue
match = self.missing_debug.search(line)
self.assertIsNone(match, message % (lineno, filename, match.group(0) if match else None))
def test_version(self):
message = "\nFound a different version at line %d or file %r: %r (may be %r)."
files = [os.path.join(self.source_dir, '__init__.py')]
if self.package_dir is not None:
files.extend([
os.path.join(self.package_dir, 'setup.py'),
os.path.join(self.package_dir, 'doc/conf.py'),
])
version = filename = None
for line in fileinput.input(files):
if fileinput.isfirstline():
filename = fileinput.filename()
lineno = fileinput.filelineno()
match = self.get_version.search(line)
if match is not None:
if version is None:
version = match.group(1).strip('\'\"')
else:
self.assertTrue(
version == match.group(1).strip('\'\"'),
message % (lineno, filename, match.group(1).strip('\'\"'), version)
)
if __name__ == '__main__':
unittest.main()
```
#### File: xmlschema/validators/elements.py
```python
from __future__ import unicode_literals
from decimal import Decimal
from ..exceptions import XMLSchemaAttributeError, XMLSchemaValueError
from ..qnames import XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE, XSD_ATTRIBUTE_GROUP, \
XSD_COMPLEX_TYPE, XSD_SIMPLE_TYPE, XSD_ALTERNATIVE, XSD_ELEMENT, XSD_ANY_TYPE, XSD_UNIQUE, \
XSD_KEY, XSD_KEYREF, XSI_NIL, XSI_TYPE
from ..helpers import get_qname, prefixed_to_qname, get_xml_bool_attribute, get_xsd_derivation_attribute
from ..etree import etree_element
from ..converters import ElementData, XMLSchemaConverter
from ..xpath import ElementPathMixin
from .exceptions import XMLSchemaValidationError
from .xsdbase import XsdComponent, XsdType, ParticleMixin, ValidationMixin
from .constraints import XsdUnique, XsdKey, XsdKeyref
from .wildcards import XsdAnyElement
XSD_MODEL_GROUP_TAGS = {XSD_GROUP, XSD_SEQUENCE, XSD_ALL, XSD_CHOICE}
XSD_ATTRIBUTE_GROUP_ELEMENT = etree_element(XSD_ATTRIBUTE_GROUP)
class XsdElement(XsdComponent, ValidationMixin, ParticleMixin, ElementPathMixin):
"""
Class for XSD 1.0 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = QName
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, (unique | key | keyref)*))
</element>
"""
admitted_tags = {XSD_ELEMENT}
def __init__(self, elem, schema, parent, name=None):
super(XsdElement, self).__init__(elem, schema, parent, name)
self.names = (self.qualified_name,) if self.qualified else (self.qualified_name, self.local_name)
if not hasattr(self, 'type'):
raise XMLSchemaAttributeError("undefined 'type' attribute for %r." % self)
if not hasattr(self, 'qualified'):
raise XMLSchemaAttributeError("undefined 'qualified' attribute for %r." % self)
def __repr__(self):
if self.ref is None:
return '%s(name=%r, occurs=%r)' % (self.__class__.__name__, self.prefixed_name, self.occurs)
else:
return '%s(ref=%r, occurs=%r)' % (self.__class__.__name__, self.prefixed_name, self.occurs)
def __setattr__(self, name, value):
if name == "type":
assert value is None or isinstance(value, XsdType), "Wrong value %r for attribute 'type'." % value
if hasattr(value, 'attributes'):
self.attributes = value.attributes
else:
self.attributes = self.schema.BUILDERS.attribute_group_class(
XSD_ATTRIBUTE_GROUP_ELEMENT, self.schema, self
)
super(XsdElement, self).__setattr__(name, value)
def __iter__(self):
if not self.type.has_simple_content():
for e in self.type.content_type.iter_subelements():
yield e
def _parse(self):
XsdComponent._parse(self)
self._parse_attributes()
index = self._parse_type()
if self.type is None:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
self._parse_constraints(index)
self._parse_substitution_group()
def _parse_attributes(self):
self._parse_particle(self.elem)
self.name = None
self._ref = None
self.qualified = self.elem.get('form', self.schema.element_form_default) == 'qualified'
if self.default is not None and self.fixed is not None:
self.parse_error("'default' and 'fixed' attributes are mutually exclusive.")
self._parse_properties('abstract', 'block', 'final', 'form', 'nillable')
# Parse element attributes
try:
element_name = prefixed_to_qname(self.elem.attrib['ref'], self.namespaces)
except KeyError:
# No 'ref' attribute ==> 'name' attribute required.
try:
if self.is_global or self.qualified:
self.name = get_qname(self.target_namespace, self.elem.attrib['name'])
else:
self.name = self.elem.attrib['name']
except KeyError:
self.parse_error("missing both 'name' and 'ref' attributes.")
if self.is_global:
if 'minOccurs' in self.elem.attrib:
self.parse_error("attribute 'minOccurs' not allowed for a global element.")
if 'maxOccurs' in self.elem.attrib:
self.parse_error("attribute 'maxOccurs' not allowed for a global element.")
else:
# Reference to a global element
if self.is_global:
self.parse_error("an element reference can't be global.")
for attribute in ('name', 'type', 'nillable', 'default', 'fixed', 'form', 'block'):
if attribute in self.elem.attrib:
self.parse_error("attribute %r is not allowed when element reference is used." % attribute)
try:
xsd_element = self.maps.lookup_element(element_name)
except KeyError:
self.parse_error('unknown element %r' % element_name)
self.name = element_name
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
else:
self._ref = xsd_element
self.name = xsd_element.name
self.type = xsd_element.type
self.qualified = xsd_element.qualified
def _parse_type(self):
if self.ref:
if self._parse_component(self.elem, required=False, strict=False) is not None:
self.parse_error("element reference declaration can't has children.")
elif 'type' in self.elem.attrib:
type_qname = prefixed_to_qname(self.elem.attrib['type'], self.namespaces)
try:
self.type = self.maps.lookup_type(type_qname)
except KeyError:
self.parse_error('unknown type %r' % self.elem.attrib['type'])
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
else:
child = self._parse_component(self.elem, required=False, strict=False)
if child is not None:
if child.tag == XSD_COMPLEX_TYPE:
self.type = self.schema.BUILDERS.complex_type_class(child, self.schema, self)
elif child.tag == XSD_SIMPLE_TYPE:
self.type = self.schema.BUILDERS.simple_type_factory(child, self.schema, self)
return 1
else:
self.type = None
return 0
def _parse_constraints(self, index=0):
self.constraints = {}
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_UNIQUE:
constraint = XsdUnique(child, self.schema, self)
elif child.tag == XSD_KEY:
constraint = XsdKey(child, self.schema, self)
elif child.tag == XSD_KEYREF:
constraint = XsdKeyref(child, self.schema, self)
else:
continue # Error already caught by validation against the meta-schema
try:
if child != self.maps.constraints[constraint.name]:
self.parse_error("duplicated identity constraint %r:" % constraint.name, child)
except KeyError:
self.maps.constraints[constraint.name] = constraint
finally:
self.constraints[constraint.name] = constraint
def _parse_substitution_group(self):
substitution_group = self.substitution_group
if substitution_group is None:
return
if not self.is_global:
self.parse_error("'substitutionGroup' attribute in a local element declaration")
qname = prefixed_to_qname(substitution_group, self.namespaces)
if qname[0] != '{':
qname = get_qname(self.target_namespace, qname)
try:
head_element = self.maps.lookup_element(qname)
except KeyError:
self.parse_error("unknown substitutionGroup %r" % substitution_group)
else:
final = head_element.final
if final is None:
final = self.schema.final_default
if final == '#all' or 'extension' in final and 'restriction' in final:
self.parse_error("head element %r cannot be substituted." % head_element)
elif self.type == head_element.type or self.type.name == XSD_ANY_TYPE:
pass
elif 'extension' in final and not self.type.is_derived(head_element.type, 'extension'):
self.parse_error(
"%r type is not of the same or an extension of the head element %r type."
% (self, head_element)
)
elif 'restriction' in final and not self.type.is_derived(head_element.type, 'restriction'):
self.parse_error(
"%r type is not of the same or a restriction of the head element %r type."
% (self, head_element)
)
elif not self.type.is_derived(head_element.type):
self.parse_error(
"%r type is not of the same or a derivation of the head element %r type."
% (self, head_element)
)
@property
def built(self):
return self.type.parent is None or self.type.built
@property
def validation_attempted(self):
if self.built:
return 'full'
else:
return self.type.validation_attempted
# XSD declaration attributes
@property
def ref(self):
return self.elem.get('ref')
# Global element's exclusive properties
@property
def final(self):
return get_xsd_derivation_attribute(self.elem, 'final', ('extension', 'restriction'))
@property
def block(self):
return get_xsd_derivation_attribute(self.elem, 'block', ('extension', 'restriction', 'substitution'))
@property
def substitution_group(self):
return self.elem.get('substitutionGroup')
# Properties inherited by references
@property
def abstract(self):
if self._ref is not None:
return self._ref.abstract
return get_xml_bool_attribute(self.elem, 'abstract', default=False)
@property
def default(self):
return self.elem.get('default') if self._ref is None else self._ref.default
@property
def fixed(self):
return self.elem.get('fixed') if self._ref is None else self._ref.fixed
@property
def form(self):
if self._ref is not None:
return self._ref.form
value = self.elem.get('form')
if value not in (None, 'qualified', 'unqualified'):
raise XMLSchemaValueError("wrong value %r for 'form' attribute." % value)
return value
@property
def nillable(self):
if self._ref is not None:
return self._ref.nillable
return get_xml_bool_attribute(self.elem, 'nillable', default=False)
def get_attribute(self, name):
if name[0] != '{':
return self.type.attributes[get_qname(self.type.target_namespace, name)]
return self.type.attributes[name]
def iter_components(self, xsd_classes=None):
if xsd_classes is None:
yield self
for obj in self.constraints.values():
yield obj
else:
if isinstance(self, xsd_classes):
yield self
for obj in self.constraints.values():
if isinstance(obj, xsd_classes):
yield obj
if self.ref is None and not self.type.is_global:
for obj in self.type.iter_components(xsd_classes):
yield obj
def iter_decode(self, elem, validation='lax', converter=None, **kwargs):
"""
Creates an iterator for decoding an Element instance.
:param elem: the Element that has to be decoded.
        :param validation: the validation mode, can be 'lax', 'strict' or 'skip'.
:param converter: an :class:`XMLSchemaConverter` subclass or instance.
:param kwargs: keyword arguments for the decoding process.
        :return: yields a decoded object, possibly preceded by a sequence of \
validation or decoding errors.
"""
if not isinstance(converter, XMLSchemaConverter):
converter = self.schema.get_converter(converter, **kwargs)
level = kwargs.pop('level', 0)
use_defaults = kwargs.get('use_defaults', False)
value = content = attributes = None
# Get the instance type: xsi:type or the schema's declaration
if XSI_TYPE not in elem.attrib:
xsd_type = self.type
attribute_group = self.attributes
else:
xsi_type = elem.attrib[XSI_TYPE]
try:
xsd_type = self.maps.lookup_type(converter.unmap_qname(xsi_type))
except KeyError:
yield self.validation_error(validation, "unknown type %r" % xsi_type, elem, **kwargs)
xsd_type = self.type
attribute_group = self.attributes
else:
attribute_group = getattr(xsd_type, 'attributes', self.attributes)
# Decode attributes
for result in attribute_group.iter_decode(elem.attrib, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
attributes = result
# Checks the xsi:nil attribute of the instance
if validation != 'skip' and XSI_NIL in elem.attrib:
if not self.nillable:
yield self.validation_error(validation, "element is not nillable.", elem, **kwargs)
try:
if get_xml_bool_attribute(elem, XSI_NIL):
if elem.text is not None:
reason = "xsi:nil='true' but the element is not empty."
yield self.validation_error(validation, reason, elem, **kwargs)
else:
element_data = ElementData(elem.tag, None, None, attributes)
yield converter.element_decode(element_data, self, level)
return
except TypeError:
reason = "xsi:nil attribute must has a boolean value."
yield self.validation_error(validation, reason, elem, **kwargs)
if xsd_type.is_simple():
if len(elem) and validation != 'skip':
reason = "a simpleType element can't has child elements."
yield self.validation_error(validation, reason, elem, **kwargs)
text = elem.text
if self.fixed is not None:
if text is None:
text = self.fixed
elif text != self.fixed:
reason = "must has the fixed value %r." % self.fixed
yield self.validation_error(validation, reason, elem, **kwargs)
elif not text and use_defaults and self.default is not None:
text = self.default
if text is None:
for result in xsd_type.iter_decode('', validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
for result in xsd_type.iter_decode(text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
value = result
elif xsd_type.has_simple_content():
if len(elem) and validation != 'skip':
reason = "a simple content element can't has child elements."
yield self.validation_error(validation, reason, elem, **kwargs)
if elem.text is not None:
text = elem.text or self.default if use_defaults else elem.text
for result in xsd_type.content_type.iter_decode(text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
value = result
else:
for result in xsd_type.content_type.iter_decode(elem, validation, converter, level=level + 1, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
content = result
if isinstance(value, Decimal):
try:
value = kwargs['decimal_type'](value)
except (KeyError, TypeError):
pass
element_data = ElementData(elem.tag, value, content, attributes)
yield converter.element_decode(element_data, self, level)
if content is not None:
del content
if validation != 'skip':
for constraint in self.constraints.values():
for error in constraint(elem):
yield self.validation_error(validation, error, elem, **kwargs)
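    # --- Hedged usage note (not part of the original module) ---
    # iter_decode() is normally not called directly; decoding is usually driven
    # through the schema-level API, roughly along these lines (file names are
    # made up for the example):
    #
    #   schema = xmlschema.XMLSchema('example.xsd')
    #   data = schema.to_dict('example.xml')  # internally walks XsdElement.iter_decode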
def iter_encode(self, obj, validation='lax', converter=None, **kwargs):
"""
Creates an iterator for encoding data to an Element.
:param obj: the data that has to be encoded.
:param validation: the validation mode: can be 'lax', 'strict' or 'skip'.
:param converter: an :class:`XMLSchemaConverter` subclass or instance.
:param kwargs: keyword arguments for the encoding process.
        :return: yields an Element, possibly preceded by a sequence of \
validation or encoding errors.
"""
if not isinstance(converter, XMLSchemaConverter):
converter = self.schema.get_converter(converter, **kwargs)
level = kwargs.pop('level', 0)
element_data = converter.element_encode(obj, self, level)
errors = []
tag = element_data.tag
text = None
children = element_data.content
attributes = ()
if element_data.attributes is not None and XSI_TYPE in element_data.attributes:
xsi_type = element_data.attributes[XSI_TYPE]
try:
xsd_type = self.maps.lookup_type(converter.unmap_qname(xsi_type))
except KeyError:
errors.append("unknown type %r" % xsi_type)
xsd_type = self.type
attribute_group = self.attributes
else:
attribute_group = getattr(xsd_type, 'attributes', self.attributes)
else:
xsd_type = self.type
attribute_group = self.attributes
for result in attribute_group.iter_encode(element_data.attributes, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
attributes = result
if validation != 'skip' and XSI_NIL in element_data.attributes:
if not self.nillable:
errors.append("element is not nillable.")
xsi_nil = element_data.attributes[XSI_NIL]
if xsi_nil.strip() not in ('0', '1', 'true', 'false'):
errors.append("xsi:nil attribute must has a boolean value.")
if element_data.text is not None:
errors.append("xsi:nil='true' but the element is not empty.")
else:
elem = converter.etree_element(element_data.tag, attrib=attributes, level=level)
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
return
if xsd_type.is_simple():
if element_data.content:
errors.append("a simpleType element can't has child elements.")
if element_data.text is None:
pass
else:
for result in xsd_type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
elif xsd_type.has_simple_content():
if element_data.text is not None:
for result in xsd_type.content_type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
else:
text = result
else:
for result in xsd_type.content_type.iter_encode(
element_data, validation, converter, level=level+1, **kwargs):
if isinstance(result, XMLSchemaValidationError):
errors.append(result)
elif result:
text, children = result
elem = converter.etree_element(tag, text, children, attributes, level)
if validation != 'skip' and errors:
for e in errors:
yield self.validation_error(validation, e, elem, **kwargs)
yield elem
del element_data
def is_restriction(self, other, check_particle=True):
if isinstance(other, XsdAnyElement):
return True # TODO
elif isinstance(other, XsdElement):
if self.name != other.name:
if other.name not in self.maps.substitution_groups:
return False
else:
return any(self.is_restriction(e) for e in self.maps.substitution_groups[other.name])
elif check_particle and not ParticleMixin.is_restriction(self, other):
return False
elif self.type is not other.type and self.type.elem is not other.type.elem and \
not self.type.is_derived(other.type):
return False
elif self.fixed != other.fixed:
return False
elif other.nillable is False and self.nillable:
return False
elif not all(value in other.block for value in self.block):
return False
elif not all(k in other.constraints for k in self.constraints):
return False
elif other.model == 'choice':
if ParticleMixin.is_restriction(self, other):
return any(self.is_restriction(e, False) for e in other.iter_group())
else:
return any(self.is_restriction(e) for e in other.iter_group())
else:
match_restriction = False
for e in other.iter_group():
if match_restriction:
if not e.is_emptiable():
return False
elif self.is_restriction(e):
match_restriction = True
elif not e.is_emptiable():
return False
return True
class Xsd11Element(XsdElement):
"""
Class for XSD 1.1 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = List of QName
targetNamespace = anyURI
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, alternative*, (unique | key | keyref)*))
</element>
"""
def _parse(self):
XsdComponent._parse(self)
self._parse_attributes()
index = self._parse_type()
index = self._parse_alternatives(index)
if self.type is None:
if not self.alternatives:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
elif self.alternatives:
self.parse_error("types alternatives incompatible with type specification.")
self._parse_constraints(index)
self._parse_substitution_group()
def _parse_alternatives(self, index=0):
self.alternatives = []
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_ALTERNATIVE:
self.alternatives.append(XsdAlternative(child, self.schema, self))
index += 1
else:
break
return index
@property
def target_namespace(self):
try:
return self.elem.attrib['targetNamespace']
except KeyError:
return self.schema.target_namespace
class XsdAlternative(XsdComponent):
"""
<alternative
id = ID
test = an XPath expression
type = QName
xpathDefaultNamespace = (anyURI | (##defaultNamespace | ##targetNamespace | ##local))
{any attributes with non-schema namespace . . .}>
Content: (annotation?, (simpleType | complexType)?)
</alternative>
"""
admitted_tags = {XSD_ALTERNATIVE}
@property
def built(self):
raise NotImplementedError
``` |
{
"source": "jhermon/Cam-COP",
"score": 3
} |
#### File: jhermon/Cam-COP/capFrontend.py
```python
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import ThemedTk
import tkinter.font as font
from PIL import Image
from PIL import ImageTk
from Object_detection_image import detect
import time
import numpy as np
import cv2
import os
#from db_conn import get_details
import datetime
#tkinter Frontend
#home logo displayed
#home = Toplevel() # create the window
home = ThemedTk(theme="breeze")
#home.transient([root])
#opens the image ,resizes it and places it on the window
path =Image.open("C:/tensorflow1/models/research/object_detection/capcoplogo.png")
re_image = path.resize((600, 600), Image.ANTIALIAS)
home_img = ImageTk.PhotoImage(re_image)
home_label = Label(home,image=home_img)
home_label.image = home_img
#Places the home window in the center of the computer screen
hl=600
wl=600
widthl = home.winfo_screenwidth()
heightl= home.winfo_screenheight()
x_coordinate = (widthl/2) - (wl/2)
y_coordinate = (heightl/2) - (hl/2)
home.geometry("%dx%d+%d+%d" % (wl,hl,x_coordinate,y_coordinate))
#actually puts the window on the screen
home_label.pack()
#destroys the window after 3000 milliseconds (3 secs)
home.after(3000,lambda:home.destroy())
home.mainloop()
#changes the theme and also changes fonts of different labels based on the variable used.
root = ThemedTk(theme="breeze")
myFont = font.Font(family='Helvetica', size=15, weight='bold')
#title of root window
root.title("Cam Cop")
root.iconbitmap('C:/tensorflow1/models/research/object_detection/capfinal.ico')
#size window
width_value = root.winfo_screenwidth()
height_value = root.winfo_screenheight()
root.geometry("{}x{}+0+0".format(width_value,height_value))
#First label
label1 = Label(root, text="Enter image:",font=myFont)
label1.place(x=1130,y=40)
def click1():
# gives the location of the images
global label_filedialog
filename = filedialog.askopenfilename(initialdir ="/", title="Select Image", filetypes=(("png files","*.png"),("jpeg files","*.jpeg"),("jpg files","*.jpg")))
click1.img1 = filename
label_filedialog = Label(root, text="")
label_filedialog.place(x=1150,y=80)
label_filedialog.configure(text=filename)
if filename == "":
label_filedialog.destroy()
addButton['state'] = NORMAL
else:
addButton['state'] = DISABLED
img =cv2.imread(filename)
#adj_img = cv2.rectangle(img,(100,100),(300,300),(0,255,0),3)
# changes the color channel
image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
#converts to tk image and resizes the image so it can fit in the tkinter frame
image_Pil = Image.fromarray(image)
resize_image = image_Pil.resize((1080, 700), Image.ANTIALIAS)
tkImage = ImageTk.PhotoImage(resize_image)
#displays on window
click1.label_img = Label(frame,image=tkImage)
click1.label_img.image = tkImage
click1.label_img.pack()
def detector():
global label_img
#runs the detect function from Object_detection_image
detect(click1.img1)
img =cv2.imread(detect.img_scan)
#adj_img = cv2.rectangle(img,(100,100),(300,300),(0,255,0),3)
# changes the color channel
image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
#converts to tk image and resizes the image so it can fit in the tkinter frame
image_Pil = Image.fromarray(image)
resize_image = image_Pil.resize((1080, 700), Image.ANTIALIAS)
tkImage = ImageTk.PhotoImage(resize_image)
#removes previous image
click1.label_img.destroy()
#recreates the label that was destroyed/deleted
click1.label_img = ''
#displays scanned image on window
detector.label_img = Label(frame,image=tkImage)
detector.label_img.image = tkImage
detector.label_img.pack()
def reset():
#remove label in the window
label_filedialog.destroy()
if click1.label_img != '':
click1.label_img.destroy()
elif detector.label_img !='':
detector.label_img.destroy()
addButton['state'] = NORMAL
#USING OFFLINE DATABASE
'''def detector():
global label_img
#runs the detect function from Object_detection_image
detect(click1.img1)
img =cv2.imread(detect.img_scan)
#adj_img = cv2.rectangle(img,(100,100),(300,300),(0,255,0),3)
# changes the color channel
image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
#converts to tk image and resizes the image so it can fit in the tkinter frame
image_Pil = Image.fromarray(image)
resize_image = image_Pil.resize((1080, 700), Image.ANTIALIAS)
tkImage = ImageTk.PhotoImage(resize_image)
#removes previous image
click1.label_img.destroy()
#recreates the label that was destroyed/deleted
click1.label_img = ''
#displays scanned image on window
detector.label_img = Label(frame,image=tkImage)
detector.label_img.image = tkImage
detector.label_img.pack()
if len(detect.plate) != 6:
detector.label_plate = Label(root,text=detect.plate)
detector.label_plate.place(x=1150,y=180)
detector.label_plate.config(font=('Courier',20))
else:
detector.label_plate = Label(root,text=detect.plate)
detector.label_plate.place(x=1150,y=180)
detector.label_plate.config(font=('Courier',44))
#this function queries the database
get_details(detect.plate)
if type(get_details.lst[0]) == str:
#adding result of database to the window
#header of driver details
detector.label_header = Label(root,text='Driver Info')
detector.label_header.place(x=1150,y=240)
detector.label_header.config(font=('Courier',25,'bold'))
#print firstname
detector.label_firstname = Label(root, text= 'Firstname: ' + get_details.lst[0])
detector.label_firstname.place(x=1150,y=270)
detector.label_firstname.config(font=('Courier',15))
#print lastname
detector.label_lastname = Label(root, text= 'Lastname: ' + get_details.lst[1])
detector.label_lastname.place(x=1150,y=300)
detector.label_lastname.config(font=('Courier',15))
#print gender
detector.label_gender = Label(root, text= 'Gender: ' + get_details.lst[2])
detector.label_gender.place(x=1150,y=330)
detector.label_gender.config(font=('Courier',15))
#print TRN
detector.label_TRN = Label(root, text= 'TRN: ' + get_details.lst[3])
detector.label_TRN.place(x=1150,y=360)
detector.label_TRN.config(font=('Courier',15))
#print address
detector.label_address = Label(root, text= 'Address:')
detector.label_address2 = Label(root, text= get_details.lst[4])
detector.label_address.place(x=1150,y=390)
detector.label_address.config(font=('Courier',15, 'bold'))
detector.label_address2.place(x=1150,y=420)
detector.label_address2.config(font=('Courier',15))
#print DOB
dob = get_details.lst[5].strftime('%m/%d/%Y')
detector.label_DOB = Label(root, text= 'DOB: ' + dob)
detector.label_DOB.place(x=1150,y=450)
detector.label_DOB.config(font=('Courier',15))
#print car brand
detector.label_car_brand = Label(root, text= 'Car brand: ' + get_details.lst[6])
detector.label_car_brand.place(x=1150,y=480)
detector.label_car_brand.config(font=('Courier',15))
#print car color
detector.label_car_color = Label(root, text= 'Car color: ' + get_details.lst[7])
detector.label_car_color.place(x=1150,y=510)
detector.label_car_color.config(font=('Courier',15))
#year made
detector.label_year_made = Label(root, text= 'Year made: ' + get_details.lst[8])
detector.label_year_made.place(x=1150,y=540)
detector.label_year_made.config(font=('Courier',15))
#year purchased
detector.label_year_purchased = Label(root, text= 'Year purchased: ' + get_details.lst[9])
detector.label_year_purchased.place(x=1150,y=570)
detector.label_year_purchased.config(font=('Courier',15))
else:
get_details(detect.plate)
detector.label_match = Label(root, text='Possible match found: ' + get_details.lst[0][1] + '\n' + str(get_details.lst[0][0]) + '% match')
detector.label_match.place(x=1150,y=240)
detector.label_match.config(font=('Courier',15))
def reset():
#remove label in the window
label_filedialog.destroy()
if click1.label_img != '':
click1.label_img.destroy()
elif detector.label_img !='' and len(detect.plate) == 6:
detector.label_img.destroy()
detector.label_plate.destroy()
detector.label_header.destroy()
detector.label_firstname.destroy()
detector.label_lastname.destroy()
detector.label_gender.destroy()
detector.label_TRN.destroy()
detector.label_address.destroy()
detector.label_address2.destroy()
detector.label_DOB.destroy()
detector.label_car_brand.destroy()
detector.label_car_color.destroy()
detector.label_year_made.destroy()
detector.label_year_purchased.destroy()
elif detector.label_img !='' and len(detect.plate) != 6:
detector.label_img.destroy()
detector.label_plate.destroy()
else:
detector.label_match.destroy()
addButton['state'] = NORMAL'''
#create button
addButton = ttk.Button(root, text="Choose image",command=click1)
#addButton['font'] = myFont
addButton.place(x=1260,y=40)
#scan image
scanButton = ttk.Button(root, text="Scan image",command=detector)
scanButton.place(x=1150,y=140)
# reset button
resetButton = ttk.Button(root, text="Reset",command=reset)
#resetButton['font'] = myFont
resetButton.place(x=1280,y=140)
# frame
frame = LabelFrame(root, padx=2, pady=2)
label_dummy =Label(frame)
label_dummy.pack()
frame.place(x=10,y=40,width=1100, height=720)
root.mainloop()
``` |
{
"source": "jhernandez18p/Dev2tech-Server",
"score": 2
} |
#### File: local_apps/authentication/views.py
```python
from django.shortcuts import render
# Create your views here.
def my_custom_page_not_found_view(request):
template = 'frontend/pages/404.html'
context = {}
return render(request, template, context)
def my_custom_error_view(request):
template = 'frontend/pages/500.html'
context = {}
return render(request, template, context)
def my_custom_permission_denied_view(request):
template = 'frontend/pages/403.html'
context = {}
return render(request, template, context)
def my_custom_bad_request_view(request):
template = 'frontend/pages/400.html'
context = {}
return render(request, template, context)
``` |
{
"source": "jhernandez18p/mobyapp",
"score": 2
} |
#### File: src/services/views.py
```python
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import ListView, DetailView
from src.services.models import Service, ServiceImage
from src.services.forms import ServiceForm
from src.base.models import Position, Carousel, CarouselImage, Site
from src.utils.libs import contact_email
# Create your views here.
class Home(ListView):
model = Service
template_name = 'app/services.html'
paginate_by = 6
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
service_list = self.model.objects.all()
if service_list.exists():
context = super().get_context_data(object_list=service_list.filter(featured=False),service_list=service_list.filter(featured=True),**kwargs)
service_header_carousel = Carousel.objects.filter(Q(page__name='servicios') & Q(position__name='header'))
if service_header_carousel.exists():
context['service_header_carousel'] = True
service_header_carousel_images = CarouselImage.objects.filter(Carousel_id=service_header_carousel[0])
if service_header_carousel_images.exists():
context['service_header_carousel_images'] = service_header_carousel_images
context['has_newsletter'] = True
context['SITE_URL'] = 'Servicios'
context['url'] = reverse('services:home')
context['url_nav'] = 'servicios'
return context
class ServiceDetail(DetailView):
model = Service
form_class = ServiceForm
paginate_by = 6
template_name = 'app/detail/service_detail.html'
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
form_name = form.cleaned_data.get('name')
form_email = form.cleaned_data.get('email')
form_subject = form.cleaned_data.get('subject')
form_message = form.cleaned_data.get('message')
form_service = request.GET.get('name')
contact_email(
(form_name,
form_email,
form_subject,
form_message,
form_service,)
)
return HttpResponseRedirect('/servicios/gracias')
else:
return HttpResponseRedirect('/servicios/error')
return render(request, self.template_name, {'form': form})
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
form = self.form_class
context['form'] = form
images = ServiceImage.objects.filter(Service_id=context['object'].id)
if images.exists():
context['has_images'] = True
context['images'] = images
obj = super().get_object()
services = self.model.objects.exclude(slug=obj.slug).order_by('?')[:3]
if services.exists():
context['object_list'] = services
context['url_nav'] = 'servicios'
return context
class ServiceThanks(ListView):
model = Service
template_name = 'app/base/thanks.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['has_newsletter'] = True
context['SITE_URL'] = 'Servicios'
context['url'] = reverse('services:home')
context['url_nav'] = 'servicios'
return context
class ServiceError(ListView):
model = Service
template_name = 'app/base/error.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['has_newsletter'] = True
context['SITE_URL'] = 'Servicios'
context['url_nav'] = 'servicios'
context['url'] = reverse('services:home')
return context
```
#### File: src/ventas/views.py
```python
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render
from django.views import View
from django.views.generic import ListView, DetailView
from src.base.models import Carousel, CarouselImage
from src.services.forms import ServiceForm
from src.ventas.models import Article, Photo, Department, Provider, Brands, Category, Color, Line, SubLine
from src.ventas.forms import PhotoForm
class Home(ListView):
queryset = ''
template_name = 'app/products.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['SITE_URL'] = 'Nuestros productos'
context['has_newsletter'] = True
articles = Article.objects.exclude(featured=False)
if articles.exists():
context['products'] = articles[:8]
else:
articles = Article.objects.all()
if articles.exists():
context['products'] = articles[:8]
products_header_carousel = Carousel.objects.filter(Q(page__name="products") & Q(position_id=1))
if products_header_carousel.exists():
context['products_header_carousel'] = True
products_header_carousel_images = CarouselImage.objects.filter(Carousel_id=products_header_carousel[0].id)
if products_header_carousel_images.exists():
context['products_header_carousel_images'] = products_header_carousel_images
departments = Department.objects.all()
if departments.exists():
context['departments'] = departments[:4]
brands = Brands.objects.all()
if brands.exists():
context['brands'] = brands
context['url_nav'] = 'productos'
print(self)
return context
class ProductsList(ListView):
model = Article
template_name = 'app/products_list.html'
paginate_by = 20
def get_queryset(self):
article_list = self.model.objects.all()
line = ''
if self.request.GET.get('line'):
line = int(self.request.GET.get('line'))
article_list = article_list.filter(line=line)
sub_line = ''
if self.request.GET.get('sub_line'):
sub_line = int(self.request.GET.get('sub_line'))
article_list = article_list.filter(sub_line=sub_line)
category = ''
if self.request.GET.get('category'):
category = int(self.request.GET.get('category'))
article_list = article_list.filter(category=category)
department = ''
if self.request.GET.get('department'):
department = int(self.request.GET.get('department'))
article_list = article_list.filter(department=department)
brand = ''
if self.request.GET.get('brand'):
brand = int(self.request.GET.get('brand'))
article_list = article_list.filter(brand=brand)
color = ''
if self.request.GET.get('color'):
color = int(self.request.GET.get('color'))
article_list = article_list.filter(color=color)
search = ''
if self.request.GET.get('search'):
search = self.request.GET.get('search')
article_list = article_list.filter(
Q(name__icontains=search)|
Q(code__icontains=search)|
Q(line__name__icontains=search)|
Q(sub_line__name__icontains=search)|
Q(category__name__icontains=search)|
Q(department__name__icontains=search)|
Q(brand__name__icontains=search)|
Q(color__name__icontains=search)
)
return article_list
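# Illustrative example (values are hypothetical): a request with the query string
# ?department=3&search=mesa narrows article_list to Articles in department 3 whose
# name, code, line, category, brand or color contain "mesa".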
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
category = 'cat'
context = super().get_context_data(**kwargs)
departments = Department.objects.all()
if departments.exists():
context['departments'] = departments.filter(active=True).order_by('name')
categories = Category.objects.all()
if categories.exists():
context['categories'] = categories.filter(active=True).order_by('name')
brands = Brands.objects.all()
if brands.exists():
context['brands'] = brands.filter(active=True).order_by('name')
colors = Color.objects.all()
if colors.exists():
context['colors'] = colors.filter(active=True).order_by('name')
lines = Line.objects.all()
if lines.exists():
context['lines'] = lines.filter(active=True).order_by('name')
sub_lines = SubLine.objects.all()
if sub_lines.exists():
context['sub_lines'] = sub_lines.filter(active=True).order_by('name')
url_arg = '&'
line = ''
if self.request.GET.get('line'):
line = lines.get(id=self.request.GET.get('line')).name
url_arg = '{}line={}&'.format(url_arg,self.request.GET.get('line'))
sub_line = ''
if self.request.GET.get('sub_line'):
sub_line = sub_lines.get(id=self.request.GET.get('sub_line')).name
url_arg = '{}sub_line={}&'.format(url_arg,self.request.GET.get('sub_line'))
category = ''
if self.request.GET.get('category'):
category = categories.get(id=self.request.GET.get('category')).name
url_arg = '{}category={}&'.format(url_arg,self.request.GET.get('category'))
department = ''
if self.request.GET.get('department'):
department = departments.get(id=self.request.GET.get('department')).name
url_arg = '{}department={}&'.format(url_arg,self.request.GET.get('department'))
brand = ''
if self.request.GET.get('brand'):
brand = brands.get(id=self.request.GET.get('brand')).name
url_arg = '{}brand={}&'.format(url_arg,self.request.GET.get('brand'))
color = ''
if self.request.GET.get('color'):
color = colors.get(id=self.request.GET.get('color')).name
url_arg = '{}color={}&'.format(url_arg,self.request.GET.get('color'))
search = ''
if self.request.GET.get('search'):
search = self.request.GET.get('search')
url_arg = '{}search={}&'.format(url_arg,self.request.GET.get('search'))
context['line'] = line
context['sub_line'] = sub_line
context['category'] = category
context['department'] = department
context['brand'] = brand
context['color'] = color
context['search'] = search
context['url_arg'] = url_arg
context['SITE_URL'] = 'Departamento %s' % (category)
context['cat'] = '%s' % (category)
context['url_nav'] = 'productos'
return context
class ProductsDetail(DetailView):
model = Article
form_class = ServiceForm
# queryset = ''
template_name = 'app/detail/product_details.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
object_list = Article.objects.filter(Q(line=context['object'].line) | Q(category=context['object'].category)).order_by('?')
if object_list.exists():
context['object_list'] = object_list[:4]
context['object'].update_counter()
category = 'herraje'
form = self.form_class
context['form'] = form
context['cat'] = '%s' % (category)
context['SITE_URL'] = 'Detalle de producto'
context['url_nav'] = 'productos'
return context
class Departments(ListView):
model = Department
paginate_by = 4
# context_object_name = 'boards'
# queryset = ''
template_name = 'app/departments.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['SITE_URL'] = 'Departamentos'
context['url_nav'] = 'productos'
return context
class DepartmentDetail(DetailView):
model = Department
# context_object_name = 'boards'
# queryset = ''
template_name = 'app/detail/departments_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['SITE_URL'] = 'Detalles de Departamentos'
# context['objects'] = {}
articles = Article.objects.filter(department=self.get_object().pk)
if articles.exists():
context['products'] = articles[:9]
context['url_nav'] = 'productos'
return context
class BrandView(ListView):
model = Brands
# context_object_name = 'boards'
paginate_by = 6
# queryset = ''
template_name = 'app/providers.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['SITE_URL'] = 'Proveedores'
# context['objects'] = {}
context['url_nav'] = 'productos'
return context
class BrandsDetails(DetailView):
model = Brands
# context_object_name = 'boards'
# queryset = ''
template_name = 'app/detail/provider_details.html'
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
articles = Article.objects.filter(provider=self.get_object().pk)
if articles.exists():
print()
context['products'] = articles[:9]
context['SITE_URL'] = 'Detalles de proveedor'
# context['objects'] = {}
context['url_nav'] = 'productos'
return context
``` |
{
"source": "jhernand/openshift-ansible",
"score": 3
} |
#### File: bin/openshift_ansible/multi_inventory.py
```python
from time import time
import argparse
import yaml
import os
import subprocess
import json
import errno
import fcntl
import tempfile
import copy
from string import Template
import shutil
CONFIG_FILE_NAME = 'multi_inventory.yaml'
DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
class MultiInventoryException(Exception):
'''Exceptions for MultiInventory class'''
pass
class MultiInventory(object):
'''
MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
def __init__(self, args=None):
# Allow args to be passed when called as a library
if not args:
self.args = {}
else:
self.args = args
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
# Prefer a file in the same directory, fall back to a file in etc
if os.path.isfile(same_dir_config_file):
self.config_file = same_dir_config_file
elif os.path.isfile(etc_dir_config_file):
self.config_file = etc_dir_config_file
else:
self.config_file = None # expect env vars
def run(self):
'''This method checks to see if the local
cache is valid for the inventory.
if the cache is valid; return cache
else the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the provider specified.
'''
# load yaml
if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
# Build a default config
self.config = {}
self.config['accounts'] = [
{
'name': 'default',
'cache_location': DEFAULT_CACHE_PATH,
'provider': 'aws/hosts/ec2.py',
'env_vars': {
'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
}
},
]
self.config['cache_max_age'] = 300
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
if self.config.has_key('cache_location'):
self.cache_path = self.config['cache_location']
if self.args.get('refresh_cache', None):
self.get_inventory()
self.write_to_cache()
# if its a host query, fetch and do not cache
elif self.args.get('host', None):
self.get_inventory()
elif not self.is_cache_valid():
# go fetch the inventories and cache them if cache is expired
self.get_inventory()
self.write_to_cache()
else:
# get data from disk
self.get_inventory_from_cache()
def load_yaml_config(self, conf_file=None):
"""Load a yaml config file with credentials to query the
respective cloud for inventory.
"""
config = None
if not conf_file:
conf_file = self.config_file
with open(conf_file) as conf:
config = yaml.safe_load(conf)
# Provide a check for unique account names
if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
raise MultiInventoryException('Duplicate account names in config file')
return config
def get_provider_tags(self, provider, env=None):
"""Call <provider> and query all of the tags that are usuable
by ansible. If environment is empty use the default env.
"""
if not env:
env = os.environ
# Allow providers specified as relative paths in the config file
if os.path.isfile(os.path.join(self.file_path, provider)):
provider = os.path.join(self.file_path, provider)
# check to see if provider exists
if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
raise RuntimeError("Problem with the provider. Please check path " \
"and that it is executable. (%s)" % provider)
cmds = [provider]
if self.args.get('host', None):
cmds.append("--host")
cmds.append(self.args.get('host', None))
else:
cmds.append('--list')
if 'aws' in provider.lower():
cmds.append('--refresh-cache')
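# At this point cmds is e.g. ['<provider>', '--list'] or ['<provider>', '--host', '<hostname>'],
# with '--refresh-cache' appended for AWS providers.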
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
def generate_config(provider_files):
"""Generate the provider_files in a temporary directory.
"""
prefix = 'multi_inventory.'
tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
for provider_file in provider_files:
filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
filedes.write(content)
filedes.close()
return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
and call self.get_provider_tags.
'''
try:
all_results = []
tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
tmp_dir = None
if account.has_key('provider_files'):
tmp_dir = MultiInventory.generate_config(account['provider_files'])
tmp_dir_paths.append(tmp_dir)
# Update env vars after creating provider_config_files
# so that we can grab the tmp_dir if it exists
env = account.get('env_vars', {})
if env and tmp_dir:
for key, value in env.items():
env[key] = Template(value).substitute(tmpdir=tmp_dir)
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
# for each process collect stdout when its available
for name, process in processes.items():
out, err = process.communicate()
all_results.append({
"name": name,
"out": out.strip(),
"err": err.strip(),
"code": process.returncode
})
finally:
# Clean up the mkdtemp dirs
for tmp_dir in tmp_dir_paths:
shutil.rmtree(tmp_dir)
return all_results
def get_inventory(self):
"""Create the subprocess to fetch tags from a provider.
Host query:
Query to return a specific host. If > 1 queries have
results then fail.
List query:
Query all of the different accounts for their tags. Once completed
store all of their results into one merged updated hash.
"""
provider_results = self.run_provider()
# process --host results
# Use the result from any provider that exited 0 and returned data
if self.args.get('host', None):
count = 0
for results in provider_results:
if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
self.result = json.loads(results['out'])
count += 1
if count > 1:
raise RuntimeError("Found > 1 results for --host %s. \
This is an invalid state." % self.args.get('host', None))
# process --list results
else:
# For any non-zero, raise an error on it
for result in provider_results:
if result['code'] != 0:
err_msg = ['\nProblem fetching account: {name}',
'Error Code: {code}',
'StdErr: {err}',
'Stdout: {out}',
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
for acc_config in self.config['accounts']:
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
MultiInventory.merge_destructively(self.result, result)
def add_entry(self, data, keys, item):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
keys = a.b
item = c
'''
if "." in keys:
key, rest = keys.split(".", 1)
if key not in data:
data[key] = {}
self.add_entry(data[key], rest, item)
else:
data[keys] = item
def get_entry(self, data, keys):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
keys = a.b
return c
'''
if keys and "." in keys:
key, rest = keys.split(".", 1)
return self.get_entry(data[key], rest)
else:
return data.get(keys, None)
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
results = self.all_inventory_results[acc_config['name']]
results['all_hosts'] = results['_meta']['hostvars'].keys()
# Extra vars go here
for new_var, value in acc_config.get('extra_vars', {}).items():
for data in results['_meta']['hostvars'].values():
self.add_entry(data, new_var, value)
# Clone vars go here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
for data in results['_meta']['hostvars'].values():
self.add_entry(data, to_name, self.get_entry(data, from_name))
# Extra groups go here
for new_var, value in acc_config.get('extra_groups', {}).items():
for data in results['_meta']['hostvars'].values():
results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
# Clone groups go here
# Build a group based on the desired key name
for to_name, from_name in acc_config.get('clone_groups', {}).items():
for name, data in results['_meta']['hostvars'].items():
key = '%s_%s' % (to_name, self.get_entry(data, from_name))
if not results.has_key(key):
results[key] = []
results[key].append(name)
# store the results back into all_inventory_results
self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
"merges b into input_a"
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists, so add each element in b to a if it does not exist
elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
for result in input_b[key]:
if result not in input_a[key]:
input_a[key].append(result)
# a is a list and not b
elif isinstance(input_a[key], list):
if input_b[key] not in input_a[key]:
input_a[key].append(input_b[key])
elif isinstance(input_b[key], list):
input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
else:
input_a[key] = [input_a[key], input_b[key]]
else:
input_a[key] = input_b[key]
return input_a
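# Illustrative sketch (not in the original source) of how merge_destructively combines
# two inventory dicts; the values are hypothetical:
#   a = {'hosts': ['h1'], 'vars': {'x': 1}}
#   b = {'hosts': ['h2'], 'vars': {'y': 2}}
#   MultiInventory.merge_destructively(a, b)
#   # a is now {'hosts': ['h1', 'h2'], 'vars': {'x': 1, 'y': 2}}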
def is_cache_valid(self):
''' Determines whether the cache file has expired or is still valid '''
if os.path.isfile(self.cache_path):
mod_time = os.path.getmtime(self.cache_path)
current_time = time()
if (mod_time + self.config['cache_max_age']) > current_time:
return True
return False
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on a provider')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force a refresh of the cached inventory (default: False)')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store', default=False,
help='Get all the variables about a specific instance')
self.args = parser.parse_args().__dict__
def write_to_cache(self):
''' Writes data in JSON format to a file '''
# if it does not exist, try and create it.
if not os.path.isfile(self.cache_path):
path = os.path.dirname(self.cache_path)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
cache.write(json_data)
finally:
fcntl.flock(cache, fcntl.LOCK_UN)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
if not os.path.isfile(self.cache_path):
return None
with open(self.cache_path, 'r') as cache:
self.result = json.loads(cache.read())
return True
@classmethod
def json_format_dict(cls, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def result_str(self):
'''Return cache string stored in self.result'''
return self.json_format_dict(self.result, True)
if __name__ == "__main__":
MI2 = MultiInventory()
MI2.parse_cli_args()
MI2.run()
print MI2.result_str()
```
#### File: ooinstall/ansible_plugins/facts_callback.py
```python
import os
import yaml
class CallbackModule(object):
def __init__(self):
######################
# This is ugly stoopid. This should be updated in the following ways:
# 1) it should probably only be used for the
# openshift_facts.yml playbook, so maybe there's some way to check
# a variable that's set when that playbook is run?
try:
self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
except KeyError:
raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
'variable must be set.')
self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
os.O_WRONLY)
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
if res['invocation']['module_args'] == 'var=result':
facts = res['var']['result']['ansible_facts']['openshift']
hosts_yaml = {}
hosts_yaml[host] = facts
os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res):
pass
def runner_on_async_ok(self, host, res):
pass
def runner_on_async_failed(self, host, res):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
#pylint: disable=too-many-arguments
def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
``` |
{
"source": "jherning/link68",
"score": 3
} |
#### File: jherning/link68/customlink.py
```python
import serial
from settings import serial_port
import time
class glink:
def __init__(self):
print('Initializing Custom Link on ' + serial_port + ' ..')
self.ser = serial.Serial(
port = serial_port,
baudrate = 9600,
parity = serial.PARITY_NONE, # See PySerial documentation for constant options.
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
xonxoff = False,
rtscts = False,
dsrdtr = False,
timeout = 65 # Read timeout. The ~2s protocol timeout is not implemented.
# Set this to a number of seconds longer than needed to read 64k on your link.
# Or just remove it for no read timeout.
)
#self.ser.rts = 1 # You can turn on modem control lines if needed (as with the GrayLink).
#self.ser.dtr = 1
time.sleep(.1) # Give a little time for it to power up and init the lines.
# Remove or adjust if necessary.
self.ser.reset_input_buffer() # Necessary for GrayLink.
def read(self, numbytes):
return self.ser.read(numbytes)
def write(self, data): # data should be a bytearray
self.ser.write(data)
while self.ser.out_waiting: pass # Block until write finishes.
def __del__(self):
self.ser.close()
def softreset(self):
self.ser.reset_input_buffer() # GrayLink sends an extra byte when it times-out.
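# Illustrative usage sketch (not part of the original module); the byte values below are
# placeholders, not a documented packet:
#   link = glink()
#   link.write(bytearray([0x08, 0x68, 0x00, 0x00]))  # send a 4-byte packet header
#   reply = link.read(4)                             # block for 4 bytes or the timeout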
```
#### File: jherning/link68/link68.py
```python
from sys import argv
from os.path import exists
import time
# Now get Joe's modules:
from prot import *
from tifiles import *
import settings
from univ import *
# 'loud' is True when ls/find run as their own command, False when used internally for findget / overwrite checks
# Gets a listing from the calc and returns as a calcdir object.
def ls(link, loud):
if loud: print('Getting directory listing..')
entries = []
# Should get folders, files, and flash apps
sendglobalREQ(link)
quickget('ACK', link)
quickget('VAR', link) # disregarding
# Loop until EOT (needed for 92 only):
calcdata = calcdir()
while True:
quicksend('ACK', link)
quicksend('CTS', link)
quickget('ACK', link)
DATApack = quickget('DATA', link)
calcdata.parse_dirDATA(DATApack, loud)
quicksend('ACK', link)
if getpacket(link).CIDt == 'EOT': break
quicksend('ACK', link)
return(calcdata)
# List matching var path/names, return list of matching path/names.
def find(args, link, loud):
if loud: print('Getting directory listing to search..')
calclist = ls(link, False)
filelist = []
for strtofind in args:
strtofind = strtofind.replace('/', '\\')
for entry in calclist.files:
if entry.find(strtofind) != -1:
entry = entry.replace('\\', '/')
filelist.append(entry)
if loud: print(' ' + entry)
return(filelist)
# Combine find and get.
def findget(args, link, grouped):
if len(args) < 2:
print('Usage: findgetg [string(s) to match] group_file_base_name')
quit()
if grouped: groupfile = args.pop() # Remove group file name before searching.
filelist = find(args, link, False)
if grouped: filelist.append(groupfile)
get(filelist, link, grouped)
# Backup all variables to a group file.
def backup(args, link):
if len(args) == 0: backupname = 'backup'
else: backupname = args[0]
print('Getting directory listing..')
calclist = ls(link, False)
getargs = calclist.files
print(' ' + str(len(getargs)) + ' vars to backup.')
getargs.append(backupname) # Get needs the group file base name.
get(getargs, link, True)
# Load TI vars from computer files. Return list of tivar objects.
def loadvars(filenames):
tivars = []
for filename in filenames:
tivars.extend(parse_tifile(filename)) # Files may have multiple vars.
return(tivars)
# Load TI vars from computer then write individually.
def ungroup(filenames):
tivars = loadvars(filenames)
for var in tivars:
writevars(var.varname, [var], settings.calc_type)
# e.g. "ungroup folder.something.89p -92" will write folder.something.9xp
# Load TI vars from computer then write as a group file.
def group(filenames):
if len(filenames) < 3:
print('Usage: group [files to group] group_file_base_name')
quit()
pcfile = filenames.pop()
tivars = loadvars(filenames)
writevars(pcfile, tivars, settings.calc_type)
# Write flash file(s) to calc.
def flash(filenames, link, recovery):
flashvarsALL = []
for ffile in filenames: # Load flash files.
flashvarsALL.extend(parse_flashfile(ffile))
flashvars = [] # Check for overwrites
if (not settings.overwrite) and (not recovery):
print("Getting directory listing for overwrite check..")
calclist = ls(link, False)
for var in flashvarsALL:
if var.name in calclist.apps:
print('** ' + var.name + ' app exists. Skipping. **')
else:
flashvars.append(var)
else: flashvars = flashvarsALL
if recovery: # In recovery mode, we haven't auto-detected anything.
settings.comp_MIDt = 'Computer->89/92+/V200' # No TI-92s in this rodeo.
for var in flashvars: # Flash the Calc!
link.softreset() # Good for SilverLink
if var.TID == 0x23 and var.hw_id >= 8: # AMS flash V200/89T!
sendV200amsRTS(len(var.data), var.hw_id, link) # 0x08: V200, 0x09: Titanium
elif var.TID == 0x23: # AMS flash 89/92+
send8992amsRTS(var.name, len(var.data), link)
else:
sendRTS('', var.name, var.TID, len(var.data), link) # App
## Send 64k at a time:
while (var.data):
quickget('ACK', link)
quickget('CTS', link)
quicksend('ACK', link)
outpack = packet()
outpack.data.extend(var.data[0:64*1024])
var.data = var.data[64*1024:] # slice off data we'll be sending (kills object)
outpack.CIDt = 'DATA'
print(' Flashing ' + var.name + ' (' + str(len(outpack.data)) + ' byte block).')
outpack.buildsend(link)
quickget('ACK', link)
if var.data: # There's more!
quicksend('CONT', link)
quicksend('EOT', link)
quickget('ACK', link)
print('Done flashing.')
if recovery: quit()
# Load then put normal vars to calculator.
def put(filenames, link):
putvarsALL = loadvars(filenames)
putvars = []
if not settings.overwrite:
print("Getting directory listing for overwrite check..")
calclist = ls(link, False)
for var in putvarsALL:
if var.folder + '\\' + var.varname in calclist.files:
print('** ' + var.folder + '/' + var.varname + ' exists. Skipping. [Overwrite: -o] **')
else:
putvars.append(var)
else: putvars = putvarsALL
for var in putvars:
link.softreset() # SilverLink..
if settings.current: # Send to current dir.
var.folder = ''
sendRTS(var.folder, var.varname, TID_ft[var.TIDt], len(var.vardata), link)
quickget('ACK', link)
quickget('CTS', link)
quicksend('ACK', link)
# Build and send the DATA packet:
outpack = packet()
outpack.data.extend(b'\x00\x00\x00\x00')
outpack.data.extend(var.vardata)
outpack.CIDt = 'DATA'
print(' put ' + var.folder + '/' + var.varname + ' (' + var.TIDt + ', ' + str(len(var.vardata)) + ' bytes)' )
outpack.buildsend(link)
### /DATA
quickget('ACK', link)
quicksend('EOT', link) # Send EOT for each file. FIX?
quickget('ACK', link) # Doesn't seem to work w/o doing this.
print('Done putting files.')
# Get vars from calc; write individually or as a group file.
def get(args, link, grouped):
sendvars = []
if grouped:
groupfilename = args.pop()
for filename in args:
print("get " + filename)
filename = filename.replace("/", "\\") # Rewrite forward slash from command line.
sendvarREQ(filename, link)
quickget('ACK', link)
headerpack = getpacket(link) # Is it a VAR or SKE?
if headerpack.CIDt == 'VAR':
quicksend('ACK', link)
quicksend('CTS', link)
quickget('ACK', link)
datapack = quickget('DATA', link)
quicksend('ACK', link)
quickget('EOT', link)
quicksend('ACK', link)
# Now parse the header/data.
calcvar = tivar()
calcvar.parse_silentreq(headerpack, datapack)
# If invoked as "folder\filename", set folder.
if "\\" in filename:
calcvar.folder = filename.split("\\")[0]
else:
calcvar.folder = 'main' # If no dir specified, set to main. FIX?
if grouped:
sendvars.append(calcvar) # Add to list for group send later.
else:
writevars(filename, [calcvar], settings.calc_type)
elif headerpack.CIDt == 'SKE':
print(' ** Got SKE from calc. ' + filename + ' probably does not exist **')
time.sleep(2.1) # Calculator seems to timeout after SKE.
# Then graylink gets a byte, so reset:
link.softreset() # SilverLink!
print("Done getting files.")
if grouped:
writevars(groupfilename, sendvars, settings.calc_type)
def screenshot(args, link):
import png
print('Getting screenshot.')
quicksend('SCR', link)
quickget('ACK', link)
timer = time.time()
inpack = quickget('DATA', link)
timer = time.time() - timer
print (' Link rate: %i bytes/s' % ((inpack.data_len + 2)/timer))
quicksend('ACK', link)
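# The calculator sends a 240x128 screen packed 8 pixels per byte (30 bytes per row,
# MSB first); a set bit is a dark LCD pixel and becomes 0 (black) in the 1-bit PNG.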
sshot = [[0 for i in range(240)] for j in range(128)] # 240 cols, 128 rows
for row in range(0, 128):
for colbyte in range(0,30):
recvd = inpack.data[row*30 + colbyte]
for i in range(8):
sshot[row][colbyte*8+7-i] = int(recvd&(2**i) == 0)
sshot89 = [[0 for i in range(160)] for j in range(100)] # 160x100
for row in range(0,100): # Crop for TI-89
for col in range(0,160):
sshot89[row][col] = sshot[row][col]
if len(args) > 0:
filename = args[0] + '.png'
else: filename = 'screenshot.png'
if exists(filename) and settings.overwrite == False:
print('** ' + filename + ' exists. Quitting. [Overwrite: -o] **')
quit()
with open(filename, 'wb') as f:
if settings.calc_type == 'TI-89':
w = png.Writer(160, 100, greyscale=1, bitdepth=1)
w.write(f, sshot89)
else:
w = png.Writer(240, 128, greyscale=1, bitdepth=1)
w.write(f, sshot)
f.close()
print(' Wrote ' + filename + '.')
#### MAIN ####
# Parse argv[] into cmd and args[], process flags.
argstemp = argv
argstemp.pop(0)
args = []
for el in argstemp: # Process any flags:
if el == '-o':
settings.overwrite = True
elif el == '-co':
settings.current = True
elif el == '-89':
settings.calc_type = 'TI-89'
elif el == '-92':
settings.calc_type = 'TI-92'
elif el == '-92+':
settings.calc_type = 'TI-92+'
else: args.append(el)
if settings.current: settings.overwrite = True
if settings.current: print('** Current folder mode enabled. **')
if settings.overwrite: print('** Overwrite mode enabled. **')
if len(args) == 0:
print('Link 68 version 0.2 (alpha)')
print('Usage: link68.py COMMAND ARGS [optional switches]')
print(' COMMANDS: ls, shot, get, getg, put, find, findget, findgetg, flash, rflash, backup')
print(' OPTIONS: -o: overwrite mode, -co: current folder + overwrite mode')
print(' Settings contained in settings.py.')
quit()
if args: cmd = args.pop(0)
# Do any commands that don't require the link cable, then quit:
if cmd == 'ungroup':
ungroup(args)
quit()
if cmd == 'group':
group(args)
quit()
# Init the link cable.
if settings.link_type == 'gray':
from graylink import glink
elif settings.link_type == 'black':
from blacklink import glink
elif settings.link_type == 'silver':
from silverlink import glink
elif settings.link_type == 'custom':
from customlink import glink
link = glink()
# Skip connectivity test for rflash:
if cmd == 'rflash': flash(args, link, True)
# Check connectivity & detect calculator type
settings.comp_MIDt = 'Computer->92' # Pretend we think it's a 92. Later models respond (as a 92), so it's safe.
quicksend('RDY', link)
inpack = quickget('ACK', link)
if inpack.header[3] & 1 == 1:
print("Calc not ready. Are you at the homescreen?")
quit()
if inpack.header[2] == 0x00: # Byte 3 of ACK seems to contain hardware version info.
# Known responses: 0x0c: TI-92+ w/AMS 2.09, 0x04: TI-92+ w/ AMS 2.05
# We are assuming all plain 92's give 00. TI-92-II???
settings.calc_type = 'TI-92'
else:
settings.comp_MIDt = 'Computer->89/92+/V200' # It's not a 92. Should ACK as 89 or 92+/V200.
quicksend('RDY', link)
inpack = quickget('ACK', link)
settings.calc_type = inpack.MIDt
print(' ' + settings.calc_type + ' detected.')
# Do requested linking command.
if cmd == 'shot': screenshot(args, link)
elif cmd == 'get': get(args, link, False)
elif cmd == 'getg': get(args, link, True)
elif cmd == 'put': put(args, link)
elif cmd == 'ls': ls(link, True)
elif cmd == 'find': find(args, link, True)
elif cmd == 'backup': backup(args, link)
elif cmd == 'flash': flash(args, link, False)
elif cmd == 'findget': findget(args, link, False)
elif cmd == 'findgetg': findget(args, link, True)
else: print('Unknown command: ' + cmd)
``` |
{
"source": "jherrerotardon/PoliticsForecast",
"score": 3
} |
#### File: PoliticsForecast/src/twitter_crawler.py
```python
import csv
import sys
from utils.paths import get_data_path
import tweepy
sys.path.append("./")
from config.credentials import access_tokens, consumer_keys
def authenticate_twitter():
auth = tweepy.OAuthHandler(**consumer_keys)
auth.set_access_token(access_tokens['access_token_key'], access_tokens['access_token_secret'])
api_ = tweepy.API(auth, wait_on_rate_limit=True)
return api_
def crawl_tweets(api_):
cursor = tweepy.Cursor(api_.search,
q=f"brexit -filter:retweets",
count=100,
tweet_mode="extended",
lang="en",
since="2019-11-28",
until="2019-12-10"
)
fieldnames = ['id', 'created_at', 'text', 'username', 'verified']
with open(get_data_path() + '/tweets.csv', mode='a') as file:
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for tweet in cursor.items():
dto_tweet = {
'id': tweet.id,
'created_at': tweet.created_at,
'text': tweet.full_text,
'username': tweet.author.screen_name,
'verified': tweet.author.verified,
}
writer.writerow(dto_tweet)
if __name__ == '__main__':
api = authenticate_twitter()
crawl_tweets(api)
``` |
{
"source": "jherrerotardon/spies",
"score": 3
} |
#### File: src/algorithm/sentimental_analyser.py
```python
import csv
import gzip
import pickle
import re
import string
from io import StringIO
from pathlib import Path
import nltk
from nltk.classify import NaiveBayesClassifier
from nltk.classify.util import accuracy
from nltk.tokenize import RegexpTokenizer
from pyframework.container import Container
# NLTK requirements.
NLTK_RESOURCES_DIR = Container().root_path() + '/.venv/nltk_data'
NLTK_RESOURCES = [
'corpora/stopwords'
]
# Tries to load nltk resource if already has not be loaded.
for resource in NLTK_RESOURCES:
try:
nltk.data.find(resource)
except LookupError:
nltk.download(resource.split('/')[-1], download_dir=NLTK_RESOURCES_DIR)
from nltk.corpus import stopwords
stopwords_list = set(stopwords.words('english'))
def tokenize_clean_text(text) -> list:
"""Clean text and tokenize it.
Remove @ from tweets, rare characters, remove stopwords, etc.
:param text:
:return:
"""
text = re.sub(r"https?\S+", "", text)
text = re.sub(r"@\S+", "", text)
text = re.sub('\[.*?¿\]\%', ' ', text)
text = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)
text = re.sub('\w*\d\w*', '', text)
text = text.lower()
# Remove punctuation.
tokenizer = RegexpTokenizer(r'\w+')
word_tokens = tokenizer.tokenize(text)
return [w for w in word_tokens if w not in stopwords_list]
def word_feats(words):
"""Creates the matrix of words.
:param words:
:return:
"""
return dict([(word, True) for word in words])
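# Illustrative sketch (not in the original source) of the two helpers above, assuming the
# standard English stopword list:
#   tokenize_clean_text("Loving this! https://t.co/x @user")  # -> ['loving']
#   word_feats(['loving'])                                    # -> {'loving': True}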
class SentimentalAnalyser:
"""Naive-Bayes sentiment analyzer. """
NEGATIVE = '0'
POSITIVE = '4'
NEUTRAL = '2'
UNKNOWN = 'unk'
"""Possible sentiments."""
DATASET_FILE = 'sentimental_dataset.gz'
MODEL_FILE = 'sentimental_analyser.pickle'
_classifier = None
"""Sentimental classifier model. """
def __init__(self):
self._load_model()
def train(self):
"""Trains new sentimental analyzer model.
:return:
"""
data = self._get_train_data()
percentage_to_train = 0.9
neg_feats = [(word_feats(tokenize_clean_text(tweet[1])), self.NEGATIVE)
for tweet in data if tweet[0] == self.NEGATIVE]
pos_feats = [(word_feats(tokenize_clean_text(tweet[1])), self.POSITIVE)
for tweet in data if tweet[0] == self.POSITIVE]
del data
neg_cutoff = round(len(neg_feats) * percentage_to_train)
pos_cutoff = round(len(pos_feats) * percentage_to_train)
train_feats = neg_feats[:neg_cutoff] + pos_feats[:pos_cutoff]
test_feats = neg_feats[neg_cutoff:] + pos_feats[pos_cutoff:]
# Train Classifier.
print('train on %d instances, test on %d instances' % (len(train_feats), len(test_feats)))
self._classifier = NaiveBayesClassifier.train(train_feats)
print('accuracy: ', accuracy(self._classifier, test_feats))
self._classifier.show_most_informative_features()
def classify(self, text: str):
"""Classify the list of texts in positive or negative sentiment.
:param text:
:return:
"""
result = self._classifier.classify(word_feats(tokenize_clean_text(text)))
return result
def _get_train_data(self) -> list:
"""Returns a list with text and sentiments to train process.
:return:
"""
with gzip.open(self._dataset_file_path()) as file:
content = file.read().decode('latin-1')
content = [row for row in csv.reader(StringIO(content), delimiter=',')]
content.pop(0)
return content
def _dataset_file_path(self) -> str:
"""Returns path for data to train model.
:return:
"""
return Container().data_path() + '/' + self.DATASET_FILE
def _load_model(self):
"""Tries to load model from storage. If not exists, train new
and stored it.
:return:
"""
model_storage = self._get_model_storage()
if model_storage.is_file():
self._classifier = pickle.load(open(str(model_storage), "rb"))
else:
self.train()
self.storage_model()
def storage_model(self):
"""Storage model in storage dir.
:return:
"""
model_storage = self._get_model_storage()
pickle.dump(self._classifier, open(str(model_storage), "wb"))
def _get_model_storage(self):
"""Returns file path where model should be stored.
:return:
"""
return Path(Container().storage_path() + '/models/' + self.MODEL_FILE)
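# Illustrative usage sketch (not part of the original module):
#   analyser = SentimentalAnalyser()                    # loads or trains the model
#   analyser.classify('Great food and lovely staff')    # -> '4' (POSITIVE) or '0' (NEGATIVE)
# The label returned depends on the Naive Bayes model found in storage.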
```
#### File: src/algorithm/translator.py
```python
from googletrans import Translator as GoogleTranslator
class Translator:
"""Translator wrapper. """
_translator = None
"""Translator instance. """
def __init__(self):
self._translator = GoogleTranslator()
def to_english(self, text: str) -> str:
"""Returns strings translated to English.
:param text:
:return:
"""
translation = self._translator.translate(text, src='es', dest='en')
return translation.text
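# Illustrative usage sketch (not part of the original module); the exact wording returned
# by googletrans may vary between runs:
#   Translator().to_english('La comida estaba deliciosa')  # -> e.g. 'The food was delicious'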
```
#### File: src/algorithm/word_cloud_generator.py
```python
from pathlib import Path
import matplotlib.pyplot as plt
import nltk
from pyframework.container import Container
from pyframework.helpers.lists import array_column
from wordcloud import WordCloud
from ..models.review import Review
# NLTK requirements.
NLTK_RESOURCES_DIR = Container().root_path() + '/.venv/nltk_data'
NLTK_RESOURCES = [
'corpora/stopwords'
]
# Tries to load nltk resource if already has not be loaded.
for resource in NLTK_RESOURCES:
try:
nltk.data.find(resource)
except LookupError:
nltk.download(resource.split('/')[-1], download_dir=NLTK_RESOURCES_DIR)
from nltk.corpus import stopwords
stopwords_list = set(stopwords.words('spanish'))
class WordCloudGenerator:
@staticmethod
def generate(restaurant_id: int):
"""Generates and save it a plot with wordcloud from
reviews of restaurant.
:param restaurant_id:
:return:
"""
reviews = Review().get_from_restaurants([restaurant_id])
text = array_column(reviews, 'text')
text = ' '.join(text)
if not text:
return
wordcloud = WordCloud(
stopwords=stopwords_list,
background_color="white",
colormap="Dark2",
max_font_size=150,
random_state=42
)
plt.rcParams['figure.figsize'] = [16, 12]
# Create plot.
wordcloud.generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
WordCloudGenerator._storage(wordcloud, restaurant_id)
plt.clf()
plt.close('all')
@staticmethod
def _storage(wordcloud, restaurant_id: int):
"""Storage current plot in storage directory.
:param wordcloud:
:param restaurant_id:
:return:
"""
file = '{}/{}/{}.png'.format(
Container().data_path(),
'restaurants',
restaurant_id
)
Path(file).parents[0].mkdir(exist_ok=True)
wordcloud.to_file(file)
```
#### File: commands/fire/fire_city_restaurants_download.py
```python
from pyframework.exceptions.custom_exceptions import ArgumentException
from .base_fire import BaseFire, Event
from ...models.city import City
class FireCityRestaurantsDownload(BaseFire):
_name = 'fire:cityDownload'
_description = 'Launch an event to download entities info from city.'
_arguments = [
['-c', '--city', 'City ID to be fired.']
]
_city = {}
def set_up(self):
city_id = self.get_argument('city')
if not city_id:
raise ArgumentException('City ID is required.')
self._city = City().get_city(city_id)
if not self._city:
raise ArgumentException('No valid city ID.')
def handle(self) -> int:
info = {
'place_id': self._city['id']
}
self._fire_event(Event.PLACE_DOWNLOAD_ACTION, info)
return self.RETURN_SUCCESS
```
#### File: commands/fire/fire_restaurants_info_download.py
```python
from pyframework.exceptions.custom_exceptions import ArgumentException
from pyframework.helpers.lists import array_column
from .base_fire import BaseFire, Event
from ...models.city_endpoint import CityEndpoint
from ...models.restaurant import Restaurant
class FireRestaurantsInfoDownload(BaseFire):
_name = 'fire:restaurantsInfoDownload'
_description = 'Launch an event to download entity information.'
_arguments = [
['-e', '--endpoint', 'Endpoint ID to be fired.'],
['-c', '--city', 'City ID to be fired.'],
]
_city_id = int
"""City to be downloaded. """
_restaurants = []
"""Restaurants to be downloaded. """
_endpoint_id = int
"""Endpoint to be downloaded. """
def set_up(self):
self._city_id = self.get_argument('city')
if not self._city_id:
raise ArgumentException('City ID is required.')
self._city_id = int(self._city_id)
self._endpoint_id = self.get_argument('endpoint')
if self._endpoint_id is None:
raise ArgumentException('Endpoint ID is required.')
self._endpoint_id = int(self._endpoint_id)
download = CityEndpoint().get_downloads(self._city_id)
if not any([self._endpoint_id == task['endpoint_id'] for task in download]):
raise ArgumentException('Endpoint {} not enabled on city {}.'.format(self._endpoint_id, self._city_id))
self._restaurants = Restaurant().get_restaurants_on_city(self._city_id)
def handle(self) -> int:
info = {
'restaurants_ids': array_column(self._restaurants, 'id'),
'endpoint_id': self._endpoint_id,
}
self._fire_event(Event.RESTAURANTS_INFO_DOWNLOAD_ACTION, info)
return self.RETURN_SUCCESS
```
#### File: commands/tools/train.py
```python
from pyframework.providers.cli.command import Command
from ...algorithm.restaurant_recommender import RestaurantRecommender
from ...models.user import User
class Train(Command):
_name = 'tools:generateSentimentalAnalyzer'
_description = "Command to do generate model for analyzer sentient."
_arguments = [
['-c', '--cities', 'Cities IDs separated by commas.'],
['-u', '--user', 'User ID to recommend.']
]
_cities = []
"""Cities to do recommendations. """
_user = []
"""Cities to do recommendations. """
def set_up(self):
self._cities = self.get_argument('cities')
if not self._cities:
raise Exception('Cities can not be empty')
self._cities = self._cities.split(',')
self._user = User().get_user(self.get_argument('user'))
if not self._user:
raise Exception('User can not be empty')
def handle(self) -> int:
recommender = RestaurantRecommender()
recommender.train(self._cities)
return self.RETURN_SUCCESS
```
#### File: src/interfaces/factory_interface.py
```python
from pyframework.exceptions.custom_exceptions import ConfigurationException
from pyframework.helpers.configuration import get_module
class FactoryAbstract:
"""Abstract factory class to instantiate object on inside levels of current module. """
_module_path = ''
_module_name = ''
@classmethod
def get_class(cls, **kwargs):
"""Returns the class definition found by factory.
:param kwargs:
:return:
"""
if not (from_module := cls.get_module(**kwargs)):
raise ConfigurationException('From module import not configured.')
if not (class_name := cls.get_class_name(**kwargs)):
raise ConfigurationException('Class name not configured.')
module = get_module(
from_module,
'.'.join(cls.__module__.split('.')[:-1])
)
if not module:
raise Exception('Module <{}> is not available.'.format(
'.'.join(cls.__module__.split('.')[:-1]) + from_module
))
metaclass = getattr(module, class_name)
return metaclass
@classmethod
def instance(cls, **kwargs):
"""Create dynamic instance of an object from next level.
:param kwargs:
:return:
"""
instance = cls.get_class(**kwargs)(**kwargs)
return instance
@classmethod
def get_module(cls, **kwargs) -> str:
module = '.{}.{}'.format(
cls._get_module_path(**kwargs),
cls._get_module_name(**kwargs)
)
return module
@classmethod
def _get_module_path(cls, product: dict, **kwargs) -> str:
return cls._module_path
@classmethod
def _get_module_name(cls, **kwargs) -> str:
return cls._module_name
@classmethod
def get_class_name(cls, **kwargs) -> str:
return cls._get_module_name(**kwargs).capitalize()
```
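For context, a minimal sketch of how a concrete factory might build on `FactoryAbstract`. The `VendorFactory` name, the `vendors` package and the `vendor` keyword are hypothetical; the only behaviour assumed is what the base class above shows (`instance()` imports `.<module_path>.<module_name>` relative to the factory's package and instantiates the capitalized class name, forwarding the kwargs).
```python
# Hypothetical sketch -- names and module layout are assumptions, not part of the repo.
class VendorFactory(FactoryAbstract):
    """Builds vendor client objects assumed to live in a sibling package, e.g. .vendors.<name>."""

    _module_path = 'vendors'

    @classmethod
    def _get_module_path(cls, **kwargs) -> str:
        return cls._module_path

    @classmethod
    def _get_module_name(cls, **kwargs) -> str:
        # Pick the submodule from a caller-supplied keyword.
        return kwargs.get('vendor', '')


# Would import .vendors.acme relative to this factory's package and
# return an instance of its Acme class (constructor signature not shown here):
# client = VendorFactory.instance(vendor='acme')
```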
#### File: src/models/city_endpoint.py
```python
from enum import Enum
from pyframework.models.mysql_model import MySQLModel
class Column(Enum):
"""Columns of table. """
CITY_ID = 'city_id'
ENDPOINT_ID = 'endpoint_id'
ENDPOINT_CODE = 'endpoint_code'
ENDPOINT_NAME = 'endpoint_name'
CREATED_AT = 'created_at'
UPDATED_AT = 'updated_at'
class CityEndpoint(MySQLModel):
_columns = [column.value for column in Column]
_database = 'tourism'
_table = 'city_endpoint'
def __init__(self):
super(CityEndpoint, self).__init__()
self._use_db()
    def get_downloads(self, city_id: int):
        """Returns the active endpoint info for the given city.
:param city_id:
:return:
"""
columns = [
Column.ENDPOINT_ID.value,
Column.CITY_ID.value,
Column.ENDPOINT_CODE.value,
Column.ENDPOINT_NAME.value,
]
        columns = [self._table + '.' + suffix for suffix in columns]
foreign_columns = ['endpoint.url', 'endpoint.name']
command = 'SELECT {} FROM {} LEFT JOIN {} ON city_endpoint.{}=endpoint.id WHERE {}=%s AND {}=1'.format(
','.join(columns + foreign_columns),
self._table,
'endpoint',
Column.ENDPOINT_ID.value,
self._table + '.' + Column.CITY_ID.value,
'endpoint.enabled'
)
        cursor = self.execute(command, (city_id,))
        keys = [
            'endpoint_id',
            'city_id',
            'endpoint_code',
            'endpoint_name',
            'url',
            'name',
        ]
        result = []
        for data in cursor:
            result.append({key: value for key, value in zip(keys, data)})
        return result
``` |
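For orientation, a call to the model above returns one dict per enabled endpoint. The sketch below is illustrative only: it needs a configured MySQL connection with seeded `city_endpoint`/`endpoint` tables, and the values shown are made up.
```python
# Illustrative only -- requires a configured MySQL connection and seeded tables.
downloads = CityEndpoint().get_downloads(city_id=1)
# Example shape of the result (values are invented):
# [{'endpoint_id': 3, 'city_id': 1, 'endpoint_code': 'GD', 'endpoint_name': 'Guide',
#   'url': 'https://example.com/api', 'name': 'Guide'}, ...]
```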
{
"source": "Jherrild/Espyresso",
"score": 3
} |
#### File: Espyresso/lib/PID.py
```python
class PIDController(object):
def __init__(self, p=2.0, i=0.0, d=1.0, set_temp=200, i_max=500, i_min=-500):
self.kp = p
self.ki = i
self.kd = d
self.set_point = set_temp
self.i_max = i_max
self.i_min = i_min
self.dev = 0
self.int = 0
self.error = 0
def set_temp(self, temp):
self.set_point = temp
self.dev = 0
self.int = 0
def set_kp(self, kp):
self.kp = kp
def set_ki(self, ki):
self.ki = ki
def set_kd(self, kd):
self.kd = kd
def update(self, current_temp):
self.error = self.set_point - current_temp
p_value = self.kp * self.error
d_value = self.kd * (self.error - self.dev)
self.dev = self.error
self.int = self.int + self.error
if self.int > self.i_max:
self.int = self.i_max
elif self.int < self.i_min:
self.int = self.i_min
i_value = self.int * self.ki
output = p_value + i_value + d_value
print(self.set_point)
return output
``` |
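A quick usage sketch of the controller above. The import path assumes the `lib/PID.py` location shown in the file header, and the temperature readings are simulated stand-ins for a real boiler sensor.
```python
# Illustrative only -- fake readings stand in for real thermocouple input.
from lib.PID import PIDController

pid = PIDController(p=2.0, i=0.01, d=1.0, set_temp=200)

fake_readings = [150, 170, 185, 195, 199, 201]
for temp in fake_readings:
    power = pid.update(temp)
    # In the real machine this value would drive the heater (e.g. a PWM duty cycle).
    print('temp=%s -> output=%.2f' % (temp, power))
```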
{
"source": "JHerrmann01/AESDataManipulator",
"score": 2
} |
#### File: JHerrmann01/AESDataManipulator/AESDataV3.py
```python
from __future__ import print_function
from os.path import join, dirname, abspath
import xlrd
from xlrd.sheet import ctype_text
import xlsxwriter
####################
def loadSpreadsheet():
fname = join(dirname(dirname(abspath(__file__))), 'AES/First Spreadsheet', 'GBZ65745 Excel SE855 GLENWOOD RD-1 (copy).xls')
xl_workbook = xlrd.open_workbook(fname)
xl_sheet = xl_workbook.sheet_by_name("Results")
return xl_workbook, xl_sheet
def grabSimpleInformation(xl_workbook, xl_sheet):
numSpaces = 0
generalAreas = {}
num_cols = xl_sheet.ncols
for row_idx in range(8, xl_sheet.nrows-7):
if(xl_sheet.cell(row_idx,0).value == "Mercury"):
Mercury_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "pH at 25C - Soil"):
Corrosivity_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Flash Point"):
Flashpoint_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Ignitability"):
Ignitability_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Cyanide"):
Reactivity_Values_Cyanide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Sulfide"):
Reactivity_Values_Sulfide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Total Cyanide (SW9010C Distill.)"):
Cyanide_Values_Raw = (xl_sheet.row(row_idx))
if(numSpaces%3 == 0):
generalAreas[int(row_idx)] = str(xl_sheet.cell(row_idx,0).value)
numSpaces +=1
if(xl_sheet.cell(row_idx,0).value == ""):
numSpaces += 1
return Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas
def sortGeneralAreas(generalAreas):
keys = generalAreas.keys()
sortedGenAreas = [[0 for i in range(2)]for i in range(len(keys))]
for x in range(0,len(keys)):
smallestKey = 100000
for key in generalAreas.keys():
if(key < smallestKey):
smallestKey = key
sortedGenAreas[x][0] = int(smallestKey)
sortedGenAreas[x][1] = str(generalAreas.pop(smallestKey))
return sortedGenAreas
def insertRowsIntoAreas(xl_sheet, sortedGenAreas):
rowsInArea = [[""]for i in range(len(sortedGenAreas))]
for x in range(0,len(sortedGenAreas)):
rowsInArea[x][0] = sortedGenAreas[x][1]
numAreas = len(sortedGenAreas)
for x in range(0 , numAreas):
if(x < numAreas-1):
for y in range(sortedGenAreas[x][0]+1, sortedGenAreas[x+1][0]-2):
rowsInArea[x].append(xl_sheet.row(y))
else:
for y in range(sortedGenAreas[x][0]+1, xl_sheet.nrows-7):
rowsInArea[x].append(xl_sheet.row(y))
return rowsInArea
print("Beginning program...")
#Loading the file to be parsed
xl_workbook, xl_sheet = loadSpreadsheet()
#Grabbing basic information
Company_Name = xl_sheet.cell(0, 0).value
Type_Samples_Collected_Raw = xl_sheet.row(4)
global firstIndex
firstIndex = 6
#Begin parsing to find simple useful information
Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas = grabSimpleInformation(xl_workbook, xl_sheet)
#Sort the general areas in increasing order(Row number)
sortedGenAreas = sortGeneralAreas(generalAreas)
#Insert the rows that belong to each respective area
rowsInArea = insertRowsIntoAreas(xl_sheet, sortedGenAreas)
print("Done Parsing")
print()
########################################################################################################################
def startWritingFinalFile():
workbook = xlsxwriter.Workbook('/home/jeremy/Desktop/AES/Excel_Reformatting.xlsx')
worksheet = workbook.add_worksheet()
return workbook, worksheet
#Refining a given row
def valueRefinerMetals(inputArrayRaw):
outputArray = []
pos = 0
units = str(inputArrayRaw[2].value)
divisor = 1
if(units[0:2] == "ug"):
divisor = 1000
for value in inputArrayRaw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos == 0) or (pos == 2)):
if(pos == 0):
outputArray.append(str(value.value))
elif(pos == 2):
outputArray.append("ppm")
outputArray.append("")
elif(str(value.value).find("<") == -1):
outputArray.append(str(round((float(value.value)/divisor), 5)))
else:
outputArray.append("N.D.")
pos+=1
return(outputArray)
def isDetected(compound):
hasFloat = False
for x in compound:
try:
val = float(x)
hasFloat = True
break
except Exception as e:
val = ""
return hasFloat
def isNumber(value):
try:
val = float(value)
return True
except Exception as e:
return False
def removeUselessRows(rowsInArea, index):
y = 1
lenRow = (len(rowsInArea[index][1]))
while(y < len(rowsInArea[index])):
if not isDetected(rowsInArea[index][y]):
rowsInArea[index].remove(rowsInArea[index][y])
y -= 1
y += 1
if(len(rowsInArea[index]) == 1):
emptyArray = ["None Detected", "_", "_"]
for x in range(len(emptyArray), lenRow):
emptyArray.append("N.D.")
rowsInArea[index].append(emptyArray)
return rowsInArea[index]
def createBeginning(worksheet, currLine):
line = 1
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(64+((x+offset)%26))
finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
for x in range(0, 5):
worksheet.merge_range("B"+str(line)+":"+finalLetter+str(line), "")
line += 1
return worksheet, currLine
def createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne):
formatOne.set_text_wrap(True)
Type_Samples_Collected = []
pos = 0
for value in Type_Samples_Collected_Raw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos ==0)):
Type_Samples_Collected.append(value.value)
pos+=1
worksheet.write('B'+str(currLine), 'Parameter', formatOne)
worksheet.write('C'+str(currLine), 'Compounds Detected', formatOne)
worksheet.write('D'+str(currLine), 'Units', formatOne)
worksheet.write('E'+str(currLine), 'NYSDEC Part 375 Unrestricted Use Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine, Type_Samples_Collected
def addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo):
Mercury_Values = valueRefinerMetals(Mercury_Values_Raw)
offset = 2
worksheet.write('B'+str(currLine), 'Mercury 7471', formatOne)
for x in range(0, len(Mercury_Values)):
if(isNumber(Mercury_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Mercury_Values
def addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPCBS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "PCBs By SW8082A":
indexOfPCBS = x
for x in range(1, len(rowsInArea[indexOfPCBS])):
rowsInArea[indexOfPCBS][x] = valueRefinerMetals(rowsInArea[indexOfPCBS][x])
rowsInArea[indexOfPCBS] = removeUselessRows(rowsInArea, indexOfPCBS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPCBS])):
for y in range(0, len(rowsInArea[indexOfPCBS][x])):
if(isNumber(rowsInArea[indexOfPCBS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPCBS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'PCBS', formatOne)
else:
worksheet.write('B'+str(firstLine), 'PCBS',formatOne)
return worksheet, currLine
def addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Pesticides - Soil By SW8081B":
indexOfPesticides = x
for x in range(1, len(rowsInArea[indexOfPesticides])):
rowsInArea[indexOfPesticides][x] = valueRefinerMetals(rowsInArea[indexOfPesticides][x])
rowsInArea[indexOfPesticides] = removeUselessRows(rowsInArea, indexOfPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPesticides])):
for y in range(0, len(rowsInArea[indexOfPesticides][x])):
if(isNumber(rowsInArea[indexOfPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Pesticides', formatOne)
return worksheet, currLine
def addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, Total":
indexOfMetals = x
for x in range(1, len(rowsInArea[indexOfMetals])):
rowsInArea[indexOfMetals][x] = valueRefinerMetals(rowsInArea[indexOfMetals][x])
rowsInArea[indexOfMetals] = removeUselessRows(rowsInArea, indexOfMetals)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Metals, Total')
for x in range(1, len(rowsInArea[indexOfMetals])):
if(rowsInArea[indexOfMetals][x][0] != "Mercury"):
for y in range(0, len(rowsInArea[indexOfMetals][x])):
if(isNumber(rowsInArea[indexOfMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+offset+y))+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Metals', formatOne)
return worksheet, currLine
def addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo):
Cyanide_Values = valueRefinerMetals(Cyanide_Values_Raw)
worksheet.write('B'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(0, len(Cyanide_Values)):
if(isNumber(Cyanide_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Cyanide_Values
def addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfSemiVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Semivolatiles By SW8270D":
indexOfSemiVolatiles = x
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
rowsInArea[indexOfSemiVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfSemiVolatiles][x])
rowsInArea[indexOfSemiVolatiles] = removeUselessRows(rowsInArea, indexOfSemiVolatiles)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
for y in range(0, len(rowsInArea[indexOfSemiVolatiles][x])):
if(isNumber(rowsInArea[indexOfSemiVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSemiVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'SemiVolatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'SemiVolatiles', formatOne)
return worksheet, currLine
def addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Volatiles (TCL) By SW8260C":
indexOfVolatiles = x
for x in range(1, len(rowsInArea[indexOfVolatiles])):
rowsInArea[indexOfVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfVolatiles][x])
rowsInArea[indexOfVolatiles] = removeUselessRows(rowsInArea, indexOfVolatiles)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Volatiles (TCL) By SW8260C')
for x in range(1, len(rowsInArea[indexOfVolatiles])):
for y in range(0, len(rowsInArea[indexOfVolatiles][x])):
if(isNumber(rowsInArea[indexOfVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Volatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Volatiles', formatOne)
return worksheet, currLine
def createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'RCRA Characteristics ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'Regulatory Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine
def addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne):
Corrosivity_Values = valueRefinerMetals(Corrosivity_Values_Raw)
worksheet.write('B'+str(currLine), 'Corrosivity', formatOne)
offset = 2
for x in range(0,len(Corrosivity_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Corrosivity_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Corrosivity_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Corrosivity_Values
def addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne):
Flashpoint_Values = []
pos = 0
for value in Flashpoint_Values_Raw:
if(pos == 0):
Flashpoint_Values.append(value.value)
Flashpoint_Values.append(" ")
Flashpoint_Values.append("Degree F")
Flashpoint_Values.append(">200 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Flashpoint_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Flashpoint_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Flashpoint_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Flashpoint_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Flashpoint_Values
def addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne):
Ignitability_Values = []
pos = 0
for value in Ignitability_Values_Raw:
if(pos == 0):
Ignitability_Values.append(value.value)
Ignitability_Values.append(" ")
Ignitability_Values.append("Degree F")
Ignitability_Values.append("<140 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Ignitability_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Ignitability_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Ignitability_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Ignitability_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Ignitability_Values
def addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne):
Reactivity_Values_Cyanide = valueRefinerMetals(Reactivity_Values_Cyanide_Raw)
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Reactivity', formatOne)
worksheet.write('C'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(1,len(Reactivity_Values_Cyanide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
currLine += 1
Reactivity_Values_Sulfide = valueRefinerMetals(Reactivity_Values_Sulfide_Raw)
worksheet.write('C'+str(currLine), 'Sulfide', formatOne)
for x in range(1,len(Reactivity_Values_Sulfide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
currLine += 1
return worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide
def createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'Toxicity ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'TCLP Regulatory Criteria', formatOne)
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
print(firstLetter)
secondLetter = chr(64+((x+offset)%26))
print(secondLetter)
finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
worksheet.merge_range("F"+str(currLine)+":"+finalLetter+str(currLine), "", formatOne)
currLine += 1
return worksheet, currLine
def addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfTCLPMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, TCLP":
indexOfTCLPMetals = x
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
rowsInArea[indexOfTCLPMetals][x] = valueRefinerMetals(rowsInArea[indexOfTCLPMetals][x])
rowsInArea[indexOfTCLPMetals] = removeUselessRows(rowsInArea, indexOfTCLPMetals)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
for y in range(0, len(rowsInArea[indexOfTCLPMetals][x])):
if(isNumber(rowsInArea[indexOfTCLPMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Metals', formatOne)
return worksheet, currLine
def addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Volatiles By SW8260C":
indexOfVOCS = x
for x in range(1, len(rowsInArea[indexOfVOCS])):
rowsInArea[indexOfVOCS][x] = valueRefinerMetals(rowsInArea[indexOfVOCS][x])
rowsInArea[indexOfVOCS] = removeUselessRows(rowsInArea, indexOfVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfVOCS])):
for y in range(0, len(rowsInArea[indexOfVOCS][x])):
if(isNumber(rowsInArea[indexOfVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Vocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Vocs', formatOne)
return worksheet, currLine
def addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfSVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Acid/Base-Neutral By SW8270D":
indexOfSVOCS = x
for x in range(1, len(rowsInArea[indexOfSVOCS])):
rowsInArea[indexOfSVOCS][x] = valueRefinerMetals(rowsInArea[indexOfSVOCS][x])
rowsInArea[indexOfSVOCS] = removeUselessRows(rowsInArea, indexOfSVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSVOCS])):
for y in range(0, len(rowsInArea[indexOfSVOCS][x])):
if(isNumber(rowsInArea[indexOfSVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP SVocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP SVocs', formatOne)
return worksheet, currLine
def addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfTCLPPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Pesticides By SW8081B":
indexOfTCLPPesticides = x
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
rowsInArea[indexOfTCLPPesticides][x] = valueRefinerMetals(rowsInArea[indexOfTCLPPesticides][x])
rowsInArea[indexOfTCLPPesticides] = removeUselessRows(rowsInArea, indexOfTCLPPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
for y in range(0, len(rowsInArea[indexOfTCLPPesticides][x])):
if(isNumber(rowsInArea[indexOfTCLPPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides', formatOne)
return worksheet, currLine
def addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
    indexOfHerbicides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Herbicides By SW8151A":
indexOfHerbicides = x
for x in range(1, len(rowsInArea[indexOfHerbicides])):
rowsInArea[indexOfHerbicides][x] = valueRefinerMetals(rowsInArea[indexOfHerbicides][x])
    rowsInArea[indexOfHerbicides] = removeUselessRows(rowsInArea, indexOfHerbicides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfHerbicides])):
for y in range(0, len(rowsInArea[indexOfHerbicides][x])):
if(isNumber(rowsInArea[indexOfHerbicides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfHerbicides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides / Herbicides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides / Herbicides', formatOne)
return worksheet, currLine
def addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne):
indexOfGasolineHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Gasoline Range Hydrocarbons (C6-C10) By SW8015D":
indexOfGasolineHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
rowsInArea[indexOfGasolineHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfGasolineHydrocarbons][x])
indexOfDieselHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TPH By SW8015D DRO":
indexOfDieselHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
rowsInArea[indexOfDieselHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfDieselHydrocarbons][x])
offset = 2
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Total Petroleum Hydrocarbons', formatOne)
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfGasolineHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfGasolineHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfGasolineHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatOne)
currLine += 1
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfDieselHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfDieselHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfDieselHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatOne)
currLine += 1
return worksheet, currLine
print("Writing to Excel File...")
workbook, worksheet = startWritingFinalFile()
worksheet.set_column('B:B', 25)
worksheet.set_column('C:C', 30)
worksheet.set_column('E:E', 15)
worksheet.set_row(5,50)
#Important Information - Titles, etc..
formatOne = workbook.add_format()
formatOne.set_align('center')
formatOne.set_align('vcenter')
formatOne.set_font_name('Arial')
formatOne.set_font_size('12')
formatOne.set_border(6)
#Numbers within the text
formatTwo = workbook.add_format()
formatTwo.set_align('center')
formatTwo.set_align('vcenter')
formatTwo.set_font_name('Arial')
formatTwo.set_font_size('12')
formatTwo.set_border(6)
formatTwo.set_bg_color('#87CEFF')
formatTwo.set_bold()
#Current Line to overwrite each process
currLine = 6
#Heading for each column
worksheet, currLine, Type_Samples_Collected = createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne)
#Adding Mercury Values
worksheet, currLine, Mercury_Values = addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo)
#Adding PCB Values
worksheet, currLine = addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Pesticide Values
worksheet, currLine = addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Metal Values
worksheet, currLine = addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Cyanide Values
worksheet, currLine, Cyanide_Values = addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo)
#Adding Semi Volatile Organic Compounds
worksheet, currLine = addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Volatile Organic Compounds
worksheet, currLine = addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#RCRA Second Heading
worksheet, currLine = createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding Corrosivity(pH) Values
worksheet, currLine, Corrosivity_Values = addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne)
#Adding Flashpoint Values
worksheet, currLine, Flashpoint_Values = addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne)
#Adding Ignitability Values
worksheet, currLine, Ignitability_Values = addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne)
#Adding Reactivity Values
worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide = addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne)
#Toxicity Third Heading
worksheet, currLine = createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding TCLP Metals(Barium / Lead)
worksheet, currLine = addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP VOCS
worksheet, currLine = addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP SVOCS
worksheet, currLine = addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding TCLP Pesticides
worksheet, currLine = addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding TCLP Herbicides
worksheet, currLine = addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Adding Total Petroleum Hydrocarbons
worksheet, currLine = addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne)
#Beginning information(Company Name, Address, Dates Samples were collected)
worksheet, currLine = createBeginning(worksheet, currLine)
workbook.close()
print("Done Writing")
``` |
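The column-letter arithmetic (`chr(65+...)` plus the two-letter branch for columns past Z) is repeated in every writer function above. A small helper like the sketch below could centralize that conversion; it is an illustration of the same 0-based-index-to-Excel-column mapping used by the writer functions, not code from the repository.
```python
# Sketch: equivalent of the repeated chr(65 + ...) / two-letter logic above.
def index_to_column(idx):
    """Convert a 0-based column index to an Excel column name (0 -> A, 25 -> Z, 26 -> AA)."""
    name = ''
    idx += 1
    while idx > 0:
        idx, rem = divmod(idx - 1, 26)
        name = chr(65 + rem) + name
    return name

# e.g. worksheet.write(index_to_column(y + offset) + str(currLine), value, formatOne)
```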
{
"source": "jherrm/mpf-mc",
"score": 2
} |
#### File: mpfmc/tests/MpfMcTestCase.py
```python
import os
import sys
import unittest
verbose = sys.argv and "-v" in sys.argv
if not verbose:
os.environ['KIVY_NO_FILELOG'] = '1'
os.environ['KIVY_NO_CONSOLELOG'] = '1'
from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE
from kivy import Config, Logger
from kivy.base import runTouchApp, stopTouchApp, EventLoop
from kivy.clock import Clock
from kivy.uix.widget import Widget as KivyWidget
import mpfmc
from mpf.core.utility_functions import Util
if not verbose:
Config.set('kivy', 'log_enable', '0')
Config.set('kivy', 'log_level', 'warning')
from mpfmc.core.mc import MpfMc
from time import time, sleep
sys.stderr = sys.__stderr__
class MpfMcTestCase(unittest.TestCase):
def __init__(self, *args):
self.sent_bcp_commands = list()
super().__init__(*args)
self._events = dict()
self._last_event_kwargs = dict()
self.max_test_setup_secs = 30
self._fps = 30
def _mc_time(self):
return self._current_time
def get_options(self):
return dict(machine_path=self.get_machine_path(),
mcconfigfile='mcconfig.yaml',
production=False,
configfile=Util.string_to_list(self.get_config_file()),
no_load_cache=False,
create_config_cache=True,
no_sound=False,
bcp=False)
def get_absolute_machine_path(self):
return os.path.abspath(os.path.join(
mpfmc.__path__[0], os.pardir, 'mpfmc', self.get_machine_path()))
def get_machine_path(self):
raise NotImplementedError
def get_config_file(self):
raise NotImplementedError
def get_abs_path(self, path):
return os.path.join(os.path.abspath(os.curdir), path)
def advance_time(self, secs=.1):
start = self._current_time
while self._current_time < start + secs:
EventLoop.idle()
self._current_time += 1 / self._fps
def advance_real_time(self, secs=.1):
start = self._current_time
while self._current_time < start + secs:
EventLoop.idle()
sleep(1 / self._fps)
self._current_time += 1 / self._fps
EventLoop.idle()
def get_pixel_color(self, x, y):
"""Returns a binary string of the RGB bytes that make up the slide
pixel at the passed x,y coordinates. 2 bytes per pixel, 6 bytes total.
This method *does* compensate for different window sizes.
Note: This method does not work yet.
"""
raise NotImplementedError # remove when we fix it. :)
# do the Window import here because we don't want to import it at the
# top or else we won't be able to set window properties
from kivy.core.window import Window
# convert the passed x/y to the actual x/y of the Window since it's
# possible for the mpf-mc display size to be different than the Window
# size
x *= Window.width / Window.children[0].width
y *= Window.height / Window.children[0].height
return glReadPixels(x, y, 1, 1, GL_RGB, GL_UNSIGNED_BYTE)
def tearDown(self):
self.mc.stop()
def patch_bcp(self):
# used internally
self.orig_bcp_send = self.mc.bcp_processor.send
self.mc.bcp_processor.send = self._bcp_send
# this is used to send BCP commands to mpf-mc
self.send = self.mc.bcp_processor._process_command
self.mc.bcp_client_connected = True
def _bcp_send(self, bcp_command, callback=None, **kwargs):
# used for commands sent from the MC to the PC
# print((bcp_command, callback, kwargs))
self.sent_bcp_commands.append((bcp_command, callback, kwargs))
self.orig_bcp_send(bcp_command=bcp_command, callback=callback,
**kwargs)
def setUp(self):
# Most of the setup is done in run(). Explanation is there.
Config._named_configs.pop('app', None)
self._start_time = time()
self._current_time = self._start_time
Clock._start_tick = self._start_time
Clock._last_tick = self._start_time
Clock.time = self._mc_time
# prevent sleep in clock
Clock._max_fps = 0
# reset clock
Clock._root_event = None
from mpf.core.player import Player
Player.monitor_enabled = False
machine_path = self.get_absolute_machine_path()
try:
self.mc = MpfMc(options=self.get_options(),
machine_path=machine_path)
self.patch_bcp()
from kivy.core.window import Window
Window.create_window()
Window.canvas.clear()
self._start_app_as_slave()
except Exception:
if self.mc:
# prevent dead locks with two asset manager threads
self.mc.stop()
raise
def _start_app_as_slave(self):
# from app::run
if not self.mc.built:
self.mc.load_config()
self.mc.load_kv(filename=self.mc.kv_file)
root = self.mc.build()
if root:
self.mc.root = root
if self.mc.root:
if not isinstance(self.mc.root, KivyWidget):
Logger.critical('App.root must be an _instance_ of Kivy Widget')
raise Exception('Invalid instance in App.root')
from kivy.core.window import Window
Window.add_widget(self.mc.root)
# Check if the window is already created
from kivy.base import EventLoop
window = EventLoop.window
if window:
self.mc._app_window = window
#window.set_title(self.mc.get_application_name() + self._testMethodName)
icon = self.mc.get_application_icon()
if icon:
window.set_icon(icon)
self.mc._install_settings_keys(window)
else:
Logger.critical("Application: No window is created."
" Terminating application run.")
return
self.mc.dispatch('on_start')
runTouchApp(slave=True) # change is here
# Perform init process
tries = 0
while not self.mc.is_init_done.is_set() and not self.mc.thread_stopper.is_set():
self.advance_time()
sleep(.001)
tries += 1
if tries > 1000:
self.fail("Test init took too long")
# set a nice title
window.set_title(self.__class__.__name__ + "::" + self._testMethodName)
def dump_clock(self):
print("---------")
events = []
event = Clock._root_event
while event:
events.append(event)
event = event.next
events.sort(key=lambda x: str(x.get_callback()))
for event in events:
print(event.get_callback(), event.timeout)
def _mock_event_handler(self, event_name, **kwargs):
self._last_event_kwargs[event_name] = kwargs
self._events[event_name] += 1
def mock_event(self, event_name):
self._events[event_name] = 0
self.mc.events.remove_handler_by_event(
event=event_name, handler=self._mock_event_handler)
self.mc.events.add_handler(event=event_name,
handler=self._mock_event_handler,
event_name=event_name)
def assertEventNotCalled(self, event_name):
"""Assert that event was not called."""
if event_name not in self._events:
raise AssertionError("Event {} not mocked.".format(event_name))
if self._events[event_name] != 0:
raise AssertionError("Event {} was called {} times.".format(
event_name, self._events[event_name]))
def assertEventCalled(self, event_name, times=None):
"""Assert that event was called."""
if event_name not in self._events:
raise AssertionError("Event {} not mocked.".format(event_name))
if self._events[event_name] == 0:
raise AssertionError("Event {} was not called.".format(event_name))
if times is not None and self._events[event_name] != times:
raise AssertionError("Event {} was called {} instead of {}.".format(
event_name, self._events[event_name], times))
def assertEventCalledWith(self, event_name, **kwargs):
"""Assert that event was called with kwargs."""
self.assertEventCalled(event_name)
self.assertEqual(kwargs, self._last_event_kwargs[event_name],
"Args for {} differ.".format(event_name))
def reset_mock_events(self):
for event in self._events.keys():
self._events[event] = 0
``` |
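A minimal, hypothetical test built on this base class might look like the sketch below. The machine path, config file name and event name are invented; only the overridden methods and helpers shown above are assumed.
```python
# Hypothetical usage sketch -- paths, config name and event name are made up.
class TestMyMachine(MpfMcTestCase):
    def get_machine_path(self):
        return 'tests/machine_files/my_machine'   # assumed layout

    def get_config_file(self):
        return 'test_my_machine.yaml'             # assumed config

    def test_no_stray_event(self):
        self.mock_event('slide_my_slide_active')  # example event name
        self.advance_time(1)
        self.assertEventNotCalled('slide_my_slide_active')
```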
{
"source": "jhershberg/redis-py",
"score": 2
} |
#### File: redis-py/tests/conftest.py
```python
import pytest
import redis
def _get_client(cls, request=None, **kwargs):
params = {'host': 'localhost', 'port': 6379, 'db': 9}
params.update(kwargs)
client = cls(**params)
client.flushdb()
if request:
request.addfinalizer(client.flushdb)
return client
def skip_if_server_version_lt(min_version):
version = _get_client(redis.Redis).info()['redis_version']
c = "StrictVersion('%s') < StrictVersion('%s')" % (version, min_version)
return pytest.mark.skipif(c)
@pytest.fixture()
def r(request, **kwargs):
return _get_client(redis.Redis, request, **kwargs)
@pytest.fixture()
def sr(request, **kwargs):
return _get_client(redis.StrictRedis, request, **kwargs)
``` |
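For reference, a test in the same suite would simply request one of these fixtures by name. The snippet below is an illustrative example, not a test from the repository; the bytes-vs-str return type depends on the Python and redis-py versions.
```python
# Hypothetical example test using the `r` fixture above.
def test_set_and_get(r):
    # `r` is the flushed Redis client provided by the fixture.
    r.set('greeting', 'hello')
    assert r.get('greeting') in (b'hello', 'hello')  # bytes on py3, str on py2
```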
{
"source": "jherskovic/img-xlsx",
"score": 3
} |
#### File: jherskovic/img-xlsx/img-xlsx.py
```python
from PIL import Image
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.utils import get_column_letter
from functools import partial
import sys
import argparse
def rgb_to_xls_hex(rgb_tuple, image_mode='RGB'):
if image_mode == 'RGB':
r, g, b = rgb_tuple
elif image_mode == 'RGBA':
# Ignore alpha channel in images that have one.
r, g, b, _ = rgb_tuple
return f'{r:02x}{g:02x}{b:02x}'
def handle_arguments():
parser = argparse.ArgumentParser(description='Convert an image file to an Excel spreadsheet. I\'m sorry.')
parser.add_argument('--size', dest='size', type=int, default=64,
help='The number of cells for the largest dimension of the image. '
'Defaults to 64. Up to 512 works well for landscape images, up to 256 '
'for portrait images.')
parser.add_argument('--quantize', dest='quantize', metavar='NUM_COLORS', type=int, default=0,
help='Quantize the image (i.e. set an upper bound on the number of colors). '
'Max 255.')
parser.add_argument('image', metavar='FILENAME', type=str,
help='The image file to turn into an Excel spreadsheet. JPGs and PNGs work well.')
parser.add_argument('xlsx', metavar='FILENAME', type=str,
help='The output filename. Should end in .xlsx')
args = parser.parse_args()
return args
def convert(args):
im = Image.open(args.image)
maxsize = (args.size, args.size)
im.thumbnail(maxsize)
if args.quantize > 0 and args.quantize < 256:
quantized = im.quantize(colors=args.quantize)
im = quantized
if im.mode in ['P', 'L']:
image = im.convert("RGB")
else:
image = im
pixels=image.load()
pixel_converter = partial(rgb_to_xls_hex, image_mode=image.mode)
# Get the final image size
size_x, size_y = image.size
out_wb = Workbook()
out = out_wb.active
for y in range(size_y):
for x in range(size_x):
cell = out.cell(y+1, x+1)
rgb = pixels[x, y]
cell.fill = PatternFill("solid", fgColor=pixel_converter(rgb))
for col in range(1, size_x+1):
out.column_dimensions[get_column_letter(col)].width = 3
out_wb.save(args.xlsx)
if __name__ == "__main__":
args = handle_arguments()
convert(args)
``` |
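The script is meant to be run from the command line, but the same entry point can be driven programmatically. The sketch below assumes it runs in the same module as `convert()`; the file names are placeholders.
```python
# Hypothetical programmatic use of convert(); file names are placeholders.
from types import SimpleNamespace

args = SimpleNamespace(size=128, quantize=64, image='photo.jpg', xlsx='photo.xlsx')
convert(args)  # writes a 128-cell-wide "pixel art" spreadsheet to photo.xlsx
```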
{
"source": "jherskow/Zulu",
"score": 2
} |
#### File: backend/zulu/server.py
```python
import uvicorn
from fastapi import FastAPI
from zulu import routes
from zulu.db_tools import init_db
app = FastAPI(title='Zulu app', )
app.include_router(routes.api, prefix='/api')
@app.on_event('startup')
def startup_event():
init_db()
if __name__ == '__main__':
uvicorn.run(
'server:app',
host='0.0.0.0',
port=8342,
reload=True,
)
``` |
{
"source": "jhertfe/basic-log",
"score": 3
} |
#### File: jhertfe/basic-log/log.py
```python
from time import localtime
from time import strftime
from glob import glob
import sys
import os
def get_file_len(fname):
    with open(fname, 'r') as f:
        i = -1  # stays -1 for an empty file, so the function returns 0
        for i, l in enumerate(f):
pass
return i + 1
def main(args):
line_length = 80 # max number of symbols to write in each line
log_dir = '.' # log directory
show_lines = 5 # last x lines to show
# create log directory
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
now = localtime()
log_file = os.path.join(log_dir, '{}.log'.format(strftime('%Y-%m-%d', now)))
if args:
# split args into words if it was passed with quotes
if len(args) == 1:
args = args[0].split(' ')
time_string = strftime('%H:%M:%S', now)
out_text = ''
time_prefix = '[{}]'.format(time_string)
current_line = time_prefix
# create a file for each day
if not os.path.isfile(log_file):
out_text = '{:=^{l_len}}\n'.format(' ' + strftime('%Y-%m-%d', now) + ' ', l_len=line_length - 2)
# split text into lines depending on line_length
for i, a in enumerate(args):
if len(current_line) + len(a) + 1 > line_length:
out_text += current_line + '\n'
current_line = ' ' * len(time_prefix)
current_line += ' ' + a
out_text += current_line + '\n'
# write text to logfile
with open(log_file, 'a') as log_out:
log_out.write(out_text)
# show the last x lines in the current or last existing log file
view_file = log_file
if os.path.isfile(log_file):
print 'Log of today:'
else:
# get old log files and sort them by time
old_logs = glob(os.path.join(log_dir, '*.log'))
old_logs.sort(key=os.path.getmtime)
if old_logs:
print 'Last log found:'
view_file = old_logs[-1]
if os.path.isfile(view_file):
# check if there are more than x lines
file_len = get_file_len(view_file)
if file_len > show_lines:
os.system('head -n 1 {}'.format(view_file))
os.system('tail -n {} {}'.format(show_lines, view_file))
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "jhertz/autovod",
"score": 3
} |
#### File: jhertz/autovod/uploader_melee.py
```python
import httplib
import httplib2
import os
import random
import sys
import time
import untangle
from sys import argv, exit
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# config -- these should be absolute paths
CLIENT_SECRETS_FILE = "C:\\Users\\NYCPM\\Desktop\\autovod\\client_secrets.json"
STREAM_CONTROL_FILE = "C:\\Users\\NYCPM\\Dropbox\\Melee\\MeleeStreamControl\\Melee\\streamcontrol.xml"
VOD_FOLDER = "X:\\Vods\melee\\"
# constants
httplib2.RETRIES = 1
MAX_RETRIES = 10
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
PM_KEYWORDS = ["PM", "Project M", "SSBPM", "Wii"]
MELEE_KEYWORDS = ["Melee", "SSBM", "Gamecube"]
SMASH4_KEYWORDS = ["Smash4", "Sm4sh", "Smash4WiiU", "Smash 4 Wii U", "SSB4", "Wii U", "S4"]
SSB64_KEYWORDS = ["SSB", "64", "N64", "SSB64", "Smash64", "Smash 64"]
SMASH_KEYWORDS = ["Smash", "Smash Bros", "Super Smash Bros", "SmashBros", "SuperSmashBros"]
NEBS_KEYWORDS = ["nebs", "nebulous", "nebulous gaming", "nyc", "nebulous smash", "nebulous nyc", "nebulous gaming nyc", "esports"]
#functions
# parse stream control, return a dict with the options dict that initialize_upload() expects
def parse_xml(filename):
contents= ""
with open(filename, "r") as f:
contents = f.readlines()
#print "i read the xml file, here are its contents:\n\n"
#print contents
root = untangle.parse(filename)
return_dict = {}
player1 = root.items.pName1.cdata
player2 = root.items.pName2.cdata
main_title = root.items.eventTitle.cdata
titleL = root.items.rOundl.cdata
titleR = root.items.rOundr.cdata
#print "p1" + player1 + "p2" + player2 + "titleL" + titleL + "titleR" + titleR + "main" + main_title
# figure out what game we're in right now
game_keywords = MELEE_KEYWORDS
    vod_title = main_title + ": " + titleL + " - " + titleR + ": " + player1 + " vs. " + player2
    final_title = vod_title
if len(final_title) > 95:
final_title = final_title[:95]
return_dict['title'] = final_title
    print "going to upload with the title: " + return_dict['title']
    return_dict['description'] = "VOD from " + main_title + " created by khz's auto vod uploader"
return_dict['keywords'] = SMASH_KEYWORDS + NEBS_KEYWORDS + game_keywords
return_dict['category'] = "20" # this is the US category for gaming
return_dict['privacyStatus'] = "public" #XXX: change this later
return return_dict
def get_game_keywords(game):
if "Melee" in game:
return MELEE_KEYWORDS
if "PM" in game:
return PM_KEYWORDS
if "Smash 4" in game:
return SMASH4_KEYWORDS
if "64" in game:
return SSB64_KEYWORDS
return []
# handle oauth stuff to give us the youtube service
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message="clients secrets file not found!")
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
# entrypoint to actually upload
def initialize_upload(youtube, vod_file):
options = parse_xml(STREAM_CONTROL_FILE)
tags = ",".join(options['keywords'])
body=dict(
snippet=dict(
title=options['title'],
description=options['description'],
tags=tags,
categoryId=options['category']
),
status=dict(
privacyStatus=options['privacyStatus']
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(vod_file, chunksize=-1, resumable=True)
)
resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print "Uploading file..."
status, response = insert_request.next_chunk()
if response is not None:
if 'id' in response:
print "Video id '%s' was successfully uploaded." % response['id']
else:
exit("The upload failed with an unexpected response: %s" % response)
except HttpError, e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
except RETRIABLE_EXCEPTIONS, e:
error = "A retriable error occurred: %s" % e
if error is not None:
print error
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print "Sleeping %f seconds and then retrying..." % sleep_seconds
time.sleep(sleep_seconds)
#main entrypoint
if __name__ == '__main__':
    if len(argv) < 2 or not argv[1]:
        print "error! no file supplied"
exit(1)
try:
argparser.add_argument("--file", required=True, help="Video file to upload")
args = argparser.parse_args()
file_name = VOD_FOLDER + args.file
print "file name: ", file_name
old_size = -1
print "about to loop waiting for file to be done"
while True:
size = os.path.getsize(file_name)
print "size is", size
if old_size == size:
break
old_size = size
time.sleep(10)
print "file is done"
#print "read file name:", file_name
youtube = get_authenticated_service(args)
print "sucessfully created authenticated yt service"
except Exception as ex:
print ex
raw_input()
try:
initialize_upload(youtube, file_name)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
raw_input()
print "upload was sucessful!"
time.sleep(10)
``` |
{
"source": "jherzberg/article-tagging",
"score": 2
} |
#### File: tagnews/crimetype/tag.py
```python
import os
import pickle
import glob
import time
import pandas as pd
# not used explicitly, but this needs to be imported like this
# for unpickling to work.
from ..utils.model_helpers import LemmaTokenizer # noqa
"""
Contains the CrimeTags class that allows tagging of articles.
"""
MODEL_LOCATION = os.path.join(os.path.split(__file__)[0],
'models',
'binary_stemmed_logistic')
TAGS = ['OEMC', 'CPD', 'SAO', 'CCCC', 'CCJ', 'CCSP',
'CPUB', 'IDOC', 'DOMV', 'SEXA', 'POLB', 'POLM',
'GUNV', 'GLBTQ', 'JUVE', 'REEN', 'VIOL', 'BEAT',
'PROB', 'PARL', 'CPLY', 'DRUG', 'CPS', 'GANG', 'ILSP',
'HOMI', 'IPRA', 'CPBD', 'IMMG', 'ENVI', 'UNSPC',
'ILSC', 'ARSN', 'BURG', 'DUI', 'FRUD', 'ROBB', 'TASR']
def load_model(location=MODEL_LOCATION):
"""
Load a model from the given folder `location`.
There should be at least one file named model-TIME.pkl and
a file named vectorizer-TIME.pkl inside the folder.
The files with the most recent timestamp are loaded.
"""
models = glob.glob(os.path.join(location, 'model*.pkl'))
if not models:
raise RuntimeError(('No models to load. Run'
' "python -m tagnews.crimetype.models.'
'binary_stemmed_logistic.save_model"'))
model = models.pop()
while models:
model_time = time.strptime(model[-19:-4], '%Y%m%d-%H%M%S')
new_model_time = time.strptime(models[0][-19:-4], '%Y%m%d-%H%M%S')
if model_time < new_model_time:
model = models[0]
models = models[1:]
with open(model, 'rb') as f:
clf = pickle.load(f)
with open(os.path.join(location, 'vectorizer-' + model[-19:-4] + '.pkl'),
'rb') as f:
vectorizer = pickle.load(f)
return clf, vectorizer
class CrimeTags():
"""
    CrimeTags lets you tag articles. Neat!
"""
def __init__(self,
model_directory=MODEL_LOCATION,
clf=None,
vectorizer=None):
"""
Load a model from the given `model_directory`.
See `load_model` for more information.
Alternatively, the classifier and vectorizer can be
provided. If one is provided, then both must be provided.
"""
if clf is None and vectorizer is None:
self.clf, self.vectorizer = load_model(model_directory)
elif clf is None or vectorizer is None:
raise ValueError(('clf and vectorizer must both be None,'
' or both be not None'))
else:
self.clf, self.vectorizer = clf, vectorizer
def tagtext_proba(self, text):
"""
Compute the probability each tag applies to the given text.
inputs:
text: A python string.
returns:
pred_proba: A pandas series indexed by the tag name.
"""
x = self.vectorizer.transform([text])
y_hat = self.clf.predict_proba(x)
preds = pd.DataFrame(y_hat)
preds.columns = TAGS
preds = preds.T.iloc[:, 0].sort_values(ascending=False)
return preds
def tagtext(self, text, prob_thresh=0.5):
"""
Tag a string with labels.
inputs:
text: A python string.
prob_thresh: The threshold on probability at which point
the tag will be applied.
returns:
preds: A list of tags that have > prob_thresh probability
according to the model.
"""
preds = self.tagtext_proba(text)
return preds[preds > prob_thresh].index.values.tolist()
def relevant_proba(self, text):
"""
Outputs the probability that the given text is relevant.
This probability is computed naively as the maximum of
the probabilities each tag applies to the text.
A more nuanced method would compute a joint probability.
inputs:
text: A python string.
returns:
relevant_proba: Probability the text is relevant.
"""
return max(self.tagtext_proba(text))
def relevant(self, text, prob_thresh=0.05):
"""
Determines whether given text is relevant or not. Relevance
is defined as whether any tag has more than prob_thresh
chance of applying to the text according to the model.
inputs:
text: A python string.
prob_thresh: The threshold on probability that
determines relevance. If no tags have >=
prob_thresh of applying to the text, then
the text is not relevant.
returns:
relevant: Boolean. Is the text "relevant"?
"""
return len(self.tagtext(text, prob_thresh)) > 0
def get_contributions(self, text):
"""
Rank the words in the text by their contribution to each
category. This function assumes that clf has an attribute
`coef_` and that vectorizer has an attribute
`inverse_transform`.
inputs:
text: A python string.
returns:
contributions: Pandas panel keyed off [category, word].
Example:
>>> s = 'This is an article about drugs and gangs.'
>>> s += ' Written by the amazing <NAME>.'
>>> p = tagger.get_contributions(s)
>>> p['DRUG'].sort_values('weight', ascending=False)
weight
drug 5.549870
copyright 0.366905
gang 0.194773
this 0.124590
an -0.004484
article -0.052026
is -0.085534
about -0.154800
kevin -0.219028
rose -0.238296
and -0.316201
. -0.853208
"""
p = {}
vec = self.vectorizer.transform([text])
vec_inv = self.vectorizer.inverse_transform(vec)
for i, tag in enumerate(TAGS):
p[tag] = pd.DataFrame(
index=vec_inv,
data={'weight': self.clf.coef_[i, vec.nonzero()[1]]}
)
return pd.Panel(p)
```
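A minimal usage sketch of the `CrimeTags` class above, mirroring how the test below exercises it. It assumes the pre-trained model and vectorizer pickles ship with the installed package (see `load_model`); the article text is made up.
```python
import tagnews

tagger = tagnews.CrimeTags()
article = 'A man was arrested after shots were fired near the school.'
print(tagger.tagtext_proba(article).head())      # highest per-tag probabilities
print(tagger.tagtext(article, prob_thresh=0.5))  # tags above the threshold
print(tagger.relevant(article))                  # boolean relevance check
```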
#### File: tagnews/tests/test_crimetype_tag.py
```python
import tagnews
class Test_Crimetype():
@classmethod
def setup_method(cls):
cls.model = tagnews.CrimeTags()
def test_tagtext(self):
self.model.tagtext('This is example article text')
def test_tagtext_proba(self):
article = 'Murder afoul, someone has been shot!'
probs = self.model.tagtext_proba(article)
max_prob = probs.max()
max_type = probs.idxmax()
tags = self.model.tagtext(article,
prob_thresh=max_prob-0.001)
assert max_type in tags
```
#### File: tagnews/utils/load_vectorizer.py
```python
import numpy as np
import pandas as pd
import sklearn.preprocessing
def load_glove(vectors_file, normalize=False):
"""
Load a GloVe formatted file, which is simply of the format
<word_0><space><vec_0,0><space><vec_0,1><space>...<newline>
<word_1><space><vec_1,0><space><vec_1,1><space>...<newline>
...
See https://github.com/stanfordnlp/GloVe for more information.
That link also has information on how to download the pre-trained
word vectorizer models. If the file you download is compressed,
you will need to uncompress it before using this function.
    Note that the loading speed and memory usage are highly dependent
on what model you use. The downloadable model "glove.840B.300d.txt"
will take a few minutes to load and use 2.8 GB of memory, whereas the
model "glove.6B.50d.txt" will take a few seconds and use < 200 MB
of memory.
Sample usage:
>>> vectors = load_glove('tagnews/data/glove.6B.50d.txt')
>>> text = 'This is a sentence and stuff.'
>>> # you should use an actual tokenizer for this step.
>>> vectorized_text = vectors.loc[[word.lower()
... for word in text.split()]]
>>> print(vectorized_text.shape)
    (6, 50)
>>> k = 5
>>> import numpy as np
>>> def euc(word):
... return np.sum((vectors.values-vectors.loc[word].values)**2.0, 1)
...
>>> vectors.index[np.argpartition(euc('murder'), range(k))[:k]]
Inputs:
vectors_file: path to file that contains GloVe formatted word
vectors.
normalize: Should the word vectors be normalized? See
https://stats.stackexchange.com/questions/177905/ for
a good discussion on the topic.
    Returns:
vectors: NxM pandas dataframe whose rows are indexed by the word.
"""
with open(vectors_file, 'r', encoding='utf-8') as f:
for vocab_size, line in enumerate(f):
pass
vocab_size += 1
vec_size = len(line.split(' ')) - 1
vectors = np.zeros((vocab_size, vec_size), dtype=np.float32)
words = np.empty(shape=(vocab_size), dtype=np.dtype('object'))
with open(vectors_file, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
line = line.split(' ')
words[i] = line[0]
vectors[i] = [float(x) for x in line[1:]]
vectors = pd.DataFrame(vectors, index=words, copy=False)
vectors = vectors.loc[~vectors.index.duplicated()]
if normalize:
sklearn.preprocessing.normalize(vectors, copy=False)
return vectors
``` |
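A short sketch completing the nearest-neighbour lookup from the `load_glove` docstring above; the GloVe file path is a local assumption and any GloVe-formatted file will do.
```python
import numpy as np
from tagnews.utils.load_vectorizer import load_glove

vectors = load_glove('tagnews/data/glove.6B.50d.txt')

def nearest(word, k=5):
    # squared Euclidean distance from `word` to every vector, k closest first
    dists = np.sum((vectors.values - vectors.loc[word].values) ** 2.0, 1)
    return vectors.index[np.argpartition(dists, range(k))[:k]]

print(nearest('murder'))
```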
{
"source": "jherzfeld/24-game",
"score": 4
} |
#### File: 24-game/test/test_twenty_four.py
```python
import unittest
from src.twenty_four import TwentyFour
class TestClass(unittest.TestCase):
def test_validInput(self):
game = TwentyFour()
# Valid input
game.numbers = [1, 3, 5, 6]
game.user_input = '1 + 3 - (5 + 6)'
output = game.check_valid_input()
self.assertTrue(output)
# Too few numbers
game.user_input = '1 + 3 - (5)'
output = game.check_valid_input()
self.assertFalse(output)
# Too many numbers
game.user_input = '1 + 3 - (5 + 6)*1'
output = game.check_valid_input()
self.assertFalse(output)
# Wrong numbers
game.user_input = '1 + 2 - (5 + 6)'
output = game.check_valid_input()
self.assertFalse(output)
def test_generateNumbers(self):
game = TwentyFour()
for i in range(50):
game.generate_numbers()
valid_numbers = sum([1 for x in game.numbers if x in range(1,10)])
self.assertTrue(valid_numbers == 4)
``` |
{
"source": "jhescobar05/airflow_support",
"score": 2
} |
#### File: airflow_support/AIRFLOW-4128/test_bash_template_dag.py
```python
from builtins import range
from datetime import timedelta, datetime
import pendulum
from dateutil import tz
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.macros import dateutil
def print_the_new_timezone(**context):
execution_date = context['ts']
print("execution_date in ISO Format: {}".format(execution_date))
# Convert UTC execution date to US/Pacific
local_tz = pendulum.timezone("US/Pacific")
new_datetime_object = local_tz.convert(execution_date)
print("Converted datetime: {}".format(converted_datetime))
local_tz = pendulum.timezone("US/Pacific")
args = {
'owner': 'airflow',
'start_date': datetime(2019, 5, 3, tzinfo=local_tz),
}
dag = DAG(
dag_id='test_bash_template_dag',
default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60),
)
run_this_last = DummyOperator(
task_id='run_this_last',
dag=dag,
)
test_echo_command = BashOperator(
task_id='test_echo_command_test',
bash_command="echo \"Execution Date :\" {{ ds }}",
dag=dag,
)
test_datetime_macro = BashOperator(
task_id='test_datetime_macro_task',
bash_command="echo {{ execution_date.replace(tzinfo=macros.dateutil.tz.gettz('US/Pacific')) }}",
dag=dag,
)
test_python_op = PythonOperator(
task_id='print_the_new_timezone_task',
provide_context=True,
python_callable=print_the_new_timezone,
dag=dag,
)
test_echo_command >> test_python_op >> test_datetime_macro >> run_this_last
``` |
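A standalone sketch of the timezone conversion attempted in `print_the_new_timezone` above. Airflow's `ts` is an ISO-8601 string, so it is parsed into a pendulum datetime before conversion; the date value here is made up.
```python
import pendulum

execution_date = "2019-05-03T00:00:00+00:00"  # shape of Airflow's `ts` value
local_tz = pendulum.timezone("US/Pacific")
converted = local_tz.convert(pendulum.parse(execution_date))
print("Converted datetime: {}".format(converted.isoformat()))
```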
{
"source": "jhesed/saleor",
"score": 2
} |
#### File: tests/dashboard/test_order.py
```python
from __future__ import unicode_literals
import pytest
from django.core.urlresolvers import reverse
from saleor.dashboard.order.forms import MoveItemsForm
from saleor.order.models import Order, OrderHistoryEntry, OrderedItem, DeliveryGroup
from saleor.product.models import Stock
from tests.utils import get_redirect_location, get_url_path
@pytest.mark.integration
@pytest.mark.django_db
def test_view_cancel_order_line(admin_client, order_with_items_and_stock):
lines_before = order_with_items_and_stock.get_items()
lines_before_count = lines_before.count()
line = lines_before.first()
line_quantity = line.quantity
quantity_allocated_before = line.stock.quantity_allocated
product = line.product
url = reverse(
'dashboard:orderline-cancel', kwargs={
'order_pk': order_with_items_and_stock.pk,
'line_pk': line.pk})
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(url, {'csrfmiddlewaretoken': 'hello'})
assert response.status_code == 302
assert get_redirect_location(response) == reverse(
'dashboard:order-details', args=[order_with_items_and_stock.pk])
# check ordered item removal
lines_after = Order.objects.get().get_items()
assert lines_before_count - 1 == lines_after.count()
# check stock deallocation
assert Stock.objects.first().quantity_allocated == quantity_allocated_before - line_quantity
# check note in the order's history
assert OrderHistoryEntry.objects.get(
order=order_with_items_and_stock).comment == 'Cancelled item %s' % product
url = reverse(
'dashboard:orderline-cancel', kwargs={
'order_pk': order_with_items_and_stock.pk,
'line_pk': OrderedItem.objects.get().pk})
response = admin_client.post(
url, {'csrfmiddlewaretoken': 'hello'}, follow=True)
# check delivery group removal if it becomes empty
assert Order.objects.get().get_items().count() == 0
assert DeliveryGroup.objects.count() == 0
# check success messages after redirect
assert response.context['messages']
@pytest.mark.integration
@pytest.mark.django_db
def test_view_split_order_line(admin_client, order_with_items_and_stock):
"""
user goes to order details page
user selects first order line with quantity 3 and moves 2 items
to a new shipment
user selects the line from the new shipment and moves all items
back to the first shipment
"""
lines_before_split = order_with_items_and_stock.get_items()
lines_before_split_count = lines_before_split.count()
line = lines_before_split.first()
line_quantity_before_split = line.quantity
quantity_allocated_before_split = line.stock.quantity_allocated
old_delivery_group = DeliveryGroup.objects.get()
url = reverse(
'dashboard:orderline-split', kwargs={
'order_pk': order_with_items_and_stock.pk,
'line_pk': line.pk})
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
{'quantity': 2, 'target_group': MoveItemsForm.NEW_SHIPMENT},
follow=True)
redirected_to, redirect_status_code = response.redirect_chain[-1]
# check redirection
assert redirect_status_code == 302
assert get_url_path(redirected_to) == reverse(
'dashboard:order-details',
args=[order_with_items_and_stock.pk])
# success messages should appear after redirect
assert response.context['messages']
lines_after = Order.objects.get().get_items()
# order should have one more line
assert lines_before_split_count + 1 == lines_after.count()
# stock allocation should not be changed
assert Stock.objects.first().quantity_allocated == quantity_allocated_before_split
line.refresh_from_db()
# source line quantity should be decreased to 1
assert line.quantity == line_quantity_before_split - 2
# order should have 2 delivery groups now
assert order_with_items_and_stock.groups.count() == 2
# a note in the order's history should be created
new_group = DeliveryGroup.objects.last()
assert OrderHistoryEntry.objects.get(
order=order_with_items_and_stock).comment == (
'Moved 2 items %(item)s from '
'%(old_group)s to %(new_group)s') % {
'item': line,
'old_group': old_delivery_group,
'new_group': new_group}
new_line = new_group.items.get()
# the new line should contain the moved quantity
assert new_line.quantity == 2
url = reverse(
'dashboard:orderline-split', kwargs={
'order_pk': order_with_items_and_stock.pk,
'line_pk': new_line.pk})
admin_client.post(
url, {'quantity': 2, 'target_group': old_delivery_group.pk})
# an other note in the order's history should be created
assert OrderHistoryEntry.objects.filter(
        order=order_with_items_and_stock).last().comment == (
'Moved 2 items %(item)s from removed '
'group to %(new_group)s') % {
'item': line,
'new_group': old_delivery_group}
# the new shipment should be removed
assert order_with_items_and_stock.groups.count() == 1
# the related order line should be removed
assert lines_before_split_count == Order.objects.get().get_items().count()
line.refresh_from_db()
# the initial line should get the quantity restored to its initial value
assert line_quantity_before_split == line.quantity
@pytest.mark.integration
@pytest.mark.django_db
@pytest.mark.parametrize('quantity', [0, 4])
def test_view_split_order_line_with_invalid_data(admin_client, order_with_items_and_stock, quantity):
"""
user goes to order details page
    user selects first order line with quantity 3 and tries to move 0 and 4 items to a new shipment
user gets an error and no delivery groups are created.
"""
lines = order_with_items_and_stock.get_items()
line = lines.first()
url = reverse(
'dashboard:orderline-split', kwargs={
'order_pk': order_with_items_and_stock.pk,
'line_pk': line.pk})
response = admin_client.post(
url, {'quantity': quantity, 'target_group': MoveItemsForm.NEW_SHIPMENT})
assert response.status_code == 400
assert DeliveryGroup.objects.count() == 1
``` |
{
"source": "jhesketh/rookcheck",
"score": 2
} |
#### File: lib/distro/sles.py
```python
from tests.lib.distro import base
class SLES_CaaSP(base.Distro):
def bootstrap_play(self):
tasks = []
# First task will be installing the correct repositories for Skuba.
# SLES doesn't have any respositories configured by default. We either
# need to register the node against SCC or add the repos assuming we
# have access to IBS.
# "caasp_devel":
# "http://download.suse.de/ibs/Devel:/CaaSP:/4.0/SLE_15_SP1/",
# "suse_ca":
# "http://download.suse.de/ibs/SUSE:/CA/SLE_15_SP1/",
# "sle_server_pool":
# "http://download.suse.de/ibs/SUSE/Products/SLE-Product-SLES/"
# "15-SP1/x86_64/product/",
# "basesystem_pool":
# "http://download.suse.de/ibs/SUSE/Products/"
# "SLE-Module-Basesystem/15-SP1/x86_64/product/",
# "containers_pool":
# "http://download.suse.de/ibs/SUSE/Products/"
# "SLE-Module-Containers/15-SP1/x86_64/product/",
# "serverapps_pool":
# "http://download.suse.de/ibs/SUSE/Products/"
# "SLE-Module-Server-Applications/15-SP1/x86_64/product/",
# "sle_server_updates":
# "http://download.suse.de/ibs/SUSE/Updates/"
# "SLE-Product-SLES/15-SP1/x86_64/update/",
# "basesystem_updates":
# "http://download.suse.de/ibs/SUSE/Updates/"
# "SLE-Module-Basesystem/15-SP1/x86_64/update/",
# "containers_updates":
# "http://download.suse.de/ibs/SUSE/Updates/"
# "SLE-Module-Containers/15-SP1/x86_64/update/",
# "serverapps_updates":
# "http://download.suse.de/ibs/SUSE/Updates/"
# "SLE-Module-Server-Applications/15-SP1/x86_64/update/"
play_source = dict(
name="Prepare nodes",
hosts="all",
tasks=tasks,
gather_facts="no",
strategy="mitogen_free",
)
return play_source
``` |
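A hypothetical example of the kind of task the commented-out repository list above would turn into, using Ansible's `zypper_repository` module. The repository name and URL come from the comments, but the exact task layout is an assumption, not code from the repository.
```python
# One task of the sort bootstrap_play could append before installing packages.
tasks = [
    dict(
        name="Add CaaSP devel repository",
        action=dict(
            module='zypper_repository',
            args=dict(
                name='caasp_devel',
                repo='http://download.suse.de/ibs/Devel:/CaaSP:/4.0/SLE_15_SP1/',
                auto_import_keys=True,
            ),
        ),
    ),
]
```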
{
"source": "jhesketh/smoke_rook",
"score": 2
} |
#### File: lib/rook/upstream.py
```python
import logging
import os
import requests
import yaml
from tests.config import settings, converter
from tests.lib.common import execute, recursive_replace
from tests.lib.rook.base import RookBase
logger = logging.getLogger(__name__)
class RookCluster(RookBase):
def __init__(self, workspace, kubernetes):
super().__init__(workspace, kubernetes)
self._rook_built = False
self.build_dir = os.path.join(self.workspace.build_dir, 'rook')
self.ceph_dir = os.path.join(
self.build_dir, 'cluster/examples/kubernetes/ceph')
self.rook_chart = settings.UPSTREAM_ROOK.ROOK_CEPH_CHART
def build(self):
super().build()
self.get_rook()
if not converter('@bool', settings.UPSTREAM_ROOK.BUILD_ROOK_FROM_GIT):
return
self.get_golang()
logger.info("Compiling rook...")
execute(
command=f"make --directory {self.build_dir} "
f"-j BUILD_REGISTRY='rook-build' IMAGES='ceph'",
env={"PATH": f"{self.workspace.bin_dir}/go/bin:"
f"{os.environ['PATH']}",
"TMPDIR": self.workspace.tmp_dir,
"GOCACHE": self.workspace.tmp_dir,
"GOPATH": self.workspace.build_dir},
log_stderr=False)
image = 'rook/ceph'
tag = f"{settings.UPSTREAM_ROOK.VERSION}-rookcheck"
self.rook_image = f"{image}:{tag}"
logger.info(f"Tag image as {image}:{tag}")
execute(f'docker tag "rook-build/ceph-amd64" {image}:{tag}')
logger.info("Save image tar")
# TODO(jhesketh): build arch may differ
execute(f"docker save {image}:{tag} | gzip > %s"
% os.path.join(self.build_dir, 'rook-ceph.tar.gz'))
self._rook_built = True
def preinstall(self):
super().preinstall()
if converter('@bool', settings.UPSTREAM_ROOK.BUILD_ROOK_FROM_GIT):
self.upload_rook_image()
self._fix_yaml()
def _get_charts(self):
super()._get_charts()
logger.info(f"Adding rook chart helm repo {self.rook_chart}")
self.kubernetes.helm(f"repo add rook-upstream {self.rook_chart}")
def get_rook(self):
logger.info("Clone rook version %s from repo %s" % (
settings.UPSTREAM_ROOK.VERSION,
settings.UPSTREAM_ROOK.REPO))
execute(
"git clone -b %s %s %s" % (
settings.UPSTREAM_ROOK.VERSION,
settings.UPSTREAM_ROOK.REPO,
self.build_dir),
log_stderr=False
)
def get_golang(self):
url = 'https://golang.org/VERSION?m=text'
version = requests.get(url).content.decode("utf-8")
self.workspace.get_unpack(
"https://dl.google.com/go/%s.linux-amd64.tar.gz" % version,
unpack_folder=self.workspace.bin_dir
)
def _fix_yaml(self):
# Replace image reference if we built it in this run
with open(os.path.join(self.ceph_dir, 'operator.yaml')) as file:
docs = yaml.load_all(file, Loader=yaml.FullLoader)
for doc in docs:
try:
image = doc['spec']['template']['spec'][
'containers'][0]['image']
break
except KeyError:
pass
replacements = {image: self.rook_image}
recursive_replace(dir=self.ceph_dir, replacements=replacements)
def upload_rook_image(self):
self.kubernetes.hardware.ansible_run_playbook(
"playbook_rook_upstream.yaml")
def _install_operator_helm(self):
version = ""
if settings.UPSTREAM_ROOK.VERSION != "master":
version = f"--version {settings.UPSTREAM_ROOK.VERSION}"
logger.info(
"Installing rook operator with helm rook-upstream/rook-ceph"
f" {version}"
)
self.kubernetes.helm(
f"install -n rook-ceph rook-ceph rook-upstream/rook-ceph"
f" {version}")
``` |
{
"source": "jhessin/axis.docset",
"score": 3
} |
#### File: Documents/git-dash-master/git-gendoc2dash.py
```python
import sqlite3
import os
import urllib
from bs4 import BeautifulSoup as bs
import requests
# CONFIGURATION
docset_name = 'Git.docset'
output = docset_name + '/Contents/Resources/Documents'
root_url = 'http://git-scm.com/docs'
# create directory
docpath = output + '/'
if not os.path.exists(docpath): os.makedirs(docpath)
icon = 'http://blog.novatec-gmbh.de/wp-content/uploads/2013/07/logo-git.png'
# soup
data = requests.get(root_url).text # for online page
soup = bs(data)
index = str(soup.find(id='main'))
open(os.path.join(output, 'index.html'), 'wb').write(index)
# add icon
urllib.urlretrieve(icon, docset_name + "/icon.png")
def update_db(name, path):
typ = 'func'
cur.execute('INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)', (name, typ, path))
print 'DB add >> name: %s, path: %s' % (name, path)
def add_docs():
sections = []
titles = []
for i, link in enumerate(soup.findAll('a')):
name = link.text.strip()
path = link.get('href')
if path.startswith('/docs/'):
sections.append(path)
titles.append(name)
# download and update db
for path, name in zip(sections, titles):
# create subdir
folder = os.path.join(output)
for i in range(len(path.split("/")) - 1):
folder += path.split("/")[i] + "/"
if not os.path.exists(folder): os.makedirs(folder)
print name, path
try:
# download docs
page = path.split('/')[-1]
url = root_url + '/' + page
data1 = requests.get(url).text
soup1 = bs(data1)
div = str(soup1.find(id='main'))
open(os.path.join(folder, page + '.html'), 'wb').write(div)
print "downloaded doc: ", path
print " V"
# update db
path += '.html'
update_db(name, path)
#y += 1
except:
print " X"
pass
def add_infoplist():
name = docset_name.split('.')[0]
info = " <?xml version=\"1.0\" encoding=\"UTF-8\"?>" \
"<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"> " \
"<plist version=\"1.0\"> " \
"<dict> " \
" <key>CFBundleIdentifier</key> " \
" <string>{0}</string> " \
" <key>CFBundleName</key> " \
" <string>{1}</string>" \
" <key>DocSetPlatformFamily</key>" \
" <string>{2}</string>" \
" <key>isDashDocset</key>" \
" <true/>" \
" <key>dashIndexFilePath</key>" \
" <string>index.html</string>" \
"</dict>" \
"</plist>".format(name, name, name)
open(docset_name + '/Contents/info.plist', 'wb').write(info)
if __name__ == '__main__':
db = sqlite3.connect(docset_name + '/Contents/Resources/docSet.dsidx')
cur = db.cursor()
try:
cur.execute('DROP TABLE searchIndex;')
except:
pass
cur.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
# start
add_docs()
add_infoplist()
# commit and close db
db.commit()
db.close()
``` |
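A quick sanity check of the search index the script above generates, assuming the docset was built in the current directory; this only reads back the `searchIndex` table created in `__main__`.
```python
import sqlite3

db = sqlite3.connect('Git.docset/Contents/Resources/docSet.dsidx')
cur = db.cursor()
cur.execute('SELECT name, type, path FROM searchIndex LIMIT 5')
for name, typ, path in cur.fetchall():
    print('%s (%s) -> %s' % (name, typ, path))
db.close()
```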
{
"source": "jhetherly/document_recommender",
"score": 3
} |
#### File: jhetherly/document_recommender/download_wiki_data.py
```python
import os
import string
import argparse
import json
from tqdm import tqdm
from unidecode import unidecode
import requests
from lxml import html
BASE_URL = 'https://en.wikipedia.org'
def get_valid_lists_pages_and_subcategories(sess, url):
pages = []
subcategories = []
page = sess.get(url)
tree = html.fromstring(page.content)
pages_content = tree.xpath('//div[@id="bodyContent"]//div[@id="mw-pages"]')
subcategories_content = tree.xpath('//div[@id="bodyContent"]//div[@id="mw-subcategories"]')
assert(len(pages_content) <= 1)
assert(len(subcategories_content) <= 1)
if len(pages_content) > 0:
pages_content = pages_content[0]
for group in pages_content.xpath('//div[@class="mw-category-group"]'):
label = group.xpath('h3/text()')[0]
if label not in string.ascii_uppercase:
continue
items = group.xpath('ul/li/a/@href')
for item in items:
pages.append(BASE_URL + item)
if len(subcategories_content) > 0:
subcategories_content = subcategories_content[0]
for group in subcategories_content.xpath('//div[@class="mw-category-group"]'):
label = group.xpath('h3/text()')[0]
if label not in string.ascii_uppercase:
continue
skip_group = False
items = group.xpath('ul/li')
for item in items:
spans = group.xpath('div/div/span')
for span in spans:
# if this link has no further links skip it (i.e. it's invalid)
skip_group |= 'empty' in span.text_content()
if skip_group:
continue
link = item.xpath('div/div/a/@href')
if len(link) != 1:
continue
subcategories.append(BASE_URL + str(link[0]))
return pages, subcategories
def get_book_pages(sess, url):
page = sess.get(url)
tree = html.fromstring(page.content)
page_links = tree.xpath('//div[@id="mw-content-text"]//dd/a/@href')
pages = []
for page_link in page_links:
pages.append(BASE_URL + page_link)
return pages
def save_wiki_page(sess, url, save_dir):
page = sess.get(url)
filename = os.path.join(save_dir, unidecode(url.split('/')[-1]) + '.html')
with open(filename, "w") as html_file:
html_file.write(str(page.content))
def main(settings_filename):
with open(settings_filename, 'r') as f:
config = json.load(f)
topic = config.get('topic', 'Medicine')
min_pages = config.get('min_pages', 500)
save_dir = os.path.join(config.get('save_dir', os.path.join('data', 'wiki')), topic)
buffer = config.get('buffer', 10) # percentage
wiki_url = 'https://en.wikipedia.org/wiki/Category:{}'.format(topic)
min_pages_plus_buffer = int(min_pages*(1 + buffer/100))
print('will scan Wikipedia for {} pages in topic {}'.format(min_pages_plus_buffer, topic))
S = requests.Session()
pages, subcategories = get_valid_lists_pages_and_subcategories(S, wiki_url)
# scape Wikipedia for a given number of pages (with an additional buffer)
while len(subcategories) > 0 and len(pages) < min_pages_plus_buffer:
current_subcategories = subcategories[:]
for subcategory in current_subcategories:
subpages, subsubcategories = get_valid_lists_pages_and_subcategories(S, subcategory)
pages += subpages
subcategories += subsubcategories
# get all the pages within each book
book_pages = [x for x in pages if 'wiki/Book:' in x]
while len(book_pages) > 0 and len(pages) < min_pages_plus_buffer:
for page in book_pages:
pages_in_book = get_book_pages(S, page)
pages += pages_in_book
pages = [x for x in pages if x not in book_pages]
book_pages = [x for x in pages if 'wiki/Book:' in x]
subcategories = [x for x in subcategories if x not in current_subcategories]
os.makedirs(save_dir, exist_ok=True)
print('saving Wikipedia html files to {}'.format(save_dir))
for page in tqdm(pages[:min_pages_plus_buffer]):
save_wiki_page(S, page, save_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download Wikipedia Category')
parser.add_argument('--download_settings', type=str,
default='settings/wiki_download.json', help='download settings file')
args = parser.parse_args()
main(args.download_settings)
``` |
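A sketch of the kind of settings file `main()` expects; the keys mirror the `config.get` calls above and the values are only illustrative (the `settings/` directory is assumed to exist).
```python
import json

settings = {
    "topic": "Medicine",      # Wikipedia category to crawl
    "min_pages": 500,         # minimum number of pages to download
    "save_dir": "data/wiki",  # pages are written to <save_dir>/<topic>/
    "buffer": 10              # extra pages to fetch, as a percentage
}

with open("settings/wiki_download.json", "w") as f:
    json.dump(settings, f, indent=2)
```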
{
"source": "jhetherly/linear_binning",
"score": 3
} |
#### File: linear_binning/test/test_linear_binning.py
```python
from linear_binning import linear_binning
import numpy as np
import logging
from timeit import default_timer as timer
logging.basicConfig(level=logging.INFO)
def generate_data(n_samples=100000, D=2):
sample_coords = np.random.random(size=(n_samples, D))
sample_weights = np.random.random(size=n_samples)
# NOTE: purposely limiting the range to test over- and underflow bins
extents = np.tile([0.02, 0.8999], D).reshape((D, 2))
sizes = np.full(D, 51)
return sample_coords, sample_weights, extents, sizes
def test_sum_of_weights():
# tests that the sum of weights in the binned grid is preserved
sample_coords, sample_weights, extents, sizes = generate_data(1000000)
start = timer()
coords, weights = linear_binning(sample_coords, sample_weights,
extents, sizes)
end = timer()
logging.info('\n')
logging.info('One million 2D points binned with linear_binning in {}s'.format(end - start))
assert np.allclose(weights.sum(), sample_weights.sum())
x = np.ascontiguousarray(sample_coords[:,0])
y = np.ascontiguousarray(sample_coords[:,1])
start = timer()
np.histogram2d(x, y,
weights=sample_weights,
bins=sizes, range=extents)
end = timer()
logging.info('For comparison, np.histogram2d finished in {}s'.format(end - start))
# tests specific values on the grid
sample_coords = np.array([[0.2, 0.9], [0.5, 1.1], [-0.1, 0.7]])
sample_weights = np.array([25, 50, 25])
extents = np.array([[0.0, 1.0], [0.0, 1.0]])
sizes = np.array([11, 11])
coords, weights = linear_binning(sample_coords, sample_weights,
extents, sizes)
pass_value_test = True
value_tests = 0
for i in range(coords.shape[0]):
if np.allclose(coords[i, 0], 0.0) and np.allclose(coords[i, 1], 0.7):
pass_value_test &= np.allclose(weights[i], 25.0)
value_tests += 1
elif np.allclose(coords[i, 0], 0.2) and np.allclose(coords[i, 1], 0.9):
pass_value_test &= np.allclose(weights[i], 25.0)
value_tests += 1
elif np.allclose(coords[i, 0], 0.5) and np.allclose(coords[i, 1], 1.0):
pass_value_test &= np.allclose(weights[i], 50.0)
value_tests += 1
else:
pass_value_test &= np.allclose(weights[i], 0.0)
assert pass_value_test and value_tests == 3
``` |
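Direct use of `linear_binning` outside the test harness, mirroring the calls in the test above: bin two-dimensional weighted samples onto a 51x51 grid and confirm the total weight is preserved.
```python
import numpy as np
from linear_binning import linear_binning

sample_coords = np.random.random(size=(100000, 2))
sample_weights = np.random.random(size=100000)
extents = np.array([[0.0, 1.0], [0.0, 1.0]])
sizes = np.array([51, 51])

coords, weights = linear_binning(sample_coords, sample_weights, extents, sizes)
print(weights.sum(), sample_weights.sum())  # the two sums should agree
```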
{
"source": "jhetherly/python_knapsack",
"score": 3
} |
#### File: knapsack_python/test/test_funcs.py
```python
from __future__ import division
from knapsack_python import mthm, mthg, assign_all
import numpy as np
import logging
logging.basicConfig(level=logging.INFO)
def generate_random_data(n=250, m=10, rng=None):
if rng is None:
rng = np.random
# weights
w = rng.randint(1, 51, size=n)
# profits
p = rng.randint(1, 51, size=n)
# capacities
c = []
lower_index = 0
for i in range(m):
if i < m - 1:
upper_index = rng.randint(w.size//(m + 1), w.size//m) + lower_index
c.append(w[lower_index:upper_index].sum())
else:
c.append(w[lower_index:].sum())
lower_index = upper_index
c = np.array(c)
return p, w, c
def static_data():
# weights
w = np.array([40, 21, 21, 31, 10, 38, 34, 17, 23, 11, 45, 24, 8, 17, 25,
29, 31, 43, 15, 48, 31, 1, 32, 26, 11, 21, 23, 10, 36, 13,
30, 43, 44, 28, 7, 11, 41, 45, 7, 16, 4, 26, 14, 18, 2,
20, 21, 7, 50, 23, 24, 5, 30, 10, 15, 5, 34, 22, 28, 17,
5, 27, 20, 34, 4, 33, 30, 5, 40, 45, 49, 45, 1, 22, 36,
26, 20, 12, 32, 39, 36, 42, 22, 38, 18, 6, 10, 3, 23, 11,
48, 47, 2, 17, 6, 5, 26, 23, 35, 30, 4, 34, 16, 38, 49,
29, 2, 38, 45, 6, 29, 7, 47, 29, 6, 45, 13, 30, 36, 49,
10, 7, 17, 4, 22, 2, 50, 10, 32, 36, 13, 35, 19, 36, 32,
42, 17, 7, 18, 40, 10, 11, 22, 38, 50, 5, 13, 3, 12, 36,
46, 28, 50, 16, 24, 9, 34, 48, 6, 22, 37, 28, 14, 10, 13,
2, 43, 29, 12, 48, 29, 40, 20, 32, 37, 36, 34, 40, 36, 45,
32, 6, 46, 12, 43, 28, 4, 8, 33, 38, 7, 38, 27, 4, 48,
38, 6, 26, 17, 35, 33, 3, 7, 3, 28, 45, 24, 45, 47, 21,
50, 37, 35, 25, 19, 21, 18, 39, 47, 48, 26, 49, 25, 40, 44,
11, 10, 38, 47, 43, 35, 34, 34, 25, 37, 40, 4, 14, 44, 18,
48, 49, 8, 6, 39, 18, 15, 33, 26, 16])
# profits
p = w
# capacities
c = np.array([595, 517, 562, 556, 562, 574, 562, 675, 623, 1165])
return p, w, c
def test_mthm():
p, w, c = static_data()
# number of items
n = len(w)
# number of knapsacks
m = len(c)
# NOTE: this is to try to make the weights and capacities not align
chunked_w = np.empty(n//2)
for i in range(n//2):
chunked_w[i] = w[2*i] + w[2*i + 1]
chunked_w = chunked_w.astype(c.dtype)
# target output
target_y = np.array([ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 9,
2, 2, 2, 2, 2, 2, 4, 4, 2, 4, 2, 4, 4, 4,
4, 1, 4, 6, 3, -1, 6, 6, 4, 6, 6, 6, 6, 6,
6, 5, 5, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 0,
5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 7, 4, 7, 7, 8, 7,
7, 7, 7, 0, 7, 7, 7, 9, 9, 7, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 7, 9, 9, 7, 9, 9, 9])
target_score = 6380
score, y = mthm(chunked_w, c)
# print(chunked_w)
# print(score, y)
actual_c = []
for i in range(len(c)):
actual_c.append(chunked_w[y == i].sum())
actual_c = np.array(actual_c)
# print(c)
# print(actual_c)
# print((np.abs(actual_c - c)/c).mean())
# print((np.abs(actual_c - c)/c).std())
assert np.allclose(y, target_y)
assert score == target_score
def test_mthg():
# number of items
n = 8
# number of knapsacks
m = 3
p = np.array([[27, 12, 12, 16, 24, 31, 41, 13],
[14, 5, 37, 9, 36, 25, 1, 34],
[34, 34, 20, 9, 19, 19, 3, 34]])
w = np.array([[21, 13, 9, 5, 7, 15, 5, 24],
[20, 8, 18, 25, 6, 6, 9, 6],
[16, 16, 18, 24, 11, 11, 16, 18]])
f = w
c = np.array([26, 25, 34])
# target output
target_y = np.array([3, 3, 1, 1, 2, 2, 1, 2])
target_score = 232
score, y = mthg(w, c, p=p, f=f)
assert np.allclose(y, target_y)
assert score == target_score
def test_mthm_assign_all():
p, w, c = static_data()
# number of items
n = len(w)
# number of knapsacks
m = len(c)
# NOTE: this is to try to make the weights and capacities not align
chunked_w = np.empty(n//2)
for i in range(n//2):
chunked_w[i] = w[2*i] + w[2*i + 1]
chunked_w = chunked_w.astype(c.dtype)
# target output
target_y = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 2, 2, 2, 2, 2, 2, 4, 2, 2, 2, 2, 2, 2, 4, 4,
2, 4, 2, 4, 4, 4, 9, 1, 9, 6, 3, 4, 6, 6, 4, 6, 6, 6,
6, 6, 6, 5, 5, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 0, 5, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 7, 9, 7, 7, 8, 7, 7, 7, 7, 0, 7, 7, 7, 9, 9, 7,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 7, 9, 9, 7, 9, 9, 4])
target_score = 6391
score, y = mthm(chunked_w, c)
# print(chunked_w)
score, y_all = assign_all(chunked_w, c, y, z_init=score, max_balance_iter=100)
# print(score, y_all)
actual_c = []
for i in range(len(c)):
actual_c.append(chunked_w[y_all == i].sum())
actual_c = np.array(actual_c)
# print(actual_c)
# print((np.abs(actual_c - c)/c).mean())
# print((np.abs(actual_c - c)/c).std())
assert np.allclose(y_all, target_y)
assert score == target_score
``` |
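A minimal `mthm` call in the spirit of the tests above: assign weighted items to knapsacks with fixed capacities, passing only weights and capacities as the tests do. The weights and capacities here are made up.
```python
import numpy as np
from knapsack_python import mthm

w = np.array([40, 21, 21, 31, 10, 38, 34, 17, 23, 11])  # item weights
c = np.array([80, 90, 80])                              # knapsack capacities

score, assignment = mthm(w, c)
print(score)       # total packed value
print(assignment)  # knapsack index per item, -1 means unassigned
```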
{
"source": "jhettler/knowledge-repo",
"score": 2
} |
#### File: knowledge_repo/converters/ipynb.py
```python
import os
from ..converter import KnowledgePostConverter
from .._version import __optional_dependencies__
TEMPLATE = '''
{%- extends 'markdown.tpl' -%}
{%- block data_javascript scoped %}
{% set div_id = uuid4() %}
<div id="{{ div_id }}"></div>
<div class="output_subarea output_javascript {{ extra_class }}">
<script type="text/javascript">
var element = $('#{{ div_id }}');
{{ output.data['application/javascript'] }}
</script>
</div>
{%- endblock -%}
{%- block input -%}
{%- if cell['metadata'].get('slideshow',{}).get('slide_type','') == 'skip' -%}
{%- elif cell['metadata'].get('slideshow',{}).get('slide_type','') == 'skip-in' -%}
{%- else %}
```python
{{ cell.source }}
```
{%- endif %}
{%- endblock input -%}
{%- block data_priority scoped %}
{{ super() }}
{%- endblock %}
{# remove stderr output and skip cells #}
{%- block stream scoped -%}
{%- if cell['metadata'].get('slideshow',{}).get('slide_type','') == 'skip' -%}
{%- elif cell['metadata'].get('slideshow',{}).get('slide_type','') == 'skip-out' -%}
{%- elif output.name == 'stdout' -%}
{{ super () }}
{%- elif output.name == 'stderr' -%}
{%- endif -%}
{%- endblock stream -%}
'''
class IpynbFormat(KnowledgePostConverter):
_registry_keys = ['ipynb']
@property
def dependencies(self):
return __optional_dependencies__['ipynb']
def from_file(self, filename):
import nbformat
from nbconvert import MarkdownExporter
from jinja2 import DictLoader
from traitlets.config import Config
c = Config()
# c.ExtractOutputPreprocessor.extract_output_types = set()
c.ExtractOutputPreprocessor.output_filename_template = 'images/{unique_key}_{cell_index}_{index}{extension}'
c.NbConvertBase.display_data_priority = ['application/javascript', 'text/html', 'text/markdown',
'image/svg+xml', 'text/latex', 'image/png', 'image/jpeg',
'text/plain']
nb = nbformat.read(filename, as_version=4)
dl = DictLoader({'full.tpl': TEMPLATE})
md_exporter = MarkdownExporter(config=c, extra_loaders=[
dl], template_file='full.tpl')
(body, resources) = md_exporter.from_notebook_node(nb)
self.kp_write(body, images={name.split(
'images/')[1]: data for name, data in resources.get('outputs', {}).items()})
# Add cleaned ipynb file
for cell in nb['cells']:
if cell['cell_type'] == 'code':
cell['outputs'] = [] # remove output data
cell['execution_count'] = None # reset to not executed
self.kp.write_src(os.path.basename(filename), nbformat.writes(nb))
``` |
{
"source": "jhettler/pyqlikengine",
"score": 2
} |
#### File: pyqlikengine/test/test_app_api.py
```python
import unittest
from pyqlikengine.engine_app_api import EngineAppApi
from pyqlikengine.engine_communicator import EngineCommunicator
from pyqlikengine.engine_field_api import EngineFieldApi
from pyqlikengine.engine_global_api import EngineGlobalApi
from pyqlikengine.structs import Structs
from pyqlikengine.engine_generic_object_api import EngineGenericObjectApi
class TestAppApi(unittest.TestCase):
# Constructor to prepare everything before running the tests.
def setUp(self):
url = 'ws://localhost:4848/app'
self.conn = EngineCommunicator(url)
self.ega = EngineGlobalApi(self.conn)
self.eaa = EngineAppApi(self.conn)
self.egoa = EngineGenericObjectApi(self.conn)
self.efa = EngineFieldApi(self.conn)
self.struct = Structs()
self.app = self.ega.create_app("TestApp")['qAppId']
opened_app = self.ega.open_doc(self.app)
self.app_handle = self.ega.get_handle(opened_app['qReturn'])
def test_add_alternate_state(self):
response = self.eaa.add_alternate_state(self.app_handle, "MyState")
self.assertEqual(response, {}, "Failed to add alternate state")
def test_create_hypercube_object(self):
with open('./test/test_data/ctrl00_script.qvs') as f:
script = f.read()
self.eaa.set_script(self.app_handle, script)
self.eaa.do_reload_ex(self.app_handle)
# Create the inline dimension structures
hc_inline_dim1 = Structs.nx_inline_dimension_def(["Alpha"])
hc_inline_dim2 = Structs.nx_inline_dimension_def(["Num"])
# Create a sort structure
hc_mes_sort = Structs.nx_sort_by()
# Create the measure structures
hc_inline_mes1 = Structs.nx_inline_measure_def("=Sum(Num)")
hc_inline_mes2 = Structs.nx_inline_measure_def("=Avg(Num)")
# Create hypercube dimensions from the inline dimension structures
hc_dim1 = Structs.nx_hypercube_dimensions(hc_inline_dim1)
hc_dim2 = Structs.nx_hypercube_dimensions(hc_inline_dim2)
# Create hypercube measures from the inline measure structures
hc_mes1 = Structs.nx_hypercube_measure(hc_mes_sort, hc_inline_mes1)
hc_mes2 = Structs.nx_hypercube_measure(hc_mes_sort, hc_inline_mes2)
# Create the paging model/structure (26 rows and 4 columns)
nx_page = Structs.nx_page(0, 0, 26, 4)
# Create a hypercube definition with arrays of
# hc dims, measures and nxpages
hc_def = Structs.hypercube_def("$",
[hc_dim1, hc_dim2],
[hc_mes1, hc_mes2],
[nx_page])
# Create a Chart object with the hypercube definitions as parameter
hc_response = self.eaa.create_object(self.app_handle,
"CH01",
"Chart",
"qHyperCubeDef",
hc_def)
# Get the handle to the chart object (this may be different
        # in my local repo. I have made some changes to this for
# future versions)
hc_handle = self.ega.get_handle(hc_response['qReturn'])
# Validate the chart object by calling get_layout
self.egoa.get_layout(hc_handle)
# Call the get_hypercube_data to get the resulting json object,
# using the handle and nx page as paramters
hc_data = self.egoa.get_hypercube_data(hc_handle,
"/qHyperCubeDef",
[nx_page])
self.assertTrue(type(hc_data is {}),
"Unexpected type of hypercube data")
first_element_number = hc_data["qDataPages"][0]["qMatrix"][0][0]["qElemNumber"] # NOQA
first_element_text = hc_data["qDataPages"][0]["qMatrix"][0][0]["qText"] # NOQA
self.assertTrue(first_element_number == 0,
"Incorrect value in first element number")
self.assertTrue(first_element_text == 'A',
"Incorrect value in first element text")
def tearDown(self):
self.ega.delete_app(self.app)
self.conn.close_qvengine_connection(self.conn)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhevans/cloud-platform-infrastructure",
"score": 2
} |
#### File: cloud-platform-infrastructure/bin/03-cluster_config.py
```python
import sys
import subprocess
import json
import argparse
import yaml
import os.path
from ast import literal_eval
# Init lists
templates = []
clusters = []
instances = []
nodes = []
masters = []
output = []
# Json output configuration
class literal(str):
pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
# Function to fetch a value from the terraform dictionary
def tf(key):
if terraform[key]['value']:
return terraform[key]['value']
else:
sys.exit(key + ' not found, exit.')
# Dumper config
yaml.SafeDumper.add_representer(literal, literal_presenter)
yaml.SafeDumper.ignore_aliases = lambda *args: True
# Parser config
parser = argparse.ArgumentParser(description='Create Kubernetes clusters.')
parser.add_argument(
'clusterconfig',
help='the base yaml file to create the cluster from')
args = parser.parse_args()
# Load the base kops yaml file located in the repo
if os.path.isfile(args.clusterconfig):
stream = open(args.clusterconfig)
for template in yaml.load_all(stream):
templates.append(template)
stream.close()
else:
sys.exit(args.clusterconfig + ' file not found, exit.')
# Load the terraform outputs from the terraform command
try:
terraform = json.loads(str(subprocess.check_output(
['terraform', 'output', '-json']).decode('utf-8')))
except subprocess.CalledProcessError as e:
print('error executing terraform command, exit.')
# Populate variables from terraform output
cluster_name = tf('cluster_domain_name')
dns_zone = cluster_name
kops_state_store = 's3://' + tf('kops_state_store') + '/' + cluster_name
availability_zones = tf('availability_zones')
master_public_name = 'api.' + cluster_name
network_id = tf('vpc_id')
network_cidr = tf('network_cidr_block')
internal_subnets_ids = tf('internal_subnets_ids')
internal_subnets_cidrs = tf('internal_subnets')
external_subnets_ids = tf('external_subnets_ids')
external_subnets_cidrs = tf('external_subnets')
hosted_zone_id = tf('hosted_zone_id')
# Organize in lists by kind and role
for template in templates:
if template['kind'] == 'Cluster':
clusters.append(template)
if template['kind'] == 'InstanceGroup':
instances.append(template)
if template['spec']['role'] == 'Node':
nodes.append(template)
if template['spec']['role'] == 'Master':
masters.append(template)
# Update all Cluster kind
for template in clusters:
template['metadata']['name'] = cluster_name
policies = []
for item in json.loads(template['spec']['additionalPolicies']['node']):
if item['Resource'] == ['arn:aws:route53:::hostedzone/']:
item['Resource'] = ['arn:aws:route53:::hostedzone/' + hosted_zone_id]
policies.append(item)
template['spec']['additionalPolicies']['node'] = json.dumps(policies)
template['spec']['configBase'] = kops_state_store
template['spec']['dnsZone'] = dns_zone
etcdclusters = []
etcdmembers = []
for az in availability_zones:
etcdmembers.append({'instanceGroup': 'master-' + az, 'name': az[-1:]})
for name in ['main', 'events']:
etcdclusters.append({'name': name, 'etcdMembers': etcdmembers})
template['spec']['etcdClusters'] = etcdclusters
template['spec']['masterPublicName'] = master_public_name
template['spec']['networkCIDR'] = network_cidr
template['spec']['networkID'] = network_id
template['spec'].update({
'topology': {
'dns': {
'type': 'Public'
},
'masters': 'private',
'nodes': 'private'
},
'hooks': [
{
'name': 'authorized-keys-manager.service',
'roles': [ 'Master', 'Node'],
'manifest': tf('authorized_keys_manager_systemd_unit'),
}
],
})
template['spec']['sshKeyName'] = tf('instance_key_name')
subnets = []
if len(internal_subnets_cidrs) == len(
external_subnets_cidrs) == len(availability_zones):
for i in range(len(internal_subnets_cidrs)):
subnets.append(
{
'cidr': internal_subnets_cidrs[i],
'id': internal_subnets_ids[i],
'name': availability_zones[i],
'type': 'Private',
'zone': availability_zones[i]})
for i in range(len(external_subnets_cidrs)):
subnets.append(
{
'cidr': external_subnets_cidrs[i],
'id': external_subnets_ids[i],
'name': 'utility-' + availability_zones[i],
'type': 'Utility',
'zone': availability_zones[i]})
template['spec']['subnets'] = subnets
output.append(template)
# Update all instanceGroup kind
for template in instances:
template['metadata'].update(
{'labels': {'kops.k8s.io/cluster': cluster_name}})
# Update masters
if len(masters) == len(availability_zones):
for i in range(len(masters)):
masters[i]['spec'].update({'subnets': [availability_zones[i]]})
masters[i]['spec'].update({
'nodeLabels': {
'kops.k8s.io/instancegroup': 'master-' + availability_zones[i]
}
})
output.append(masters[i])
# Update nodes
for template in nodes:
template['spec'].update({'subnets': availability_zones})
template['spec'].update(
{'nodeLabels': {'kops.k8s.io/instancegroup': 'nodes'}})
output.append(template)
# Print outputs
for item in output:
print('---')
print(yaml.safe_dump(item, default_flow_style=False))
```
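A self-contained sketch of what the `literal` representer registered above does: strings wrapped in the `literal` class are emitted in YAML block style, which keeps multi-line content such as a systemd unit readable in the generated kops config.
```python
import yaml

class literal(str):
    pass

def literal_presenter(dumper, data):
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')

yaml.SafeDumper.add_representer(literal, literal_presenter)

manifest = literal("[Unit]\nDescription=example\n")
print(yaml.safe_dump({'manifest': manifest}, default_flow_style=False))
# manifest: |
#   [Unit]
#   Description=example
```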
#### File: cloud-platform-infrastructure/bin/99-multiple_clusters.py
```python
import subprocess
import json
import shlex
statestore = 'moj-cp-k8s-investigation-kops'
domain = '.k8s.integration.dsd.io'
def run(command):
process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
        # readline() returns bytes in Python 3, so test for emptiness directly
        if not output and process.poll() is not None:
            break
        if output:
            print(output.decode('utf-8').strip())
rc = process.poll()
return rc
# terraform init
try:
print(subprocess.check_output(['terraform', 'init']))
except subprocess.CalledProcessError as e:
print('error initing terraform, exit.')
# get terraform workspace list
try:
workspaces = subprocess.check_output(
['terraform', 'workspace', 'list']).decode('utf-8').split()
workspaces.remove('*')
except subprocess.CalledProcessError as e:
print('error listing workspaces, exit.')
try:
    # drop the .yaml suffix (str.strip would remove characters, not the suffix)
    yaml = [s.replace('.yaml', '') for s in subprocess.check_output(
        ['ls', '../../kops/']).decode('utf-8').split()]
except subprocess.CalledProcessError as e:
print('error listing cluster yaml files, exit.')
# query the kops state store to list the active clusters
try:
clusters = json.loads(str(subprocess.check_output(
['kops', 'get', 'clusters', '--state=s3://' + statestore,
'--output=json']).decode('utf-8')))
except subprocess.CalledProcessError as e:
print('error listing clusters, exit.')
# generate a list of cluster names only
clusterlist = []
for item in clusters:
clusterlist.append(item['metadata']['name'].replace(domain, ''))
# create the corresponding clusters
for item in yaml:
if item in workspaces and item in clusterlist:
print('cluster ' + item + ' already exists, skipping')
else:
print('creating cluster ' + item)
run('../../bin/00-cluster_pipeline.sh' + ' ' + item)
print('Done.')
```
#### File: aws-iam-suspend-inactive-users/lambda/slack_integration.py
```python
import boto3
import json
import logging
import os
from base64 import b64decode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
slack_channel = os.environ['SLACK_CHANNEL']
slack_hook_url = os.environ['HOOK_URL']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
logger.info("Event: " + str(event))
message = event['Records'][0]['Sns']['Subject'] + '\n' + event['Records'][0]['Sns']['Message']
try:
message = json.loads(message)
except Exception as e:
print(e)
logger.info("Message: " + str(message))
slack_message = {
'channel': slack_channel,
'username': "AWSSlack",
'text': message,
'icon_emoji' : ":ghost:"
}
req = Request(slack_hook_url, json.dumps(slack_message).encode('utf-8'))
try:
response = urlopen(req)
response.read()
logger.info("Message posted to %s", slack_message['channel'])
except HTTPError as e:
logger.error("Request failed: %d %s", e.code, e.reason)
except URLError as e:
logger.error("Server connection failed: %s", e.reason)
```
#### File: aws-s3-enable-encryption-block-public-access/lambda/s3-bucket-enable-default-encryption.py
```python
import boto3, json, datetime, os, sys
from time import gmtime, strftime
from datetime import date
#==================================================================================================
# Function handler
#==================================================================================================
def lambda_handler(event, context):
buckets = {}
buckets['Encryption_Applied'] = []
#buckets['Already_Encrypted'] = []
date_fmt = strftime("%d_%m_%Y_%H:%M:%S", gmtime()) #get to the current date
account_id = context.invoked_function_arn.split(":")[4]
sns_topic_arn = os.environ['TOPIC_ARN']
s3_bucket_exception_list = os.environ['S3_EXCEPTION']
s3client = boto3.client('s3')
print(boto3.__version__)
try:
# describe buckets
list_bucket_response = s3client.list_buckets()
for bucket_dictionary in list_bucket_response['Buckets']:
if bucket_dictionary['Name'] not in s3_bucket_exception_list:
try:
bucket_encryption_response = s3client.get_bucket_encryption(Bucket=bucket_dictionary['Name'])
for rules in bucket_encryption_response['ServerSideEncryptionConfiguration']['Rules']:
for key, value in rules['ApplyServerSideEncryptionByDefault'].items():
if (str(value) in ('AES256','aws:kms')):
print ("\n{0} is already encrypted".format(bucket_dictionary['Name']))
#buckets['Already_Encrypted'].append(bucket_dictionary['Name'])
except:
print ("\n{0} unencrypted".format(bucket_dictionary['Name']))
response = s3client.put_bucket_encryption(
Bucket=bucket_dictionary['Name'],
ServerSideEncryptionConfiguration={
'Rules': [{
'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}
}, ]
})
print ("Default Encryption applied")
buckets['Encryption_Applied'].append(bucket_dictionary['Name'])
if (buckets['Encryption_Applied'] == []):
print ("Nothing to SNS")
else:
# SNS topic Section
sns_client = boto3.client('sns',region_name='eu-west-1')
subject = 'AWS Account - ' + account_id + ' S3 Bucket Encryption Status ' + date_fmt
message_body = '\n' + "Encryption applied to S3 buckets are " + str(buckets)
sns_client.publish(TopicArn=sns_topic_arn, Message=message_body, Subject=subject)
return buckets
except:
err = 'Error'
for e in sys.exc_info():
err += str(e)
print("error {0}".format(err))
``` |
{
"source": "jhewarren/COMP7005_Final_Project",
"score": 3
} |
#### File: COMP7005_Final_Project/src/noise.py
```python
import random
from debug import dump_func_name
import sys
import logging
import time
class noise(object):
# @dump_func_name
def __init__(self, ber, delay):
self.total_packets_sent = 0
self.total_errors = 0
self.logger = logging.getLogger('myapp')
hdlr = logging.FileHandler('./noise.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.INFO)
self.set_err_rate(ber)
self.set_ave_delay(delay)
@dump_func_name
def set_ave_delay(self, delay):
if 2 * delay > sys.maxsize:
self.ave_delay = sys.maxsize / 2
else:
self.ave_delay = delay
# @dump_func_name
def get_delay(self):
self.delay = random.randint(0, 2 * self.ave_delay)
# logger.info('%sms delay',self.delay)
return self.delay
# @dump_func_name
def set_err_rate(self, ber):
self.err_rate = ber
# assume that rate < 1 / maxint is negligible
if self.err_rate > sys.maxsize:
self.err_rate = sys.maxsize
# set error packet as random packet between 1 & ber
self.err_pkt = random.randint(1, self.err_rate)
print("packet", self.err_pkt, " of ", self.err_rate, " will be lost")
# logger.info('BER: 1/',ber,'packet: ',self.err_pkt)
str = "BER:1/%s #%s" % (self.err_rate, self.err_pkt)
self.logger.warning(str)
# @dump_func_name
def is_packet_lost(self):
self.total_packets_sent += 1
# Is_error if total_packets mod ber = error_packet
if (self.total_packets_sent % self.err_rate == self.err_pkt):
self.lost = True
self.total_errors += 1
else:
self.lost = False
# change error packet sequence number on every ber packets
if (self.total_packets_sent % self.err_rate == self.err_rate - 1):
self.set_err_rate(self.err_rate)
self.get_delay()
str = "# %s err: %s BER:1/%s >%s %s - delay %sms" % (self.total_packets_sent,
self.total_errors, self.err_rate, self.err_pkt, self.lost, self.delay)
self.logger.info(str)
time.sleep(self.delay / 1000)
return self.lost
@dump_func_name
def apply_emu(self):
self.get_delay()
self.is_packet_lost()
if __name__ == "__main__":
    # lose 1 in 18 packets, with an average delay of 100 ms
n = noise(18, 100)
for i in range(50):
f = n.is_packet_lost()
if f is True:
print(i, " is lost - ", f)
# else:
# print(i, " is a-okay")
``` |
{
"source": "jhexan/SortableChallenge",
"score": 4
} |
#### File: SortableChallenge/auction/json_obj.py
```python
import json
class JsonObj(json.JSONEncoder):
"""
Class to abstract custom json object decoding/encoding. Inherits from
json.JSONEncoder
Attributes
----------
data : dict
A dictionary of Python decoded objects
properties : dict
A dictionary of Python class properties used to identify this class
object
Methods
-------
loads(class_name, json_str)
Static method to deserialize json_str (a str, bytes or bytearray
instance containing a JSON document) to a
Python object of type class_name
dumps(class_name, json_dict)
Static method to serialize json_dict with objects of type class_name to
a JSON formatted str
"""
_properties = dict()
def __init__(self, data, **kwargs):
"""__init__ method.
Parameters
----------
data : dict
A dictionary of Python decoded objects
**kwargs
Keyword arguments passed to JSONEncoder parent class
"""
super().__init__(**kwargs)
self._data = data
@property
def data(self):
"""dict: A dictionary of Python decoded objects"""
return self._data
@property
def properties(self):
"""dict: A dictionary of Python class properties used to identify
this class object """
return self._properties
@staticmethod
def _object_hook(json_dict):
"""Default object hook for decoding json elements with default types
Parameters
----------
json_dict : dict
Python dictionary with standard type elements
Returns
-------
bool
Python dictionary with decoded elements
"""
return json_dict
@staticmethod
def _get_properties(class_name):
"""Private method to return class properties (attributes with
@property decorator)
Parameters
----------
class_name : str
Class name
Returns
-------
dict
Dictionary containing property name,object key,values
"""
properties = {key: val
for key, val in class_name.__dict__.items()
if isinstance(val, property)}
return properties
@staticmethod
def _is_object(class_name, json_dict):
"""Private method to determine if class with class_name matches
element in json_dict. The method compares class_name properties with
json_dict keys to determine a match.
Parameters
----------
class_name : str
Class name to determine match
json_dict : dict
Python dictionary element
Returns
-------
bool
True if class_name properties matches json_dict keys
"""
if not class_name._properties:
class_name._properties = JsonObj._get_properties(class_name)
return class_name._properties.keys() == json_dict.keys()
@staticmethod
def loads(class_name, json_str):
"""Static method to to deserialize json_str (a str, bytes or
bytearray instance containing a JSON document) to a Python object of
type class_name. This method uses object_hook parameter in
json.loads to do the parsing of custom JsonObj type classes
Parameters
----------
class_name : JsonObj
Class used to decode additional Python objects (e.g. bid objects)
json_str : str
A str, bytes or bytearray instance containing a JSON document
Returns
-------
dict
Python dictionary containing python object elements
"""
return json.loads(json_str, object_hook=class_name._object_hook)
@staticmethod
def dumps(class_name, json_dict):
"""Static method to serialize json_dict with objects of type
class_name to a JSON formatted str
Parameters
----------
class_name : str
Class to serialise additional types
json_dict : dict
A str, bytes or bytearray instance containing a JSON document
Returns
-------
str
JSON formatted str
"""
return json.dumps(json_dict, cls=class_name, indent=4)
```
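A hypothetical sketch of how a concrete type plugs into `JsonObj`: a `Bid` class whose properties mirror the JSON keys, so `_is_object` recognises matching dicts and `_object_hook` promotes them. The field names (`bidder`, `unit`, `bid`) are invented for illustration and not taken from the repository.
```python
from auction.json_obj import JsonObj

class Bid(JsonObj):
    @property
    def bidder(self):
        return self.data['bidder']
    @property
    def unit(self):
        return self.data['unit']
    @property
    def bid(self):
        return self.data['bid']
    @staticmethod
    def _object_hook(json_dict):
        # Promote dicts whose keys match Bid's properties; pass others through.
        if JsonObj._is_object(Bid, json_dict):
            return Bid(json_dict)
        return json_dict

bids = JsonObj.loads(Bid, '[{"bidder": "ACME", "unit": "banner", "bid": 35.0}]')
print(bids[0].bidder, bids[0].bid)
```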
#### File: SortableChallenge/auction/__main__.py
```python
import sys
import os
from auction.manager import AuctionManager
_CONFIG_FILE = 'config.json'
"""str: Constant to config file name"""
_CONFIG_PATH = os.path.dirname(os.path.realpath(__file__))
"""str: Constant to store config path as __main__.py path"""
def _get_config():
"""Method to parse config file _CONFIG_PATH/_CONFIG_FILE and return
contents
Returns
-------
str
config.json content as a string
"""
config = ""
try:
with open(os.path.join(_CONFIG_PATH, _CONFIG_FILE), "r") as file:
config = file.read()
except OSError as err:
print("Cannot read config file: {0}".format(err))
return config
def execute_auctions(auctions):
"""Method to execute auctions and to print the results to screen
Parameters
----------
auctions : str
Auctions as a json string
"""
config = _get_config()
if config:
auction_mgr = AuctionManager(config)
print(auction_mgr.execute_auctions(auctions))
if __name__ == '__main__':
execute_auctions(sys.stdin.read())
``` |