{
"source": "jelu/dsc-datatool",
"score": 2
}
|
#### File: dsc_datatool/generator/client_subnet_country.py
```python
import maxminddb
import os
import logging
from dsc_datatool import Generator, Dataset, Dimension, args
class client_subnet_country(Generator):
reader = None
nonstrict = False
def __init__(self, opts):
Generator.__init__(self, opts)
paths = opts.get('path', ['/var/lib/GeoIP', '/usr/share/GeoIP', '/usr/local/share/GeoIP'])
if not isinstance(paths, list):
paths = [ paths ]
filename = opts.get('filename', 'GeoLite2-Country.mmdb')
db = opts.get('db', None)
if db is None:
for path in paths:
db = '%s/%s' % (path, filename)
if os.path.isfile(db) and os.access(db, os.R_OK):
break
db = None
if db is None:
        raise Exception('Please specify a valid Maxmind database with path=, filename= or db=')
logging.info('Using %s' % db)
self.reader = maxminddb.open_database(db)
if opts.get('nonstrict', False):
self.nonstrict = True
def process(self, datasets):
gen_datasets = []
for dataset in datasets:
if dataset.name != 'client_subnet':
continue
subnets = {}
for d1 in dataset.dimensions:
for d2 in d1.dimensions:
for k, v in d2.values.items():
if k == args.skipped_key:
continue
elif k == args.skipped_sum_key:
continue
if k in subnets:
subnets[k] += v
else:
subnets[k] = v
cc = {}
for subnet in subnets:
try:
c = self.reader.get(subnet)
except Exception as e:
if not self.nonstrict:
raise e
continue
if c:
iso_code = c.get('country', {}).get('iso_code', '??')
if iso_code in cc:
cc[iso_code] += subnets[subnet]
else:
cc[iso_code] = subnets[subnet]
if cc:
ccd = Dataset()
ccd.name = 'client_subnet_country'
ccd.start_time = dataset.start_time
ccd.stop_time = dataset.stop_time
gen_datasets.append(ccd)
ccd1 = Dimension('ClientCountry')
ccd1.values = cc
ccd.dimensions.append(ccd1)
return gen_datasets
import sys
if sys.version_info[0] == 3 and sys.version_info[1] == 5: # pragma: no cover
Generator.__init_subclass__(client_subnet_country)
```
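For reference, a minimal sketch of the per-subnet lookup this generator performs, assuming a GeoLite2 database file is available locally (the path below is hypothetical):

```python
import maxminddb

# Hedged sketch; the database path is an assumption, not part of the repo.
reader = maxminddb.open_database('/usr/share/GeoIP/GeoLite2-Country.mmdb')
record = reader.get('8.8.8.8')  # dict with country info, or None
iso_code = (record or {}).get('country', {}).get('iso_code', '??')
print(iso_code)  # e.g. 'US'
reader.close()
```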
#### File: dsc_datatool/input/dat.py
```python
import re
from dsc_datatool import Input, Dataset, Dimension, process_dataset
_dataset1d = [
'client_subnet_count',
'ipv6_rsn_abusers_count',
]
_dataset2d = {
'qtype': 'Qtype',
'rcode': 'Rcode',
'do_bit': 'D0',
'rd_bit': 'RD',
'opcode': 'Opcode',
'dnssec_qtype': 'Qtype',
'edns_version': 'EDNSVersion',
'client_subnet2_count': 'Class',
'client_subnet2_trace': 'Class',
'edns_bufsiz': 'EDNSBufSiz',
'idn_qname': 'IDNQname',
'client_port_range': 'PortRange',
'priming_responses': 'ReplyLen',
}
_dataset3d = {
'chaos_types_and_names': [ 'Qtype', 'Qname' ],
'certain_qnames_vs_qtype': [ 'CertainQnames', 'Qtype' ],
'direction_vs_ipproto': [ 'Direction', 'IPProto' ],
'pcap_stats': [ 'pcap_stat', 'ifname' ],
'transport_vs_qtype': [ 'Transport', 'Qtype' ],
'dns_ip_version': [ 'IPVersion', 'Qtype' ],
'priming_queries': [ 'Transport', 'EDNSBufSiz' ],
'qr_aa_bits': [ 'Direction', 'QRAABits' ],
}
class DAT(Input):
def process(self, dir):
global _dataset1d, _dataset2d, _dataset3d
datasets = []
for d in _dataset1d:
            if process_dataset and d not in process_dataset:
continue
try:
datasets += self.process1d('%s/%s.dat' % (dir, d), d)
except FileNotFoundError:
pass
for k, v in _dataset2d.items():
            if process_dataset and k not in process_dataset:
continue
try:
datasets += self.process2d('%s/%s.dat' % (dir, k), k, v)
except FileNotFoundError:
pass
for k, v in _dataset3d.items():
            if process_dataset and k not in process_dataset:
continue
try:
datasets += self.process3d('%s/%s.dat' % (dir, k), k, v[0], v[1])
except FileNotFoundError:
pass
return datasets
def process1d(self, file, name):
datasets = []
with open(file, 'r') as f:
for l in f.readlines():
if re.match(r'^#', l):
continue
l = re.sub(r'[\r\n]+$', '', l)
dat = re.split(r'\s+', l)
if len(dat) != 2:
raise Exception('DAT %r dataset %r: invalid number of elements for a 1d dataset' % (file, name))
dataset = Dataset()
dataset.name = name
dataset.start_time = int(dat.pop(0))
dataset.stop_time = dataset.start_time + 60
d1 = Dimension('All')
d1.values = { 'ALL': int(dat[0]) }
dataset.dimensions.append(d1)
datasets.append(dataset)
return datasets
def process2d(self, file, name, field):
datasets = []
with open(file, 'r') as f:
for l in f.readlines():
if re.match(r'^#', l):
continue
l = re.sub(r'[\r\n]+$', '', l)
dat = re.split(r'\s+', l)
dataset = Dataset()
dataset.name = name
dataset.start_time = int(dat.pop(0))
dataset.stop_time = dataset.start_time + 60
d1 = Dimension('All')
d1.value = 'ALL'
dataset.dimensions.append(d1)
d2 = Dimension(field)
while dat:
if len(dat) < 2:
raise Exception('DAT %r dataset %r: invalid number of elements for a 2d dataset' % (file, name))
k = dat.pop(0)
v = dat.pop(0)
d2.values[k] = int(v)
d1.dimensions.append(d2)
datasets.append(dataset)
return datasets
def process3d(self, file, name, first, second):
datasets = []
with open(file, 'r') as f:
for l in f.readlines():
if re.match(r'^#', l):
continue
l = re.sub(r'[\r\n]+$', '', l)
dat = re.split(r'\s+', l)
dataset = Dataset()
dataset.name = name
dataset.start_time = int(dat.pop(0))
dataset.stop_time = dataset.start_time + 60
while dat:
if len(dat) < 2:
                        raise Exception('DAT %r dataset %r: invalid number of elements for a 3d dataset' % (file, name))
k = dat.pop(0)
v = dat.pop(0)
d1 = Dimension(first)
d1.value = k
dataset.dimensions.append(d1)
d2 = Dimension(second)
dat2 = v.split(':')
while dat2:
if len(dat2) < 2:
                            raise Exception('DAT %r dataset %r: invalid number of elements for a 3d dataset' % (file, name))
k2 = dat2.pop(0)
v2 = dat2.pop(0)
d2.values[k2] = int(v2)
d1.dimensions.append(d2)
datasets.append(dataset)
return datasets
import sys
if sys.version_info[0] == 3 and sys.version_info[1] == 5: # pragma: no cover
Input.__init_subclass__(DAT)
```
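As a rough illustration of the 2d file format this parser expects (one line per interval: a start timestamp followed by key/value pairs), here is a hedged sketch; the file name and values are made up, and it assumes `Input()` needs no constructor arguments:

```python
import os
import tempfile

# Hypothetical input; real .dat files are produced by the DSC collector.
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'qtype.dat')
    with open(path, 'w') as f:
        f.write('1570000000 A 10 AAAA 3\n')
    ds = DAT().process2d(path, 'qtype', 'Qtype')[0]
    print(ds.start_time, ds.dimensions[0].dimensions[0].values)
    # 1570000000 {'A': 10, 'AAAA': 3}
```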
#### File: dsc_datatool/output/influxdb.py
```python
import re
import sys
import atexit
from dsc_datatool import Output, args
_re = re.compile(r'([,=\s])')
def _key(key):
return re.sub(_re, r'\\\1', key)
def _val(val):
ret = re.sub(_re, r'\\\1', val)
if ret == '':
return '""'
return ret
def _process(tags, timestamp, dimension, fh):
if dimension.dimensions is None:
return
if len(dimension.dimensions) > 0:
if not (dimension.name == 'All' and dimension.value == 'ALL'):
tags += ',%s=%s' % (_key(dimension.name.lower()), _val(dimension.value))
for d2 in dimension.dimensions:
_process(tags, timestamp, d2, fh)
return
if dimension.values is None:
return
if len(dimension.values) > 0:
tags += ',%s=' % _key(dimension.name.lower())
for k, v in dimension.values.items():
print('%s%s value=%s %s' % (tags, _val(k), v, timestamp), file=fh)
class InfluxDB(Output):
start_timestamp = True
fh = None
def __init__(self, opts):
Output.__init__(self, opts)
timestamp = opts.get('timestamp', 'start')
if timestamp == 'start':
pass
elif timestamp == 'stop':
            self.start_timestamp = False
else:
raise Exception('timestamp option invalid')
file = opts.get('file', None)
append = opts.get('append', False)
if file:
if append:
self.fh = open(file, 'a')
else:
self.fh = open(file, 'w')
atexit.register(self.close)
else:
self.fh = sys.stdout
if opts.get('dml', False):
print('# DML', file=self.fh)
database = opts.get('database', None)
if database:
print('# CONTEXT-DATABASE: %s' % database, file=self.fh)
def close(self):
if self.fh:
self.fh.close()
self.fh = None
def process(self, datasets):
for dataset in datasets:
tags = '%s,server=%s,node=%s' % (_key(dataset.name.lower()), args.server, args.node)
if self.start_timestamp:
timestamp = dataset.start_time * 1000000000
else:
                timestamp = dataset.stop_time * 1000000000
for d in dataset.dimensions:
_process(tags, timestamp, d, self.fh)
if sys.version_info[0] == 3 and sys.version_info[1] == 5: # pragma: no cover
Output.__init_subclass__(InfluxDB)
```
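The output is InfluxDB line protocol, i.e. `measurement,tag=value,... value=<n> <ns-timestamp>`. A quick sketch of the escaping helpers in isolation (using `_key` and `_val` as defined above):

```python
# _key/_val escape commas, equals signs and whitespace for line protocol.
print(_key('client subnet'))  # client\ subnet
print(_val('a,b=c'))          # a\,b\=c
print(_val(''))               # ""
```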
#### File: dsc_datatool/transformer/re_ranger.py
```python
import re
from dsc_datatool import Transformer, args
_key_re = re.compile(r'^(?:(\d+)|(\d+)-(\d+))$')
class ReRanger(Transformer):
key = None
func = None
allow_invalid_keys = None
range = None
split_by = None
def __init__(self, opts):
Transformer.__init__(self, opts)
self.key = opts.get('key', 'mid')
self.func = opts.get('func', 'sum')
self.allow_invalid_keys = opts.get('allow_invalid_keys', False)
self.range = opts.get('range', None)
if self.allow_invalid_keys != False:
self.allow_invalid_keys = True
if self.range is None:
raise Exception('range must be given')
m = re.match(r'^/(\d+)$', self.range)
if m is None:
raise Exception('invalid range')
self.split_by = int(m.group(1))
if self.key != 'low' and self.key != 'mid' and self.key != 'high':
raise Exception('invalid key %r' % self.key)
if self.func != 'sum':
raise Exception('invalid func %r' % self.func)
def _process(self, dimension):
global _key_re
if not dimension.values:
for d2 in dimension.dimensions:
self._process(d2)
return
values = dimension.values
dimension.values = {}
skipped = None
for k, v in values.items():
low = None
high = None
m = _key_re.match(k)
if m:
low, low2, high = m.group(1, 2, 3)
if high is None:
low = int(low)
high = low
else:
low = int(low2)
high = int(high)
elif k == args.skipped_key:
continue
elif k == args.skipped_sum_key:
if skipped is None:
skipped = v
else:
skipped += v
continue
elif self.allow_invalid_keys:
dimension.values[k] = v
continue
else:
raise Exception('invalid key %r' % k)
if self.key == 'low':
nkey = low
elif self.key == 'mid':
nkey = int(low + ( (high - low) / 2 ))
else:
nkey = high
nkey = int(nkey / self.split_by) * self.split_by
low = nkey
high = nkey + self.split_by - 1
if self.func == 'sum':
if low != high:
nkey = '%d-%d' % (low, high)
else:
nkey = str(nkey)
if nkey in dimension.values:
dimension.values[nkey] += v
else:
dimension.values[nkey] = v
if skipped:
dimension.values['skipped'] = skipped
def process(self, datasets):
for dataset in datasets:
for dimension in dataset.dimensions:
self._process(dimension)
import sys
if sys.version_info[0] == 3 and sys.version_info[1] == 5: # pragma: no cover
Transformer.__init_subclass__(ReRanger)
```
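A hedged sketch of the bucketing behavior: with `range=/10` and `key=mid`, numeric keys and `low-high` range keys are collapsed into width-10 buckets and their values summed. `Dimension` is assumed importable from dsc_datatool, and the Transformer base class is assumed to accept a plain opts dict:

```python
from dsc_datatool import Dimension

rr = ReRanger({'range': '/10', 'key': 'mid'})
dim = Dimension('EDNSBufSiz')
dim.values = {'5': 2, '12-14': 3, '17': 1}
rr._process(dim)
print(dim.values)  # {'0-9': 2, '10-19': 4}
```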
|
{
"source": "jelumpp/mplcursors",
"score": 2
}
|
#### File: jelumpp/mplcursors/setupext.py
```python
from distutils.version import LooseVersion
from functools import partial
import inspect
import re
from pathlib import Path
import setuptools
from setuptools import Extension, find_namespace_packages, find_packages
from setuptools.command.develop import develop
from setuptools.command.install_lib import install_lib
if LooseVersion(setuptools.__version__) < "40.1": # find_namespace_packages
raise ImportError("setuptools>=40.1 is required")
__all__ = ["Extension", "find_namespace_packages", "find_packages", "setup"]
_pth_hooks = []
class pth_hook_mixin:
def run(self):
super().run()
for fname, name, source in _pth_hooks:
with Path(self.install_dir, fname).open("w") as file:
file.write("import os; exec({!r}); {}()"
.format(source, name))
def get_outputs(self):
return (super().get_outputs()
+ [str(Path(self.install_dir, fname))
for fname, _, _ in _pth_hooks])
def setup(**kwargs):
cmdclass = kwargs.setdefault("cmdclass", {})
cmdclass["develop"] = type(
"develop_with_pth_hook",
(pth_hook_mixin, cmdclass.get("develop", develop)),
{})
cmdclass["install_lib"] = type(
"install_lib_with_pth_hook",
(pth_hook_mixin, cmdclass.get("install_lib", install_lib)),
{})
setuptools.setup(**kwargs)
def register_pth_hook(fname, func=None):
if func is None:
return partial(register_pth_hook, fname)
source = inspect.getsource(func)
if not re.match(r"\A@setup\.register_pth_hook.*\ndef ", source):
raise SyntaxError("register_pth_hook must be used as a toplevel "
"decorator to a function")
_, source = source.split("\n", 1)
d = {}
exec(source, {}, d)
if set(d) != {func.__name__}:
raise SyntaxError(
"register_pth_hook should define a single function")
_pth_hooks.append((fname, func.__name__, source))
setup.register_pth_hook = register_pth_hook
```
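Presumably driven from a setup.py like the following sketch (names are hypothetical). Note the decorator's own source check requires it to be spelled exactly `@setup.register_pth_hook` at top level:

```python
from setupext import setup

@setup.register_pth_hook("my_hook.pth")
def _my_hook():  # hypothetical hook; runs at interpreter startup once installed
    print("hook running")

setup(name="mypkg", version="0.1")
```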
|
{
"source": "jemand2001/knausj_talon",
"score": 2
}
|
#### File: knausj_talon/code/mouse.py
```python
import os
import pathlib
import subprocess
from talon import (Context, Module, actions, app, clip, cron, ctrl, imgui,
                   noise, settings, ui)
from talon_plugins import eye_mouse, eye_zoom_mouse, speech
from talon_plugins.eye_mouse import (config, toggle_camera_overlay,
                                     toggle_control)
key = actions.key
self = actions.self
scroll_amount = 0
click_job = None
scroll_job = None
gaze_job = None
cancel_scroll_on_pop = True
default_cursor = {
"AppStarting": r"%SystemRoot%\Cursors\aero_working.ani",
"Arrow": r"%SystemRoot%\Cursors\aero_arrow.cur",
"Hand": r"%SystemRoot%\Cursors\aero_link.cur",
"Help": r"%SystemRoot%\Cursors\aero_helpsel.cur",
"No": r"%SystemRoot%\Cursors\aero_unavail.cur",
"NWPen": r"%SystemRoot%\Cursors\aero_pen.cur",
"Person": r"%SystemRoot%\Cursors\aero_person.cur",
"Pin": r"%SystemRoot%\Cursors\aero_pin.cur",
"SizeAll": r"%SystemRoot%\Cursors\aero_move.cur",
"SizeNESW": r"%SystemRoot%\Cursors\aero_nesw.cur",
"SizeNS": r"%SystemRoot%\Cursors\aero_ns.cur",
"SizeNWSE": r"%SystemRoot%\Cursors\aero_nwse.cur",
"SizeWE": r"%SystemRoot%\Cursors\aero_ew.cur",
"UpArrow": r"%SystemRoot%\Cursors\aero_up.cur",
"Wait": r"%SystemRoot%\Cursors\aero_busy.ani",
"Crosshair": "",
"IBeam": "",
}
# todo figure out why notepad++ still shows the cursor sometimes.
hidden_cursor = os.path.join(
os.path.dirname(os.path.realpath(__file__)), r"Resources\HiddenCursor.cur"
)
mod = Module()
mod.list(
"mouse_button", desc="List of mouse button words to mouse_click index parameter"
)
setting_mouse_enable_pop_click = mod.setting(
"mouse_enable_pop_click",
type=int,
default=0,
desc="Enable pop to click when control mouse is enabled.",
)
setting_mouse_enable_pop_stops_scroll = mod.setting(
"mouse_enable_pop_stops_scroll",
type=int,
default=0,
desc="When enabled, pop stops continuous scroll modes (wheel upper/downer/gaze)",
)
setting_mouse_wake_hides_cursor = mod.setting(
"mouse_wake_hides_cursor",
type=int,
default=0,
desc="When enabled, mouse wake will hide the cursor. mouse_wake enables zoom mouse.",
)
setting_mouse_hide_mouse_gui = mod.setting(
"mouse_hide_mouse_gui",
type=int,
default=0,
desc="When enabled, the 'Scroll Mouse' GUI will not be shown.",
)
setting_mouse_continuous_scroll_amount = mod.setting(
"mouse_continuous_scroll_amount",
type=int,
default=80,
desc="The default amount used when scrolling continuously",
)
setting_mouse_wheel_down_amount = mod.setting(
"mouse_wheel_down_amount",
type=int,
default=120,
desc="The amount to scroll up/down (equivalent to mouse wheel on Windows by default)",
)
continuous_scroll_mode = ""
@imgui.open(x=700, y=0, software=False)
def gui_wheel(gui: imgui.GUI):
gui.text("Scroll mode: {}".format(continuous_scoll_mode))
gui.line()
if gui.button("Wheel Stop [stop scrolling]"):
actions.user.mouse_scroll_stop()
@mod.action_class
class Actions:
def mouse_show_cursor():
"""Shows the cursor"""
show_cursor_helper(True)
def mouse_hide_cursor():
"""Hides the cursor"""
show_cursor_helper(False)
def mouse_wake():
"""Enable control mouse, zoom mouse, and disables cursor"""
eye_zoom_mouse.toggle_zoom_mouse(True)
# eye_mouse.control_mouse.enable()
if setting_mouse_wake_hides_cursor.get() >= 1:
show_cursor_helper(False)
def mouse_calibrate():
"""Start calibration"""
eye_mouse.calib_start()
def mouse_toggle_control_mouse():
"""Toggles control mouse"""
toggle_control(not config.control_mouse)
def mouse_toggle_camera_overlay():
"""Toggles camera overlay"""
toggle_camera_overlay(not config.show_camera)
def mouse_toggle_zoom_mouse():
"""Toggles zoom mouse"""
eye_zoom_mouse.toggle_zoom_mouse(not eye_zoom_mouse.zoom_mouse.enabled)
def mouse_cancel_zoom_mouse():
"""Cancel zoom mouse if pending"""
if (
eye_zoom_mouse.zoom_mouse.enabled
and eye_zoom_mouse.zoom_mouse.state != eye_zoom_mouse.STATE_IDLE
):
eye_zoom_mouse.zoom_mouse.cancel()
def mouse_drag():
"""(TEMPORARY) Press and hold/release button 0 depending on state for dragging"""
if 1 not in ctrl.mouse_buttons_down():
# print("start drag...")
ctrl.mouse_click(button=0, down=True)
# app.notify("drag started")
else:
# print("end drag...")
ctrl.mouse_click(button=0, up=True)
# app.notify("drag stopped")
def mouse_sleep():
"""Disables control mouse, zoom mouse, and re-enables cursor"""
eye_zoom_mouse.toggle_zoom_mouse(False)
toggle_control(False)
show_cursor_helper(True)
stop_scroll()
if 1 in ctrl.mouse_buttons_down():
actions.user.mouse_drag()
def mouse_scroll_down():
"""Scrolls down"""
mouse_scroll(setting_mouse_wheel_down_amount.get())()
def mouse_scroll_down_continuous():
"""Scrolls down continuously"""
        global continuous_scroll_mode
        continuous_scroll_mode = "scroll down continuous"
        mouse_scroll(setting_mouse_continuous_scroll_amount.get())()
        if scroll_job is None:
            start_scroll()
        if setting_mouse_hide_mouse_gui.get() == 0:
            gui_wheel.show()
def mouse_scroll_up():
"""Scrolls up"""
mouse_scroll(-setting_mouse_wheel_down_amount.get())()
def mouse_scroll_up_continuous():
"""Scrolls up continuously"""
        global continuous_scroll_mode
        continuous_scroll_mode = "scroll up continuous"
mouse_scroll(-setting_mouse_continuous_scroll_amount.get())()
if scroll_job is None:
start_scroll()
if setting_mouse_hide_mouse_gui.get() == 0:
gui_wheel.show()
def mouse_scroll_stop():
"""Stops scrolling"""
stop_scroll()
def mouse_gaze_scroll():
"""Starts gaze scroll"""
        global continuous_scroll_mode
        continuous_scroll_mode = "gaze scroll"
start_cursor_scrolling()
if setting_mouse_hide_mouse_gui.get() == 0:
gui_wheel.show()
def copy_mouse_position():
"""Copy the current mouse position coordinates"""
position = ctrl.mouse_pos()
clip.set(repr(position))
def mouse_move_center_active_window():
"""move the mouse cursor to the center of the currently active window"""
rect = ui.active_window().rect
ctrl.mouse_move(rect.left + (rect.width / 2), rect.top + (rect.height / 2))
def show_cursor_helper(show):
"""Show/hide the cursor"""
if app.platform == "windows":
import ctypes
import winreg
import win32con
try:
Registrykey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, r"Control Panel\Cursors", 0, winreg.KEY_WRITE
)
for value_name, value in default_cursor.items():
if show:
winreg.SetValueEx(
Registrykey, value_name, 0, winreg.REG_EXPAND_SZ, value
)
else:
winreg.SetValueEx(
Registrykey, value_name, 0, winreg.REG_EXPAND_SZ, hidden_cursor
)
winreg.CloseKey(Registrykey)
ctypes.windll.user32.SystemParametersInfoA(
win32con.SPI_SETCURSORS, 0, None, 0
)
except WindowsError:
print("Unable to show_cursor({})".format(str(show)))
else:
ctrl.cursor_visible(show)
def on_pop(active):
if gaze_job or scroll_job:
if setting_mouse_enable_pop_stops_scroll.get() >= 1:
stop_scroll()
elif (
not eye_zoom_mouse.zoom_mouse.enabled
and eye_mouse.mouse.attached_tracker is not None
):
if setting_mouse_enable_pop_click.get() >= 1:
ctrl.mouse_click(button=0, hold=16000)
noise.register("pop", on_pop)
def mouse_scroll(amount):
def scroll():
global scroll_amount
if (scroll_amount >= 0) == (amount >= 0):
scroll_amount += amount
else:
scroll_amount = amount
actions.mouse_scroll(y=int(amount))
return scroll
def scroll_continuous_helper():
global scroll_amount
# print("scroll_continuous_helper")
if scroll_amount and (
eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_IDLE
): # or eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_SLEEP):
actions.mouse_scroll(by_lines=False, y=int(scroll_amount / 10))
def start_scroll():
global scroll_job
scroll_job = cron.interval("60ms", scroll_continuous_helper)
# if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
# eye_zoom_mouse.zoom_mouse.sleep(True)
def gaze_scroll():
# print("gaze_scroll")
if (
eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_IDLE
): # or eye_zoom_mouse.zoom_mouse.state == eye_zoom_mouse.STATE_SLEEP:
x, y = ctrl.mouse_pos()
# the rect for the window containing the mouse
rect = None
# on windows, check the active_window first since ui.windows() is not z-ordered
if app.platform == "windows" and ui.active_window().rect.contains(x, y):
rect = ui.active_window().rect
else:
windows = ui.windows()
for w in windows:
if w.rect.contains(x, y):
rect = w.rect
break
if rect is None:
# print("no window found!")
return
midpoint = rect.y + rect.height / 2
amount = int(((y - midpoint) / (rect.height / 10)) ** 3)
actions.mouse_scroll(by_lines=False, y=amount)
# print(f"gaze_scroll: {midpoint} {rect.height} {amount}")
def stop_scroll():
global scroll_amount, scroll_job, gaze_job
scroll_amount = 0
if scroll_job:
cron.cancel(scroll_job)
if gaze_job:
cron.cancel(gaze_job)
scroll_job = None
gaze_job = None
gui_wheel.hide()
# if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
# eye_zoom_mouse.zoom_mouse.sleep(False)
def start_cursor_scrolling():
global scroll_job, gaze_job
stop_scroll()
gaze_job = cron.interval("60ms", gaze_scroll)
# if eye_zoom_mouse.zoom_mouse.enabled and eye_mouse.mouse.attached_tracker is not None:
# eye_zoom_mouse.zoom_mouse.sleep(True)
```
|
{
"source": "jemand2001/python-utils",
"score": 3
}
|
#### File: utils/decorators/template.py
```python
from typing import Type, TypeVar, Callable
from functools import lru_cache
__all__ = ['template']
T = TypeVar('T')
def template(*args: str):
"""A template decorator.
This decorator is supposed to act similarly to C++'s `template` keyword for classes.
Example:
```py
@template('test')
class Example:
def f(self):
return self.test
example = Example(test=12)()
print(example.f()) # prints 12
```
"""
def decorator(cls: Type) -> Callable[..., type]:
@lru_cache(None)
def wrapper(**kwargs):
masked = {k: v for k, v in kwargs.items() if k in args}
name = f'{cls.__qualname__}(' + ', '.join(f'{k}={v}' for k, v in masked.items()) + ')'
meta = type(cls)
actual = meta(name, tuple(cls.mro())[1:], cls.__dict__ | masked)
return actual
return wrapper
return decorator
```
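A short usage sketch. Note the two-step call: the decorated class returns a new, cached class, which is then instantiated. The dict union `cls.__dict__ | masked` requires Python 3.9+:

```python
@template('size')
class Buffer:
    def capacity(self):
        return self.size

BufferOf8 = Buffer(size=8)     # a new class, cached per argument set
print(BufferOf8().capacity())  # 8
```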
#### File: utils/types_/__init__.py
```python
__all__ = ['NaturalNumber', 'StrictNaturalNumber']
class NaturalNumberMeta(type):
"""metaclass"""
def __instancecheck__(self, instance):
return isinstance(instance, int) and instance >= 0
class StrictNaturalNumberMeta(type):
"""metaclass"""
def __instancecheck__(self, instance):
return isinstance(instance, int) and instance >= 1
class NaturalNumber(metaclass=NaturalNumberMeta):
"""A natural number is any non-negative integer"""
pass
class StrictNaturalNumber(metaclass=StrictNaturalNumberMeta):
"""A strict natural number is any integer bigger than 0"""
pass
```
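The metaclasses hook `isinstance()` directly, so plain ints pass the check without any subclassing:

```python
print(isinstance(5, NaturalNumber))        # True
print(isinstance(0, StrictNaturalNumber))  # False
print(isinstance(-3, NaturalNumber))       # False
```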
|
{
"source": "jemarulanda/microservicioMapeo",
"score": 2
}
|
#### File: microservicioMapeo/app/app.py
```python
import json
import os
from rabbitmq import RabbitMQ
from pika import exceptions
from parameter import Parameter
from send_grid import SendGrid
from traceability import Traceability
from transform import Transform
import uuid
class App:
'''class Application'''
@classmethod
def __init__(cls):
'''Method init'''
        cls.accountName = os.getenv('ACCOUNT_NAME')
        print('cls.accountName ', cls.accountName)
        cls.accountKey = os.getenv('ACCOUNT_KEY')
        print('cls.accountKey ', cls.accountKey)
        cls.config = Parameter(cls.accountName, cls.accountKey).get_parameters()
@classmethod
def callback(cls, channel, method, properties, body):
'''Receive message '''
try:
del properties
transaction_id = str(uuid.uuid4())
businessKey = cls.config['traceability']['businessKey']
data = json.loads(body.decode('utf-8'))
#print(data)
#ibmmq(**cls.config['traceability']).send_json('message')
#Traceability(**cls.config['traceability']).save(
# businessKey,transaction_id,"Desencolar topico",
# "Subscriber-Callback", "IN", str(data),
# "OK", "Mensaje recibido")
print('Transform.transformacion(data)', Transform.transformacion(data))
except Exception as error:
print(error)
SendGrid().create_message(
cls.config['sendGrid']['apiKey'],
cls.config['sendGrid']['fromEmail'],
cls.config['sendGrid']['toEmail'],
str(error))
#Traceability(**cls.config['traceability']).save(
# businessKey,transaction_id,"Error en la calidad del mensaje enviado",
# "Subscriber", "IN", str(body),
# "ERROR", "Lectura Fallida, "+str(error))
finally:
channel.basic_ack(delivery_tag=method.delivery_tag)
@classmethod
def main(cls):
while True:
try:
objqueue = RabbitMQ(**cls.config['source'])
objqueue.connect()
objqueue.channel.basic_consume(
queue=cls.config['source']['queue'],
on_message_callback=cls.callback,
auto_ack=False
)
#cls.traceability = Traceability(**cls.config['traceability'])
try:
objqueue.channel.start_consuming()
except KeyboardInterrupt:
objqueue.disconnect()
objqueue.channel.stop_consuming()
break
except (exceptions.ConnectionClosedByBroker,exceptions.AMQPChannelError,exceptions.AMQPConnectionError) as error_connection:
                print('Connection to RabbitMQ closed:', error_connection)
continue
if __name__ == '__main__':
App().main()
```
#### File: microservicioMapeo/app/send_grid.py
```python
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
class SendGrid(object):
@classmethod
    def create_message(cls, api_key: str, from_email: str, emails: list, message: str):
for email in emails:
message_send = Mail(
from_email=from_email,
to_emails=email['email'],
subject='Integración WMSOrdenesCompra Publish',
html_content=f"<strong>El sistema presenta la siguiente novedad:</strong></br><p>{message}</p>"
)
cls.send_message(api_key, message_send)
@classmethod
    def send_message(cls, api_key: str, message: Mail):
try:
sg = SendGridAPIClient(api_key)
sg.send(message)
except Exception as e:
print(str(e))
```
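A hedged usage sketch; note that `emails` is expected to be a list of dicts with an `'email'` key. All values here are placeholders:

```python
SendGrid().create_message(
    api_key='SG.xxxxx',                     # placeholder API key
    from_email='alerts@example.com',
    emails=[{'email': 'ops@example.com'}],
    message='Queue consumer failed')
```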
#### File: microservicioMapeo/app/transform.py
```python
class Transform:
    '''Main transformation class'''
@classmethod
def toIon(cls, source):
data_header = source.get('IMI','')
data_detail = source.get('ID','')
new_data = '<PurchaseOrder>\n'+ \
' <PurchaseOrderHeader>\n'+ \
' <DocumentID>\n'+ \
' <ID accountingEntity="WHSE1">'+data_header.get('eaoid_ordersce','').strip()+'</ID>\n'+ \
' </DocumentID>\n'+ \
' <Note>'+data_header.get('eaoim_comme01','').strip()+data_header.get('eaoim_comme02','').strip()+ \
data_header.get('eaoim_comme03','').strip()+data_header.get('eaoim_comme04','').strip()+ \
data_header.get('eaoim_comme05','').strip()+'</Note>\n'+ \
' <DocumentDateTime>'+data_header.get('eaoim_dtorder','').strip()+'</DocumentDateTime>\n'+ \
' <ExpectedrReceiptDate>'+data_header.get('eaoim_dtreq','').strip()+'</ExpectedrReceiptDate>\n'+ \
' <Status>\n'+ \
' <Code>Open</Code>\n'+ \
' <ArchiveIndicator>false</ArchiveIndicator>\n'+ \
' </Status>\n'+ \
' <SupplierParty>\n'+ \
' <PartyIDs>\n'+ \
' <ID>'+data_header.get('eaoim_owner','').strip()+'</ID>\n'+ \
' </PartyIDs>\n'+ \
' </SupplierParty>\n'+ \
' <ShipToParty>\n'+ \
' <Location type="Warehouse">\n'+ \
' <ID accountingEntity="">'+data_header.get('eaoim_dummy6401','').strip()+'</ID>\n'+ \
' </Location>\n'+ \
' </ShipToParty>\n'+ \
' <SUSR1>'+data_header.get('eaoim_ordtyp','').strip()+'</SUSR1>\n'+ \
' <SUSR2>'+data_header.get('eaoim_grpcod1','').strip()+'</SUSR2>\n'+ \
' <SUSR3>'+data_header.get('eaoim_grpcod2','').strip()+'</SUSR3>\n'+ \
' <SUSR4>'+data_header.get('eaoim_grpcod3','').strip()+'</SUSR4>\n'+ \
' <TOTALORDERLINES>'+data_header.get('eaoim_totalid','').strip()+'</TOTALORDERLINES>\n'+ \
' </PurchaseOrderHeader>\n'
for detail in data_detail:
new_data = new_data + ' <PurchaseOrderLine>\n'+ \
' <LineNumber>'+detail.get('eaoid_lineitm','').strip()+'</LineNumber>\n'+ \
' <Note>'+detail.get('eaoid_comme01','').strip()+detail.get('eaoid_comme02','').strip()+ \
detail.get('eaoid_comme03','').strip()+detail.get('eaoid_comme04','').strip()+ \
detail.get('eaoid_comme05','').strip()+'</Note>\n'+ \
' <Status>\n'+ \
' <Code>Open</Code>\n'+ \
' <ArchiveIndicator>false</ArchiveIndicator>\n'+ \
' </Status>\n'+ \
' <Item>\n'+ \
' <ItemID>\n'+ \
' <ID accountingEntity="EXITO">'+detail.get('eaoid_part','').strip()+'</ID>\n'+ \
' </ItemID>\n'+ \
' <ServiceIndicator>false</ServiceIndicator>\n'+ \
' </Item>\n'+ \
' <Quantity unitCode="pza">'+detail.get('eaoid_ordqty','').strip()+'</Quantity>\n'+ \
' <BaseUOMQuantity unitCode="pza">'+detail.get('eaoid_ordqty','').strip()+'</BaseUOMQuantity>\n'+ \
' <ShipToParty>\n'+ \
' <Location type="Warehouse">\n'+ \
' <ID accountingEntity="">'+detail.get('eaoid_wareh','').strip()+'</ID>\n'+ \
' </Location>\n'+ \
' </ShipToParty>\n'+ \
' <SUSR1>'+detail.get('eaoid_vuser1','').strip()+'</SUSR1>\n'+ \
' <SUSR2>'+detail.get('eaoid_vuser3','').strip()+'</SUSR2>\n'+ \
' <SUSR3>'+detail.get('eaoid_dummy6403','').strip()+'</SUSR3>\n'+ \
' </PurchaseOrderLine>\n'
new_data = new_data + '</PurchaseOrder>'
return new_data
@classmethod
def transformacion(cls, source):
response = cls.toIon(source)
return response
```
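A hedged sketch of the input shape `transformacion` expects: a dict with an `'IMI'` header dict and an `'ID'` list of detail dicts. Field names are taken from the code above; the values are invented:

```python
sample = {
    'IMI': {'eaoid_ordersce': 'PO-123 ', 'eaoim_owner': 'ACME '},
    'ID': [{'eaoid_lineitm': '1 ', 'eaoid_part': 'SKU-9 ', 'eaoid_ordqty': '10 '}],
}
print(Transform.transformacion(sample))  # XML string: <PurchaseOrder>...</PurchaseOrder>
```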
|
{
"source": "jeMATHfischer/OECD_Scrapper_Selenium",
"score": 3
}
|
#### File: jeMATHfischer/OECD_Scrapper_Selenium/simple_scraper.py
```python
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import TimeoutException
import time
file_path = '/home/jens/Documents_Ubuntu/'
options = Options()
options.add_argument('--headless')  # comment this line out to watch the browser
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', file_path)
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
driver = webdriver.Firefox(firefox_profile=profile, options=options)
driver.get("https://stats.oecd.org/")
begin = driver.find_element_by_id("browsethemes")
begin = begin.text.split('\n')
def tree_disolver(highest_level_names=begin):
    '''
    -> Runs through the whole tree starting from a given top level.
    -> The given top level has to be visible after the driver loads the page.
    -> highest_level_names has to contain elements of begin, given as a single list.
    '''
if len(highest_level_names) != 0:
for name in highest_level_names:
            # replace normal spaces with HTML non-breaking spaces so names match the page text
name = name.replace(" ", "\u00a0")
single_path_closed = '//li[contains(@class ,"t closed") and span[text()="{}"]]'.format(name)
single_path_opened = '//li[contains(@class ,"t opened") and span[text()="{}"]]'.format(name)
parent_click = driver.find_elements_by_xpath(single_path_closed)
if len(parent_click) == 1:
parent_click = parent_click[0]
parent_click.click()
else:
for item in parent_click:
if item.is_displayed():
item.click()
increase_depth = driver.find_elements_by_xpath(single_path_opened + '/ul/li/span')
if name in [e.get_attribute("innerHTML") for e in increase_depth]:
# Difficulties arise if parent has child with same name parent_name.
# The branch parent_name - parent_name has to be evaluated taking two steps.
parent_click = driver.find_elements_by_xpath(single_path_closed)
double_path = '//li[contains(@class ,"t closed") and span[text()="{}"]]/ul/li/span[text()="{}"]'.format(name, name)
double_increase_depth = driver.find_elements_by_xpath(double_path + '/ul/li/span')
tree_disolver([e.get_attribute("innerHTML") for e in double_increase_depth])
if len(parent_click) == 1:
parent_click = parent_click[0]
parent_click.click()
else:
for item in parent_click:
                        if item.is_displayed():
item.click()
else:
tree_disolver([e.get_attribute("innerHTML") for e in increase_depth])
def download_clicker(download_section):
    '''
    -> Navigates through open tree sections to the download dialog window.
    -> Applies to all full data sets in the given download section.
    -> The download_section has to be visible and all sublevels need to be unfolded.
    -> Use only after tree_disolver.
    '''
ds_xpath = '//li[span[text() = "{}"]]//a[contains(@class ,"ds")]'.format(download_section)
ds_elements = driver.find_elements_by_xpath(ds_xpath)
for e_ds in ds_elements:
e_ds.click()
try:
Export_Button = WebDriverWait(driver, 40).until(EC.element_to_be_clickable((By.ID, 'export-icon')))
Export_Button.click()
        except TimeoutException:
print('Timeout while determining position of export button.')
try:
csv_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//a[span[@id='export-csv-icon']]")))
csv_button.click()
csv_button.click()
        except TimeoutException:
print('Timeout while determining position of csv category.')
try:
iframe_choice = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//iframe[@id="DialogFrame"]')))
driver.switch_to.frame(iframe_choice)
download = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//input[@id = "_ctl12_btnExportCSV"]')))
ActionChains(driver).click(download).perform()
driver.switch_to.default_content()
        except TimeoutException:
print('Timeout while determining position of download button.')
try:
close = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//span[contains(@class,"ui-icon ui-icon-closethick")]')))
close.click()
        except TimeoutException:
print('Timeout while determining position of exit button.')
tree_disolver(['Labour', 'Globalisation'])
#download_clicker('Africapolis')
time.sleep(10)
driver.quit()
```
|
{
"source": "jemaw/klvdata",
"score": 2
}
|
#### File: klvdata/klvdata/setparser.py
```python
from pprint import pformat
from abc import ABCMeta
from abc import abstractmethod
from collections import OrderedDict
from klvdata.element import Element
from klvdata.element import UnknownElement
from klvdata.klvparser import KLVParser
class SetParser(Element, metaclass=ABCMeta):
"""Parsable Element. Not intended to be used directly. Always as super class."""
_unknown_element = UnknownElement
def __init__(self, value, key_length=1):
"""All parser needs is the value, no other information"""
super().__init__(self.key, value)
self.key_length = key_length
self.items = OrderedDict()
self.parse()
def __getitem__(self, key):
"""Return element provided bytes key.
For consistency of this collection of modules, __getitem__ does not
attempt to add convenience of being able to index by the int equivalent.
Instead, the user should pass keys with method bytes.
"""
return self.items[bytes(key)]
def parse(self):
"""Parse the parent into items. Called on init and modification of parent value.
If a known parser is not available for key, parse as generic KLV element.
"""
for key, value in KLVParser(self.value, self.key_length):
try:
self.items[key] = self.parsers[key](value)
except (KeyError, ValueError, TypeError):
self.items[key] = self._unknown_element(key, value)
@classmethod
def add_parser(cls, obj):
"""Decorator method used to register a parser to the class parsing repertoire.
obj is required to implement key attribute supporting bytes as returned by KLVParser key.
"""
        # If subclass of ElementParser does not implement key, dict accepts key of
# type property object. bytes(obj.key) will raise TypeError. ElementParser
# requires key as abstract property but no raise until instantiation which
# does not occur because the value is never recalled and instantiated from
# parsers.
cls.parsers[bytes(obj.key)] = obj
return obj
@property
@classmethod
@abstractmethod
def parsers(cls):
# Property must define __getitem__
pass
@parsers.setter
@classmethod
@abstractmethod
def parsers(cls):
# Property must define __setitem__
pass
def __repr__(self):
return pformat(self.items, indent=1)
def __str__(self):
return str_dict(self.items)
def MetadataList(self):
''' Return metadata dictionary'''
metadata = {}
def repeat(items, indent=1):
for item in items:
try:
metadata[item.TAG] = (item.LDSName, item.ESDName, item.UDSName, str(item.value.value))
                except AttributeError:
                    pass
if hasattr(item, 'items'):
repeat(item.items.values(), indent + 1)
repeat(self.items.values())
return OrderedDict(metadata)
def structure(self):
print(str(type(self)))
def repeat(items, indent=1):
for item in items:
print(indent * "\t" + str(type(item)))
if hasattr(item, 'items'):
repeat(item.items.values(), indent+1)
repeat(self.items.values())
def str_dict(values):
out = []
    def per_item(value, indent=0):
        for item in value:
            out.append(indent * "\t" + str(item))
per_item(values)
return '\n'.join(out)
```
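For orientation, a hedged sketch of the registration pattern a concrete set presumably uses: a subclass supplies a class-level `parsers` dict and registers element parsers with the decorator. The keys here are illustrative, not klvdata's real tags:

```python
class ExampleSet(SetParser):
    key = b'\x01'
    parsers = {}  # satisfies the abstract `parsers` property for this sketch

@ExampleSet.add_parser
class ExampleElement(Element):
    key = b'\x02'  # bytes key matched against KLVParser output
```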
|
{
"source": "jemay/where-in-the-world",
"score": 3
}
|
#### File: jemay/where-in-the-world/server.py
```python
from flask import Flask, render_template, request
import psycopg2
import psycopg2.extras
app = Flask(__name__)
def connectToDB():
connectionString = 'dbname=world user=searcher password=<PASSWORD>clo host=localhost'
try:
return psycopg2.connect(connectionString)
except:
print("Can't connect to database.")
@app.route('/', methods=['GET', 'POST'])
def mainIndex():
#Show the main page
if request.method == 'GET':
return render_template('index.html', selectedMenu='Home')
#Show the search page
elif request.method == 'POST':
        conn = connectToDB()
        cur = conn.cursor()
try:
query = {'place': request.form['worldSearch']}
print("Query found")
cur.execute("SELECT name, code, continent FROM Country WHERE name = %(place)s OR Code = %(place)s OR Continent = %(place)s;", query)
headers = ['Country', 'Code', 'Continent']
if cur.rowcount == 0:
headers = ['City', 'District', 'Country']
cur.execute("SELECT name, district, countryCode FROM City WHERE name = %(place)s OR district = %(place)s;", query)
except:
print("ERROR executing SELECT")
try:
searchResults = cur.fetchall()
except:
return render_template('index.html', selectedMenu='Nothing')
if cur.rowcount == 0:
return render_template('index.html', selectedMenu='Nothing')
return render_template('index.html', selectedMenu='Find', results=searchResults, headers=headers)
"""
@app.route('/find', methods=['POST'])
def find():
return render_template('find.html')
"""
if __name__ == '__main__':
app.debug=True
app.run(host='0.0.0.0', port=8080)
```
|
{
"source": "Jembe/jembe-demo",
"score": 2
}
|
#### File: jembe-demo/jembe_demo/__init__.py
```python
import os
from flask import Flask
from . import jmb, db, commands
def create_app(config=None):
from . import models, views, pages
app = Flask(__name__, instance_relative_config=True)
# app.config.from_mapping({SECRET_KEY="dev",})
if config is not None:
if isinstance(config, dict):
app.config.from_mapping(config)
elif config.endswith(".py"):
app.config.from_pyfile(config)
else:
app.config.from_pyfile("config.py", silent=True)
try:
os.makedirs(app.instance_path)
except OSError:
pass
jmb.init_jembe(app)
db.init_db(app)
views.init_views(app)
commands.init_commands(app)
return app
```
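A hedged sketch of how this factory might be driven; the config key below is invented for illustration:

```python
# Pass a mapping, a path to a .py file, or nothing (falls back to the
# instance-relative config.py).
app = create_app({'SECRET_KEY': 'dev'})
app.run(debug=True)
```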
#### File: jembe_demo/pages/demo_upload.py
```python
from typing import TYPE_CHECKING, Optional, Union, List, Any
from jembe import Component, File, listener
from wtforms import Form, BooleanField, FileField, validators
from jembe_demo.jmb import jmb
# from wtforms import Form, FileField
if TYPE_CHECKING:
from flask import Response
from jembe import Event, ComponentConfig
class DemoUploadSimple(Component):
"""
    Uploads a file to public or private storage and
    redisplays itself showing the uploaded file
"""
def __init__(self, photo: Optional[File] = None, upload_to_public: bool = False):
if photo is not None and photo.in_temp_storage():
            # TODO check if photo is an actual photo
if upload_to_public:
photo.move_to_public()
else:
photo.move_to_private()
photo.grant_access()
if photo and photo.in_temp_storage():
self.state.photo = None
super().__init__()
class MultiUploadSimple(Component):
"""
    Uploads multiple files to public or private storage and
    redisplays itself showing the uploaded files
"""
def __init__(
self, photos: Optional[List[File]] = None, upload_to_public: bool = True
):
if photos is not None:
for photo in photos:
if photo.in_temp_storage():
                    # TODO check if photo is an actual photo
if upload_to_public:
photo.move_to_public()
else:
photo.move_to_private()
photo.grant_access()
else:
self.state.photos = list()
super().__init__()
class PhotoForm(Form):
photo = FileField("Photo", [validators.regexp("^[^/\\]\.[jpg|png]$")])
upload_to_public = BooleanField(
"Upload to public storage", [validators.input_required()], default=True
)
class DemoUploadWtForm(Component):
"""Uses wtForm and simulates saving to database by processing photo in save() action"""
def __init__(self, form: Optional[PhotoForm] = None):
if form is None:
form = PhotoForm()
if (
form.photo.data is not None
and form.photo.data.in_temp_storage()
):
if form.upload_to_public.data:
form.photo.data.move_to_public()
else:
form.photo.data.move_to_private()
form.photo.data.grant_access()
self.state.form = form
super().__init__()
@classmethod
def dump_init_param(cls, name: str, value: Any) -> Any:
if name == "form":
result = value.data.copy() if value is not None else dict()
if "photo" in result and result["photo"] is not None:
result ["photo"] = File.dump_init_param(result ["photo"])
return result
return super().dump_init_param(name, value) # type:ignore
@classmethod
def load_init_param(cls, config: "ComponentConfig", name: str, value: Any) -> Any:
if name == "form":
if "photo" in value and value["photo"] is not None:
value["photo"] = File.load_init_param(value["photo"])
return PhotoForm(data=value)
return super().load_init_param(config, name, value) # type:ignore
@jmb.page(
"demo_upload",
Component.Config(
components=dict(
simple=DemoUploadSimple, wtform=DemoUploadWtForm, multi=MultiUploadSimple
)
),
)
class DemoUploadPage(Component):
def __init__(self, display_mode: str = "simple"):
if display_mode not in self._config.components_configs.keys():
self.state.display_mode = list(self._config.components_configs.keys())[0]
super().__init__()
@listener(event="_display", source="./*")
def on_child_display(self, event: "Event"):
self.state.display_mode = event.source_name
def display(self) -> Union[str, "Response"]:
return self.render_template_string(
"""
<html>
<head>
<link rel="stylesheet" href="{{ url_for('static', filename='css/jembe_demo.css') }}">
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css">
</head>
<body>
<div class="container">
{% include "global_nav.html" %}
<nav>
<a href="{{component('simple').url}}" jmb-on:click.stop.prevent="{{component().jrl}}">Simple Upload</a>
<a href="{{component('multi').url}}" jmb-on:click.stop.prevent="{{component().jrl}}">Multiple Upload</a>
<a href="{{component('wtform').url}}" jmb-on:click.stop.prevent="{{component().jrl}}">WTForm Upload</a>
</nav>
{{component(display_mode)}}
</div>
<script src="{{ url_for('jembe.static', filename='js/jembe.js') }}"></script>
</body><html>"""
)
```
|
{
"source": "jemberton/haccuweather",
"score": 2
}
|
#### File: jemberton/haccuweather/__init__.py
```python
from . import weather
DOMAIN = 'haccuweather'
def setup(hass, config):
def call_api(call):
weather._LOGGER.info("Haccuweather is calling API manually/forcefully ...")
weather.haccuweather_entity.call_api()
hass.services.register(DOMAIN, 'call_api', call_api)
return True
```
|
{
"source": "jembrown/revbayes_kernel",
"score": 2
}
|
#### File: revbayes_kernel/revbayes_kernel/install.py
```python
import json
import os
import sys
try:
from jupyter_client.kernelspec import install_kernel_spec
except ImportError:
from IPython.kernel.kernelspec import install_kernel_spec
from IPython.utils.tempdir import TemporaryDirectory
kernel_json = {
"argv": [sys.executable,
"-m", "revbayes_kernel",
"-f", "{connection_file}"],
"display_name": "RevBayes",
"language": "bash",
"mimetype": "text/x-rsrc",
"codemirror_mode": "R",
"name": "revbayes_kernel",
}
def install_my_kernel_spec(user=True):
with TemporaryDirectory() as td:
os.chmod(td, 0o755) # Starts off as 700, not user readable
with open(os.path.join(td, 'kernel.json'), 'w') as f:
json.dump(kernel_json, f, sort_keys=True)
kernel_name = kernel_json['name']
try:
install_kernel_spec(td, kernel_name, user=user,
replace=True)
except:
install_kernel_spec(td, kernel_name, user=not user,
replace=True)
def _is_root():
try:
return os.geteuid() == 0
except AttributeError:
return False # assume not an admin on non-Unix platforms
def main(argv=[]):
user = '--user' in argv or not _is_root()
install_my_kernel_spec(user=user)
if __name__ == '__main__':
main(argv=sys.argv)
```
#### File: revbayes_kernel/revbayes_kernel/kernel.py
```python
from __future__ import print_function
import os
import re
import uuid
from metakernel import MetaKernel, ProcessMetaKernel, REPLWrapper, u
from metakernel.pexpect import which
from . import __version__
STDIN_PROMPT = '> '
STDIN_PROMPT_REGEX = re.compile(r'%s' % STDIN_PROMPT)
HELP_LINKS = [
{
'text': "RevBayes",
'url': "https://revbayes.org",
},
{
'text': "RevBayes Kernel",
'url': "https://github.com/sdwfrost/revbayes_kernel",
},
] + MetaKernel.help_links
class RevBayesKernel(ProcessMetaKernel):
implementation = 'RevBayes Kernel'
    implementation_version = __version__
language = 'Rev'
help_links = HELP_LINKS
_revbayes_engine = None
@property
def language_info(self):
return {'mimetype': 'text/x-rsrc',
'name': 'RevBayes',
'file_extension': '.Rev',
'help_links': HELP_LINKS,
'pygments_lexer': 'R',
'codemirror_mode': {'name': 'r' }}
@property
def banner(self):
msg = 'RevBayes Kernel v%s'
return msg % (__version__)
@property
def revbayes_engine(self):
if self._revbayes_engine:
return self._revbayes_engine
self._revbayes_engine = RevBayesEngine(error_handler=self.Error,
stdin_handler=self.raw_input,
stream_handler=self.Print,
logger=self.log)
return self._revbayes_engine
def makeWrapper(self):
"""Start an RevBayes process and return a :class:`REPLWrapper` object.
"""
return self.revbayes_engine.repl
def do_execute_direct(self, code, silent=False):
if code.strip() in ['q()', 'quit()']:
self._revbayes_engine = None
self.do_shutdown(True)
return
val = ProcessMetaKernel.do_execute_direct(self, code, silent=silent)
return val
def get_kernel_help_on(self, info, level=0, none_on_fail=False):
obj = info.get('help_obj', '')
if not obj or len(obj.split()) > 1:
if none_on_fail:
return None
else:
return ""
return self.revbayes_engine.eval('?%s' % obj, silent=True)
def Print(self, *args, **kwargs):
# Ignore standalone input hook displays.
out = []
for arg in args:
if arg.strip() == STDIN_PROMPT:
return
if arg.strip().startswith(STDIN_PROMPT):
arg = arg.replace(STDIN_PROMPT, '')
out.append(arg)
super(RevBayesKernel, self).Print(*out, **kwargs)
def raw_input(self, text):
# Remove the stdin prompt to restore the original prompt.
text = text.replace(STDIN_PROMPT, '')
return super(RevBayesKernel, self).raw_input(text)
class RevBayesEngine(object):
def __init__(self, error_handler=None, stream_handler=None,
stdin_handler=None,
logger=None):
self.logger = logger
self.executable = self._get_executable()
self.repl = self._create_repl()
self.error_handler = error_handler
self.stream_handler = stream_handler
self.stdin_handler = stdin_handler
self._startup()
def eval(self, code, timeout=None, silent=False):
"""Evaluate code using the engine.
"""
stream_handler = None if silent else self.stream_handler
if self.logger:
self.logger.debug('RevBayes eval:')
self.logger.debug(code)
try:
resp = self.repl.run_command(code.rstrip(),
timeout=timeout,
stream_handler=stream_handler,
stdin_handler=self.stdin_handler)
resp = resp.replace(STDIN_PROMPT, '')
if self.logger and resp:
self.logger.debug(resp)
return resp
except KeyboardInterrupt:
return self._interrupt(True)
except Exception as e:
if self.error_handler:
self.error_handler(e)
else:
raise e
def _startup(self):
here = os.path.realpath(os.path.dirname(__file__))
self.eval('setwd("%s")' % here.replace(os.path.sep, '/'))
def _create_repl(self):
cmd = self.executable
        # Interactive mode prevents crashing on Windows on syntax errors.
repl = REPLWrapper(cmd_or_spawn=cmd,
prompt_regex=r'[>+] $',
prompt_change_cmd=None)
if os.name == 'nt':
repl.child.crlf = '\n'
repl.interrupt = self._interrupt
# Remove the default 50ms delay before sending lines.
repl.child.delaybeforesend = None
return repl
def _interrupt(self, silent=False):
if (os.name == 'nt'):
msg = '** Warning: Cannot interrupt RevBayes on Windows'
if self.stream_handler:
self.stream_handler(msg)
elif self.logger:
self.logger.warn(msg)
return self._interrupt_expect(silent)
return REPLWrapper.interrupt(self.repl)
def _interrupt_expect(self, silent):
repl = self.repl
child = repl.child
expects = [repl.prompt_regex, child.linesep]
expected = uuid.uuid4().hex
repl.sendline('print("%s");' % expected)
if repl.prompt_emit_cmd:
repl.sendline(repl.prompt_emit_cmd)
lines = []
while True:
# Prevent a keyboard interrupt from breaking this up.
while True:
try:
pos = child.expect(expects)
break
except KeyboardInterrupt:
pass
if pos == 1: # End of line received
line = child.before
if silent:
lines.append(line)
else:
self.stream_handler(line)
else:
line = child.before
if line.strip() == expected:
break
if len(line) != 0:
# prompt received, but partial line precedes it
if silent:
lines.append(line)
else:
self.stream_handler(line)
return '\n'.join(lines)
def _get_executable(self):
"""Find the best RevBayes executable.
"""
executable = os.environ.get('REVBAYES_JUPYTER_EXECUTABLE', None)
if not executable or not which(executable):
if which('rb-jupyter'):
executable = 'rb-jupyter'
else:
                msg = ('RevBayes executable not found; add it to PATH or set the '
                       '"REVBAYES_JUPYTER_EXECUTABLE" environment variable. '
                       'See README.md for instructions to build rb-jupyter.')
executable = executable.replace(os.path.sep, '/')
return executable
```
|
{
"source": "jemcghee3/60001",
"score": 4
}
|
#### File: jemcghee3/60001/ch10fe4.py
```python
import datetime
class Person(object):
def __init__(self, name):
"""Assumes name a string. Create a person"""
self._name = name
try:
last_blank = name.rindex(' ')
self._last_name = name[last_blank+1:]
except:
self._last_name = name
        self._birthday = None  # the book's code omits the leading underscore
def get_name(self):
"""Returns self's full name"""
return self._name
def get_last_name(self):
"""Returns self's last name"""
return self._last_name
def set_birthday(self, birthdate):
"""Assumes birthdate is of type datetime.date
Sets self's birthday to birthdate"""
self._birthday = birthdate
def get_age(self):
"""Returns self's current age in days"""
if self._birthday == None:
raise ValueError
return (datetime.date.today() - self._birthday).days
def __lt__(self, other):
"""Assume other a Person
Returns True if self precedes other in alphabetical
order, and False otherwise. Comparison is based on last
names, but if these are the same full names are
compared."""
if self._last_name == other._last_name:
return self._name < other._name
return self._last_name < other._last_name
def __str__(self):
"""Returns self's name"""
return self._name
class MIT_Person(Person):
_next_id_num = 0 #identification number
def __init__(self, name):
super().__init__(name)
self._id_num = MIT_Person._next_id_num
MIT_Person._next_id_num += 1
def get_id_num(self):
return self._id_num
def __lt__(self, other):
return self._id_num < other._id_num
class Student(MIT_Person):
pass
class UG(Student):
def __init__(self, name, class_year):
super().__init__(name)
self._year = class_year
def get_class(self):
return self._year
class Grad(Student):
pass
class Grades(object):
def __init__(self):
"""Create empty grade book"""
self._students = []
self._grades = {}
self._is_sorted = True
def add_student(self, student):
"""Assumes: student is of type Student
Add student to the grade book"""
if student in self._students:
raise ValueError('Duplicate student')
self._students.append(student)
self._grades[student.get_id_num()] = []
self._is_sorted = False
def add_grade(self, student, grade):
"""Assumes: grade is a float
Add grade to the list of grades for student"""
try:
self._grades[student.get_id_num()].append(grade)
except:
raise ValueError('Student not in mapping')
def get_grades(self, student):
"""Return a list of grades for student"""
try:
return self._grades[student.get_id_num()][:]
except:
raise ValueError('Student not in mapping')
def get_students(self):
"""Return a sorted list of the students in the grade book"""
if not self._is_sorted:
self._students.sort()
self._is_sorted = True
for s in self._students:
yield s
    # Finger exercise: add to Grades a generator that meets the specification
def get_students_above(self, grade):
"""Return the students a mean grade > g one at a time"""
for s in self.get_students():
if sum(self.get_grades(s))/len(self.get_grades(s)) > grade:
yield s
book = Grades()
j = Grad('Julie')
l = Grad('Lisa')
k = Grad('Katherine')
book.add_student(j)
book.add_student(l)
book.add_student(k)
book.add_grade(j, 100)
book.add_grade(j, 10)
book.add_grade(j, 0)
book.add_grade(l, 20)
book.add_grade(l, 10)
book.add_grade(l, 1)
book.add_grade(k, 0)
book.add_grade(k, 1)
book.add_grade(k, 19)
for s in book.get_students():
print(s)
print('')
for s in book.get_students_above(10):
print(s)
```
#### File: jemcghee3/60001/ch5fe3.py
```python
def f(L1, L2):
"""L1, L2 lists of same length of numbers
returns the sum of raising each element in L1
to the power of the element in the same index in L2
For example, f([1,2], [2,3]) returns 9"""
    return sum(map(lambda x, y: x ** y, L1, L2))
list1 = [2, 6, 4]
list2 = [1, 2, 3]
print(f(list1, list2))
```
#### File: jemcghee3/60001/ch7fe1.py
```python
import calendar
def shopping_days(year):
"""year a number >= 1941
returns the number of days between U.S. Thanksgiving
and Christmas in year"""
# calculate the date of Thanksgiving each year
turkey_day = find_thanksgiving(year)
# calculate how many more days are after Thanksgiving in November
nov_shopping_days = november_days_left(turkey_day)
# add the 24 day in December before christmas
dec_shopping_days = 24
# return the sum
return nov_shopping_days + dec_shopping_days
def find_thanksgiving(year):
"""code given in book
year a number >= 1941
returns the date in November for Thanksgiving that year"""
month = calendar.monthcalendar(year, 11)
if month[0][calendar.THURSDAY] != 0:
thanksgiving = month[3][calendar.THURSDAY]
else:
thanksgiving = month[4][calendar.THURSDAY]
return thanksgiving
def find_thanksgiving_checker():
for i in range(1941,2022):
print(f'In {i}, Thanksgiving was on November {find_thanksgiving(i)}')
# find_thanksgiving_checker()
def november_days_left(d):
"""d is an integer representing the date in November for Thanksgiving
returns the remaining days in November"""
return 30 - d
def shopping_days_checker():
for i in range(1941,2022):
print(f'In {i}, there were {shopping_days(i)} shopping days between Thanksgiving and Christmas.')
shopping_days_checker()
```
#### File: jemcghee3/60001/ch7fe2.py
```python
import calendar
def shopping_days(year):
"""year a number > 1957
returns the number of days between Canadian Thanksgiving
and Christmas in year"""
# calculate the date of Canadian Thanksgiving each year
turkey_day = find_canadian_thanksgiving(year) # do Canadians eat turkey for Thanksgiving?
# calculate how many more days are after Thanksgiving in October
oct_shopping_days = october_days_left(turkey_day)
# add the 30 days in November
nov_shopping_days = 30
# add the 24 day in December before christmas
dec_shopping_days = 24
# return the sum
return oct_shopping_days + nov_shopping_days + dec_shopping_days
def find_canadian_thanksgiving(year):
"""code given in book for November, modified for October
year a number >= 1941
returns the date in October for Canadian Thanksgiving that year"""
month = calendar.monthcalendar(year, 10)
if month[0][calendar.MONDAY] != 0:
thanksgiving = month[1][calendar.MONDAY]
else:
thanksgiving = month[2][calendar.MONDAY]
return thanksgiving
def find_canadian_thanksgiving_checker():
for i in range(1958,2022):
print(f'In {i}, Thanksgiving was on October {find_canadian_thanksgiving(i)}')
# find_canadian_thanksgiving_checker()
def october_days_left(d):
"""d is an integer representing the date in October for Canadian Thanksgiving
returns the remaining days in October"""
return 31 - d
def shopping_days_checker():
for i in range(1958,2022):
print(f'In {i}, there were {shopping_days(i)} shopping days between Canadian Thanksgiving and Christmas.')
shopping_days_checker()
```
#### File: jemcghee3/60001/ch7fe3.py
```python
def fib(n):
"""Assumes n int >= 0
Returns Fibonacci of n"""
if n == 0 or n == 1:
return 1
else:
return fib(n-1) + fib(n-2)
with open('fib_file.txt', 'w') as fib_handle:
for i in range(10):
fib_handle.write(str(fib(i)) + '\n')
with open('fib_file.txt', 'r') as fib_handle:
for line in fib_handle:
print(line)
```
|
{
"source": "jemcmahan13/pygll",
"score": 3
}
|
#### File: jemcmahan13/pygll/emitter.py
```python
from stock import GOBJ
import re
import sys
from itertools import count
log = False
class GrammarTerm(object):
def __init__(self):
self.endOf = set() # productions this term ends (for follow set)
# Keep track of which Terminals and Nonterminals follow this one;
# used to compute the follow set
self.followers = set()
def findfollows(self):
follow = set()
for x in self.endOf:
if x != self:
self.followers.update(x.follow)
for x in self.followers:
follow.update(set(y for y in x.first if y))
return follow
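# A sketch of the bookkeeping convention used above: 'endOf' links a term to
# the heads of the productions it terminates (so FOLLOW(head) flows into the
# term), while 'followers' records the terms appearing immediately after it
# (their FIRST sets, minus epsilon, flow into this term's FOLLOW set).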
class Terminal(GrammarTerm):
def __init__(self, name, pattern):
super().__init__()
self.name = name
self.pattern = re.compile(pattern)
self._follow = None
self.compiled = True
@property
def first(self):
return set((self,)) # the terminal is a first token
@property
def follow(self):
if self._follow:
return self._follow
follow = self.findfollows()
self._follow = follow
return self._follow
def compile(self, caller=None):
return
def __call__(self, token):
# See if the given token matches this terminal
if log:
print("Checking token ", token, "against ", self.name)
return self.name == token
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}:{}".format(self.name, self.pattern.pattern)
class Nonterminal(GrammarTerm):
def __init__(self, name):
super().__init__()
self.name = name
self.productions = []
self._first = set()
self._follow = set()
self.compiled = False
self.top = False
self.rules = [] # will eventually hold tuples of (first-set, production) for parsing
def addProduction(self, prod):
self.productions.append(prod)
@property
def first(self):
if self._first:
return self._first
first = set()
if isinstance(self, Set):
for prod in self.productions:
for p in prod.prod:
first.update(p.first)
else:
for prod in self.productions:
first.update(prod.first)
self._first = first
return first
@property
def follow(self):
if self._follow:
return self._follow
if self.top:
self._follow.add(Terminal('EOF', r'\Z')) # end of input terminal
if log:
print("Follow of ", self, self._follow)
self._follow.update(self.findfollows())
if self in self.followers:
self.followers.remove(self)
for f in self.followers:
if log:
print("Getting first set of {}: {}".format(f, f.first))
self._follow.update(set(x for x in f.first if x))
if log:
print(self._follow, '\n')
return self._follow
def root(self):
# This nonterminal is the top level (start) symbol
# Add EOF to its follow set
# print("Root called on ", self)
try:
self.compile()
self.top = True
except RecursionError as e:
print("RecursionError: Are you sure your grammar has no left-recursion?")
sys.exit(1)
def compile(self, caller=None):
if self.compiled:
return
self.compiled = True
for prod in self.productions:
if prod != caller:
prod.compile()
def __str__(self):
return "{}".format(self.name)
def __repr__(self):
return "{}: {}".format(self.name, ' | '.join([str(x) for x in self.productions]))
class Production(object):
''' A Production is an ordered list of terms (Terminals and Nonterminals).
All terms should already exist before declaring Productions.
Once all Productions exist, one can compute first then follow sets.
'''
def __init__(self, head, *args):
# head is the LHS (deriving Nonterminal)
# args should be a sequence of Nonterminals and Terminals
# Null production represents epsilon
self.prod = args
self._first = None
self._follow = set()
self.head = head
self.head.addProduction(self)
self.compiled = False
self.pclass = None
#print(head, type(head), args, [type(x) for x in args])
# Note which terms follow which
for i,arg in enumerate(args):
if i < len(args)-1:
arg.followers.add(args[i+1])
# Still might have other terms at end of this if last -> epsilon,
# but we can't check for that until we have the first sets.
# Do that in function updateEnds()
@property
def first(self):
if self._first:
return self._first
# otherwise, compute first set
if not self.prod[0]: # epsilon
return set((None,))
first = set()
for term in self.prod:
fs = term.first
if None not in fs: # we can stop here
first.update(fs)
self._first = first
return first
else: # nonterminal could be epsilon, so keep going
first.update(set(x for x in fs if x)) # add everything but epsilon
self._first = first
return first
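    # In short: FIRST(X1 X2 ... Xn) accumulates FIRST(Xi) left to right and
    # stops at the first Xi that cannot derive epsilon (None stands in for
    # the epsilon production throughout).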
def updateEnds(self):
# Call this function after firsts are done, but before follows
if not self.prod[0]:
return # epsilon
self.prod[-1].endOf.add(self.head) # tell terms when they end productions
last = 0
finding = True # find term that reflects productions follow set
for i, arg in enumerate(reversed(self.prod)):
if None in arg.first and i < len(self.prod)-1:
# if a term can go to epsilon, then the one before it gets its follow set
                term = self.prod[len(self.prod) - 2 - i]  # the term immediately before arg (i counts from the end)
term.followers.update(arg.followers)
term.endOf.update(arg.endOf)
if finding:
last += 1
else:
finding = False
self.last = len(self.prod) - last
def compile(self):
        # Do some bookkeeping needed before follow sets can be computed, and build
# a map of terminal_list -> production
if self.compiled:
return
self.compiled = True
self.head.rules.append((self.first, self))
if not self.prod[0]:
return # epsilon
self.updateEnds()
for t in self.prod:
t.compile(self)
@property
def follow(self):
# Call only after grammar is finished and all followers have been added
if self._follow:
return self._follow
        term = self.prod[self.last]
        self._follow = term.follow
        return self._follow
def __str__(self):
return "{} => {}".format(self.head, ' '.join(map(str, self.prod)))
class Operator(Nonterminal):
def __init__(self, name, *args):
super().__init__(name)
self.args = args
self.name = name
class Repeat(Operator):
def __init__(self, *args):
super().__init__(*args)
self.optype = "Repeat"
class Set(Operator):
def __init__(self, *args):
super().__init__(*args)
self.optype = "Set"
class Optional(Operator):
def __init__(self, *args):
super().__init__(*args)
self.optype = "Optional"
class Emitter(object):
def __init__(self, tree):#, grammar):
self.tree = tree # parse tree made of lists
self.start = None # name of root grammar rule
self.parser = '' # build up parser classes/functions here
self.objs = GOBJ # put named class definitions here
self.tokenmap = {} # maps string to Terminal object
self.namemap = {} # maps string to Nonterminal object
self.operators = [] # maps string name to Operator object
self.lexmap = [] # (name, regex) pairs for lexer
self.itercount = count(0) # need unique number for anonymous variables
def count(self):
return next(self.itercount)
def findterms(self, root):
# Find the terminals, nonterminals, and productions in the grammar
# Requires root grammar nonterminal
nonterms = []
prods = []
terms = []
stack = [root, ]
seen = set()
while stack:
term = stack.pop(0)
if term in seen:
continue
seen.add(term)
nonterms.append(term)
for prod in term.productions:
prods.append(prod)
for item in prod.prod:
if isinstance(item, Nonterminal):
stack.append(item)
elif isinstance(item, Terminal):
terms.append(item)
self.nonterminals = nonterms
#self.productions = prods
#self.terminals = terms
def emit(self):
#print("emit", self.tree)
tree = self.tree
rootdecl = tree[0]
toktree = tree[1]
grammars = tree[2]
assert(rootdecl[0] == "%root")
assert(toktree[0] == "%tokens")
assert(grammars[0] == "%grammar")
root = rootdecl[1]
self.gettokens(toktree)
tree = grammars[1]
self.start = tree.decl.dname # save root rule for parsing
prods = []
while tree:
prods += self.emitdecl(tree.decl)
tree = tree.rest
# prods += self.operators
for op in self.operators:
prods.append(op)
if op[0] in self.namemap and isinstance(self.namemap[op[0]], Optional):
prods.append((op[0], [None,], op[2]))
# Now that all terminal and nonterminals seen and created, instantiate productions
allmap = self.namemap.copy()
allmap.update(self.tokenmap)
allmap[None] = None # for epsilon terms
#print('\n'.join(map(str,prods)))
for name, args, binding in prods:
# print("Generating production for", name, "with", args)
# print(self.namemap)
# for i, term in enumerate(args):
# if isinstance(term, Operator):
# nt = Nonterminal("anonymousNonterminal{}_{}".format(self.count())
# self.namemap[nt.name] = nt
# self.allmap[nt.name] = nt
# p = Production(self.namemap[term.arg[0]], *[allmap[x] for x in term.args[1:]])
x = Production(self.namemap[name], *[allmap[x] for x in args])
if binding:
s = ''
s += "class {}(GrammarObj):\n".format(binding[0])
s += " def __init__(self, *args):\n"
s += " super().__init__(*args)\n"
s += " self.name = \"{}\"\n".format(binding[0])
# for i,b in enumerate(binding[1:]):
# s += " self.{} = args[{}]\n".format(b, i)
self.objs += s + '\n'
x.pclass = binding
else:
x.pclass = None
# Emit lexer map
self.objs += "LEXMAP = {}\n\n".format(self.lexmap).replace('\\\\','\\')
# Find all the nonterminals and make a parse function for each
root = self.namemap[self.start]
root.compile()
self.findterms(self.namemap[self.start])
for nt in self.nonterminals:
self.emitfunc(nt)
s = ''
s += " def _parseRoot(self):\n"
s += " return self.parse{}()\n\n".format(root)
self.parser += s
def emitdecl(self, decl):
name = decl.dname
alt = decl.alt
alts = decl.alts
if any([x(name) for x in self.tokenmap.values()]): # see if name is token
pass
elif name not in self.namemap: # otherwise, it's a nonterminal
self.namemap[name] = Nonterminal(name)
#print(name, alt)
prods = [self.emitalt(name, alt),]
while alts:
prods.append(self.emitalt(name, alts.alt))
alts = alts.alts
return prods
def emitalt(self, name, alt):
if not alt: # epsilon
binding = None
exp = None
else:
exp = alt.exp
exps = alt.exps
binding = alt.bind
if binding:
bnames = self.getbinding(binding)
else:
bnames = None
#print (name, term)
if not exp:
return (name, [None,], bnames)
args = [self.emitexp(exp),] + self.emitexps(exps)
return (name, args, bnames)
def emitexps(self, exps):
ret = []
while exps:
ret.append(self.emitexp(exps.exp))
exps = exps.exps
return ret
def emitexp(self, exp):
if exp.name == "exprepeat":
exps = self.emitexps(exp.exps)
name = "_anon_{}{}".format("Repeat", self.count())
res = name
self.namemap[name] = Repeat(name, *exps)
self.operators.append((name, exps, None))
# self.operators[res.name] = res # track that this nonterminal is a special operator
elif exp.name == "expset":
exps = self.emitexps(exp.exps)
name = "_anon_{}{}".format("Set", self.count())
res = name
self.namemap[name] = Set(name, *exps)
self.operators.append((name, exps, None))
# self.operators[res.name] = Set(name, exps)
elif exp.name == "eopt":
exps = self.emitexps(exp.exps)
name = "_anon_{}{}".format("Optional", self.count())
res = name
self.namemap[name] = Optional(name, *exps)
self.operators.append((name, exps, None))
# self.operators[res.name] = Optional(name, exps)
else: # expression was just a term
res = self.emitterm(exp)
self.register((res,))
return res
def register(self, args):
# generate Terminals and Nonterminals for strings we haven't seen yet
for arg in args:
# print("Argument: ", arg)
if arg[0] in ("'", "\""):
val = arg[1:-1]
# print(arg, val)
if any([x(val) for x in self.tokenmap.values()]):
continue
else:
self.tokenmap[val] = Terminal(val, val)
self.tokenmap[arg] = Terminal(val, val)
self.lexmap.append((val, val))
else: # name
if any([x(arg) for x in self.tokenmap.values()]): # see if name is token
continue
if arg not in self.namemap: # otherwise, it's a nonterminal
self.namemap[arg] = Nonterminal(arg)
def emitterm(self, term):
obj = term.val
return obj
def getbinding(self, binding):
name = binding.bname
if binding.names:
names = self.getnames(binding.names)
else:
names = []
return [name, ] + names
def getnames(self, names):
if not names:
return []
name = names.termname
names = names.names
if names:
return [name,] + self.getnames(names)
else:
return [name, ]
def gettokens(self, toktree):
pairs = toktree[1]
while pairs:
name = pairs[0]
regex = pairs[1]
pairs = pairs[2]
regex = regex.strip()[1:-1]
self.tokenmap[name] = Terminal(name, regex)
self.lexmap.append((name, regex))
# print("Regex: ", name, regex)
# def emitoperator(self, op):
# print(op, op.rules, type(op))
# print([type(arg) for arg in op.args[0]])
# if isinstance(op, Repeat):
# s = ''
# for arg in op.args:
# if isinstance(arg, Nonterminal):
# s += " var{}_{} = self.{}()\n".format(i, cleanname, fname(cleanname))
# else:
# s += " var{}_{} = self.consume(\"{}\")\n".format(i, cleanname, term.name)
# elif isinstance(op, Optional):
# pass
# elif isinstance(op, Set):
# pass
def emitfunc(self, nonterm):
s = ''
s += " def {}(self):\n".format(fname(nonterm.name))
s += " if self.log:\n"
s += " print(\"{}\")\n".format(nonterm.name)
epsilon = False
alltoks = set()
# if isinstance(nonterm, Operator):
# self.parser += self.emitoperator(nonterm)
# return
if len(nonterm.rules) == 0:
raise Exception("No definition for nonterminal {}".format(nonterm))
for rule in nonterm.rules:
epsilon |= None in rule[0]
firsts = rule[0].difference(set((None,)))
tokset = "(\"{}\",)".format('\", \"'.join([tok.name for tok in firsts]))
alltoks.update(firsts) # get all tokens together for error case
production = rule[1]
variables = []
if not production.prod[0]:
continue
if isinstance(nonterm, Set):
return self.emitset(s, nonterm)
else:
s += " if self.next() in {}:\n".format(tokset)
if isinstance(nonterm, Repeat):
if len(production.prod) > 1:
raise Exception("Repeat operator can only be applied to a single terminal or nonterminal")
term = production.prod[0]
cleanname = sanitize(term.name)
variables.append("var{}_{}".format(0, cleanname))
s += " var0_{} = []\n".format(cleanname)
s += " while self.next() in {}:\n".format(tokset)
if isinstance(term, Nonterminal):
s += " var0_{}.append(self.{}())\n".format(cleanname, fname(cleanname))
else:
s += " var0_{}.append(self.consume(\"{}\"))\n".format(cleanname, term.name)
                epsilon = True # repeats can be taken 0 times; equivalent to an epsilon production
else:
for i,term in enumerate(production.prod):
cleanname = sanitize(term.name)
variables.append("var{}_{}".format(i, cleanname))
if isinstance(term, Nonterminal):
s += " var{}_{} = self.{}()\n".format(i, cleanname, fname(cleanname))
else:
s += " var{}_{} = self.consume(\"{}\")\n".format(i, cleanname, term.name)
# print(production, dir(production))
if production.pclass:
binding = production.pclass
if binding[0] == "_": # suppress this production
s += " return # production suppressed\n"
continue
attrs = zip(binding[1:], variables)
sargs = ''
for name, variable in attrs:
sargs += "('{}', {}),".format(name, variable)
s += " return {}({})\n".format(production.pclass[0], sargs)
else:
s += " return {}\n".format(', '.join(variables))
if epsilon:
s += " return # epsilon case\n\n"
else: # error case
# if isinstance(nonterm, Optional):
# s += " return # this production declared as optional\n\n"
# else:
alltokset = "(\"{}\",)".format('\", \"'.join([tok.name for tok in alltoks]))
s += " self.parsefail({}, self.next())\n\n".format(alltokset)
self.parser += s
def emitset(self, s, nonterm):
alltoks = set()
for term in nonterm.args:
if term in self.tokenmap:
term = self.tokenmap[term]
else:
term = self.namemap[term]
firsts = term.first
tokset = "(\"{}\",)".format('\", \"'.join([tok.name for tok in firsts]))
alltoks.update(firsts)
s += " if self.next() in {}:\n".format(tokset)
if isinstance(term, Nonterminal):
s += " return self.{}()\n".format(fname(sanitize(term.name)))
else:
s += " return self.consume(\"{}\")\n".format(term.name)
alltokset = "(\"{}\",)".format('\", \"'.join([tok.name for tok in alltoks]))
s += " self.parsefail({}, self.next())\n\n".format(alltokset)
self.parser += s
return
def sanitize(name):
return re.sub('[^0-9a-zA-Z_]', '', name)
def fname(name):
return "parse{}".format(name)
```
#### File: jemcmahan13/pygll/pygll.py
```python
from stock import PCLASS, MAIN
import re, sys
from emitter import Emitter
class GrammarObj(object):
def __str__(self):
return self.name
def __repr__(self):
s = self.name + '['
for k,v in self.attrs:
s += "{}:{}, ".format(k,v)
s += ']'
return s
def __init__(self, *args):
i = 0
for k,v in args:
if k == "_":
setattr(self, "_anon{{}}".format(i), v)
i += 1
else:
setattr(self, k, v)
self.attrs = args
class Declarations(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Declarations"
class Declaration(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Declaration"
class Alternative(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Alternative"
class _(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "_"
class Alternatives(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Alternatives"
class exprepeat(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "exprepeat"
class expset(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "expset"
class eopt(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "eopt"
class Expr(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Expr"
class Name(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Name"
class String(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "String"
class Terms(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Terms"
class Binding(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Binding"
class Names(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Names"
LEXMAP = [('pound', '#'), ('bar', '\|'), ('epsilon', '\$'), ('lrepeat', '\['), ('rrepeat', '\]'), ('lset', '{'), ('rset', '}'), ('lopt', '<'), ('ropt', '>'), ('string', '(\\\'|\\").*?[^\\\\]\\1'), ('name', '\w+'), ('%root', '%root'), ('%tokens', '%tokens'), ('%grammar', '%grammar'), (':=', ':='), (';', ';')]
class Parser(object):
class ScanError(Exception):
pass
class ParseError(Exception):
pass
def parsefail(self, expected, found, val=None):
raise Parser.ParseError("Parse Error, line {}: Expected token {}, but found token {}:{}".format(self.line, expected, found, val))
def scanfail(self):
raise Parser.ScanError("Lexer Error, line {}: No matching token found. Remaining input: {} ....".format(self.line, self.remaining[:50]))
def __init__(self):
lex = [('whitespace', '\s+'),] + [ x for x in LEXMAP ]
rules = [ (regex, self.makeHandler(tokenName)) for tokenName, regex in lex ]
self.scanner = re.Scanner(rules)
self.line = 1
self.log = False
def parse(self, s):
self.toks, self.remaining = self.scanner.scan(s)
self.trim()
return self._parseRoot()
def makeHandler(self, token):
return lambda scanner, string : (token, string)
def trim(self):
if self.toks:
token, match = self.toks[0]
if token == "whitespace":
self.line += match.count('\n')
self.toks.pop(0)
self.trim()
def next(self):
if self.toks:
token,match = self.toks[0]
return token
elif self.remaining:
self.scanfail()
def consume(self, tok):
if not self.toks and self.remaining:
self.scanfail()
if len(self.toks) == 0:
self.parsefail(tok, 'EOF')
token,match = self.toks.pop(0)
if self.log:
print("consuming {}:{}".format(tok, match))
if tok != token:
self.parsefail(tok, token, match)
self.trim()
return match
def pop(self):
return self.consume(self.next())
def parseSpec(self):
if self.next() in ("%root",):
var0_RootDecl = self.parseRootDecl()
var1_TokenDecl = self.parseTokenDecl()
var2_GrammarDecl = self.parseGrammarDecl()
return var0_RootDecl, var1_TokenDecl, var2_GrammarDecl
self.parsefail(['("%root",)'], self.next())
def parseRootDecl(self):
if self.next() in ("%root",):
var0_root = self.consume("%root")
var1_name = self.consume("name")
return var0_root, var1_name
self.parsefail(['("%root",)'], self.next())
def parseTokenDecl(self):
if self.next() in ("%tokens",):
var0_tokens = self.consume("%tokens")
var1_TokenPairs = self.parseTokenPairs()
return var0_tokens, var1_TokenPairs
self.parsefail(['("%tokens",)'], self.next())
def parseGrammarDecl(self):
if self.next() in ("%grammar",):
var0_grammar = self.consume("%grammar")
var1_Decls = self.parseDecls()
return var0_grammar, var1_Decls
self.parsefail(['("%grammar",)'], self.next())
def parseTokenPairs(self):
if self.next() in ("name",):
var0_name = self.consume("name")
var1_string = self.consume("string")
var2_TokenPairs = self.parseTokenPairs()
return var0_name, var1_string, var2_TokenPairs
return # epsilon case
def parseDecls(self):
if self.next() in ("name",):
var0_Decl = self.parseDecl()
var1_Decls = self.parseDecls()
return Declarations(('decl', var0_Decl),('rest', var1_Decls),)
return # epsilon case
def parseDecl(self):
if self.next() in ("name",):
var0_name = self.consume("name")
var1_ = self.consume(":=")
var2_Alt = self.parseAlt()
var3_Alts = self.parseAlts()
var4_ = self.consume(";")
return Declaration(('dname', var0_name),('_', var1_),('alt', var2_Alt),('alts', var3_Alts),('_', var4_),)
self.parsefail(['("name",)'], self.next())
def parseAlt(self):
if self.next() in ("string", "lopt", "lrepeat", "lset", "name",):
var0_Exp = self.parseExp()
var1_Exps = self.parseExps()
var2_Binding = self.parseBinding()
return Alternative(('exp', var0_Exp),('exps', var1_Exps),('bind', var2_Binding),)
if self.next() in ("epsilon",):
var0_epsilon = self.consume("epsilon")
return # production suppressed
self.parsefail(['("string", "lopt", "lrepeat", "lset", "name",)', '("epsilon",)'], self.next())
def parseAlts(self):
if self.next() in ("bar",):
var0_bar = self.consume("bar")
var1_Alt = self.parseAlt()
var2_Alts = self.parseAlts()
return Alternatives(('_', var0_bar),('alt', var1_Alt),('alts', var2_Alts),)
return # epsilon case
def parseExp(self):
if self.next() in ("lrepeat",):
var0_lrepeat = self.consume("lrepeat")
var1_Exps = self.parseExps()
var2_rrepeat = self.consume("rrepeat")
return exprepeat(('_', var0_lrepeat),('exps', var1_Exps),('_', var2_rrepeat),)
if self.next() in ("lset",):
var0_lset = self.consume("lset")
var1_Exps = self.parseExps()
var2_rset = self.consume("rset")
return expset(('_', var0_lset),('exps', var1_Exps),('_', var2_rset),)
if self.next() in ("lopt",):
var0_lopt = self.consume("lopt")
var1_Exps = self.parseExps()
var2_ropt = self.consume("ropt")
return eopt(('_', var0_lopt),('exps', var1_Exps),('_', var2_ropt),)
if self.next() in ("string", "name",):
var0_Term = self.parseTerm()
return var0_Term
self.parsefail(['("lrepeat",)', '("lset",)', '("lopt",)', '("string", "name",)'], self.next())
def parseExps(self):
if self.next() in ("string", "lopt", "lrepeat", "lset", "name",):
var0_Exp = self.parseExp()
var1_Exps = self.parseExps()
return Expr(('exp', var0_Exp),('exps', var1_Exps),)
return # epsilon case
def parseBinding(self):
if self.next() in ("pound",):
var0_pound = self.consume("pound")
var1_name = self.consume("name")
var2_Names = self.parseNames()
return Binding(('_', var0_pound),('bname', var1_name),('names', var2_Names),)
return # epsilon case
def parseTerm(self):
if self.next() in ("name",):
var0_name = self.consume("name")
return Name(('val', var0_name),)
if self.next() in ("string",):
var0_string = self.consume("string")
return String(('val', var0_string),)
self.parsefail(['("name",)', '("string",)'], self.next())
def parseNames(self):
if self.next() in ("name",):
var0_name = self.consume("name")
var1_Names = self.parseNames()
return Names(('termname', var0_name),('names', var1_Names),)
return # epsilon case
def _parseRoot(self):
return self.parseSpec()
def main():
with open(sys.argv[1]) as f:
s = f.read()
p = Parser()
ast = p.parse(s)
e = Emitter(ast)
e.emit()
text = e.objs + PCLASS + e.parser + MAIN
print(text)
if __name__ == "__main__":
main()
```
#### File: tests/simplemath/math.py
```python
class GrammarObj(object):
def __str__(self):
return self.name
def __repr__(self):
s = self.name + '['
for k,v in self.attrs:
s += "{}:{}, ".format(k,v)
s += ']'
return s
def __init__(self, *args):
i = 0
for k,v in args:
if k == "_":
setattr(self, "_anon{{}}".format(i), v)
i += 1
else:
setattr(self, k, v)
self.attrs = args
class Exp(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Exp"
class Plus(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Plus"
class Term(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Term"
class Mult(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Mult"
class Paren(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Paren"
class Num(GrammarObj):
def __init__(self, *args):
super().__init__(*args)
self.name = "Num"
LEXMAP = [('plus', '\\+'), ('mult', '\\*'), ('lparen', '\\('), ('rparen', '\\)'), ('num', '[0-9]+'), ('name', '\\w+')]
import sys, re
log = False
class Parser(object):
class ScanError(Exception):
pass
class ParseError(Exception):
pass
def parsefail(self, expected, found, val=None):
raise Parser.ParseError("Parse Error, line {}: Expected token {}, but found token {}:{}".format(self.line, expected, found, val))
def scanfail(self):
raise Parser.ScanError("Lexer Error, line {}: No matching token found. Remaining input: {} ....".format(self.line, self.remaining[:50]))
def __init__(self):
lex = [('whitespace', '\s+'),] + [ x for x in LEXMAP ]
rules = [ (regex, self.makeHandler(tokenName)) for tokenName, regex in lex ]
self.scanner = re.Scanner(rules)
self.line = 1
self.log = log
def parse(self, s):
self.toks, self.remaining = self.scanner.scan(s)
self.trim()
return self._parseRoot()
def makeHandler(self, token):
return lambda scanner, string : (token, string)
def trim(self):
if self.toks:
token, match = self.toks[0]
if token == "whitespace":
self.line += match.count('\n')
self.toks.pop(0)
self.trim()
# else:
# if log:
# print("next token is ", token)
def next(self):
if self.toks:
token,match = self.toks[0]
return token
elif self.remaining:
self.scanfail()
    def consume(self, tok):
        if not self.toks and self.remaining:
            self.scanfail()
        if len(self.toks) == 0:
            self.parsefail(tok, 'EOF')
        token,match = self.toks.pop(0)
if self.log:
print("consuming {}:{}".format(tok, match))
if tok != token:
self.parsefail(tok, token, match)
self.trim()
return match
def pop(self):
return self.consume(self.next())
def parseE(self):
if self.next() in ("lparen", "num",):
var0_T = self.parseT()
var1_Ep = self.parseEp()
return Exp(('op', var0_T),('rhs', var1_Ep),)
self.parsefail(['("lparen", "num",)'], self.next())
def parseT(self):
if self.next() in ("lparen", "num",):
var0_F = self.parseF()
var1_Tp = self.parseTp()
return Term(('op', var0_F),('rhs', var1_Tp),)
self.parsefail(['("lparen", "num",)'], self.next())
def parseEp(self):
if self.next() in ("plus",):
var0_plus = self.consume("plus")
var1_T = self.parseT()
var2_Ep = self.parseEp()
return Plus(('_', var0_plus),('op', var1_T),('rest', var2_Ep),)
return # epsilon case
def parseF(self):
if self.next() in ("lparen",):
var0_lparen = self.consume("lparen")
var1_E = self.parseE()
var2_rparen = self.consume("rparen")
return Paren(('_', var0_lparen),('exp', var1_E),('_', var2_rparen),)
if self.next() in ("num",):
var0_num = self.consume("num")
return Num(('val', var0_num),)
self.parsefail(['("lparen",)', '("num",)'], self.next())
def parseTp(self):
if self.next() in ("mult",):
var0_mult = self.consume("mult")
var1_F = self.parseF()
var2_Tp = self.parseTp()
return Mult(('_', var0_mult),('op', var1_F),('rest', var2_Tp),)
return # epsilon case
def _parseRoot(self):
return self.parseE()
def main():
with open(sys.argv[1]) as f:
s = f.read()
p = Parser()
ast = p.parse(s)
print(repr(ast))
if __name__ == "__main__":
main()
```
|
{
"source": "jemc/zproto",
"score": 2
}
|
#### File: python/zproto/ZprotoExample.py
```python
import struct
import uuid
import zmq
import logging
logger = logging.getLogger(__name__)
ZFrame = zmq.Frame
class ZprotoExample(object):
ZPROTO_EXAMPLE_VERSION = 1
LOG = 1
STRUCTURES = 2
BINARY = 3
TYPES = 4
FLAGS_SIZE = 4
def __init__(self, id=None, *args, **kwargs):
# Structure of our class
        self._routing_id = None  # Routing_id from ROUTER, if any
self._id = id # ZprotoExample message ID
self._needle = 0 # Read/write pointer for serialization
self.struct_data = b'' # holds the binary data
self._sequence = 0
self._version = 0
self._level = 0
self._event = 0
self._node = 0
self._peer = 0
self._time = 0
self._host = ""
self._data = ""
self._aliases = []
self._headers = {}
self._flags = b''
self._public_key = b''
self._identifier = uuid.uuid4()
self._address = None # ZTypes are not implemented
#self._address = ZFrame()
self._content = None # ZTypes are not implemented
#self._content = ZMsg()
self._client_forename = ""
self._client_surname = ""
self._client_mobile = ""
self._client_email = ""
self._supplier_forename = ""
self._supplier_surname = ""
self._supplier_mobile = ""
self._supplier_email = ""
# --------------------------------------------------------------------------
# Network data encoding macros
# Put a 1-byte number to the frame
    def _put_number1(self, nr):
        d = struct.pack('>B', nr)
        self.struct_data += d
        logger.debug(self.struct_data)
    # Get a 1-byte unsigned number from the frame
    # (unsigned, so that e.g. string lengths up to 255 round-trip correctly)
    def _get_number1(self):
        num = struct.unpack_from('>B', self.struct_data, offset=self._needle)
        self._needle += struct.calcsize('>B')
        return num[0]
# Put a 2-byte number to the frame
def _put_number2(self, nr):
d = struct.pack('>H', nr)
self.struct_data += d
        logger.debug(self.struct_data)
# Get a 2-byte number from the frame
def _get_number2(self):
num = struct.unpack_from('>H', self.struct_data, offset=self._needle)
self._needle += struct.calcsize('>H')
return num[0]
# Put a 4-byte number to the frame
def _put_number4(self, nr):
d = struct.pack('>I', nr)
self.struct_data += d
        logger.debug(self.struct_data)
    # Get a 4-byte number from the frame
def _get_number4(self):
num = struct.unpack_from('>I', self.struct_data, offset=self._needle)
self._needle += struct.calcsize('>I')
return num[0]
    # Put an 8-byte number to the frame
def _put_number8(self, nr):
d = struct.pack('>Q', nr)
self.struct_data += d
        logger.debug(self.struct_data)
    # Get an 8-byte number from the frame
def _get_number8(self):
num = struct.unpack_from('>Q', self.struct_data, offset=self._needle)
self._needle += struct.calcsize('>Q')
return num[0]
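    # Example round-trip (hypothetical values): _put_number2(0x1234) appends
    # b'\x12\x34' (big-endian '>H') to struct_data, and a later _get_number2()
    # at that offset returns 0x1234 and advances self._needle by 2.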
# Put a string to the frame
def _put_string(self, s):
self._put_number1(len(s))
d = struct.pack('%is' % len(s), s.encode('UTF-8'))
self.struct_data += d
        logger.debug(self.struct_data)
# Get a string from the frame
def _get_string(self):
s_len = self._get_number1()
s = struct.unpack_from(str(s_len) + 's', self.struct_data, offset=self._needle)
self._needle += struct.calcsize('s' * s_len)
return s[0].decode('UTF-8')
# Put a long string to the frame
def _put_long_string(self, s):
self._put_number4(len(s))
d = struct.pack('%is' % len(s), s.encode('UTF-8'))
self.struct_data += d
        logger.debug(self.struct_data)
# Get a long string from the frame
def _get_long_string(self):
s_len = self._get_number4()
s = struct.unpack_from(str(s_len) + 's', self.struct_data, offset=self._needle)
self._needle += struct.calcsize('s' * s_len)
return s[0].decode('UTF-8')
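    # Wire format note: short strings are written as a 1-byte length prefix
    # followed by UTF-8 bytes, long strings with a 4-byte prefix. Caveat: the
    # prefix counts characters, not encoded bytes, so a non-ASCII string whose
    # UTF-8 encoding is longer than len(s) is silently truncated by
    # struct.pack above.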
# Put bytes to the frame
def _put_bytes(self, b):
self.struct_data += b
        logger.debug(self.struct_data)
# Get bytes from the frame
def _get_bytes(self, size):
s = self.struct_data[self._needle:self._needle+size]
self._needle += size
return s
# Skipped recv from zmsg
    # Skipped send to zmsg
# --------------------------------------------------------------------------
# Receive a ZprotoExample from the socket. Returns new object or
    # None if error. Will block if there's no message waiting.
def recv (self, insocket):
frames = insocket.recv_multipart()
if insocket.type == zmq.ROUTER:
            self._routing_id = frames.pop(0)
self.struct_data = frames.pop(0)
logger.debug("recv data: {0}".format(self.struct_data))
if not self.struct_data:
logger.debug("Malformed msg")
return
# Get and check protocol signature
self._needle = 0
self._ceiling = len(self.struct_data)
# TODO what is zdigest?
signature = self._get_number2()
if signature != (0xAAA0 | 0):
logger.debug("Invalid signature {0}".format(signature))
return
self._id = self._get_number1()
if self._id == ZprotoExample.LOG:
self._sequence = self._get_number2()
self._version = self._get_number2()
if self._version != 3:
logger.debug("Value {0} != {1}".format(self._version, 3))
self._level = self._get_number1()
self._event = self._get_number1()
self._node = self._get_number2()
self._peer = self._get_number2()
self._time = self._get_number8()
self._host = self._get_string()
self._data = self._get_long_string()
elif self._id == ZprotoExample.STRUCTURES:
self._sequence = self._get_number2()
list_size = self._get_number4()
self._aliases = []
for x in range(list_size):
self._aliases.append(self._get_long_string());
hash_size = self._get_number4()
self._headers = {}
for x in range(hash_size):
key = self._get_string()
val = self._get_long_string()
self._headers.update({key: val})
elif self._id == ZprotoExample.BINARY:
self._sequence = self._get_number2()
self._flags = self._get_bytes(4)
            self._public_key = self._get_bytes(self._get_number4())
uuid_bytes = self._get_bytes(16)
self._identifier = uuid.UUID(bytes=uuid_bytes)
            if not len(frames):
                self._address = b''
                logger.warning("no more frames in message while retrieving self._address")
            else:
                self._address = frames.pop(0)
            logger.warning("Zmsg requested but we have no support for Zmsg types")
            if not len(frames):
                self._content = b''
                logger.warning("no more frames in message while retrieving self._content")
            else:
                self._content = frames.pop(0)
elif self._id == ZprotoExample.TYPES:
self._sequence = self._get_number2()
self._client_forename = self._get_string()
self._client_surname = self._get_string()
self._client_mobile = self._get_string()
self._client_email = self._get_string()
self._supplier_forename = self._get_string()
self._supplier_surname = self._get_string()
self._supplier_mobile = self._get_string()
self._supplier_email = self._get_string()
else:
logger.debug("bad message ID")
# --------------------------------------------------------------------------
# Send the ZprotoExample to the socket, and destroy it
def send(self, outsocket):
if outsocket.socket_type == zmq.ROUTER:
            outsocket.send(self._routing_id, zmq.SNDMORE)
        # TODO: We could generalize to a ZMsg class? ZMsg msg = new ZMsg();
self.struct_data = b''
self._needle = 0
nbr_frames = 0 # Total number of extra frames
# add signature
self._put_number2(0xAAA0 | 0)
self._put_number1(self._id)
if self._id == ZprotoExample.LOG:
# sequence is a 2-byte integer
self._put_number2(self._sequence)
# version is a 2-byte integer
self._put_number2(3)
# level is a 1-byte integer
self._put_number1(self._level)
# event is a 1-byte integer
self._put_number1(self._event)
# node is a 2-byte integer
self._put_number2(self._node)
# peer is a 2-byte integer
self._put_number2(self._peer)
# time is a 8-byte integer
self._put_number8(self._time)
if self._host != None:
self._put_string(self._host)
else:
self._put_number1(0) # Empty string
if self._data != None:
self._put_long_string(self._data)
else:
self._put_number4(0) # Empty string
elif self._id == ZprotoExample.STRUCTURES:
# sequence is a 2-byte integer
self._put_number2(self._sequence)
if self._aliases != None:
self._put_number4(len(self._aliases))
for val in self._aliases:
self._put_long_string(val)
else:
self._put_number4(0); # Empty string array
if self._headers != None:
self._put_number4(len(self._headers))
for key, val in self._headers.items():
self._put_string(key)
self._put_long_string(val)
else:
self._put_number4(0) # Empty hash
elif self._id == ZprotoExample.BINARY:
# sequence is a 2-byte integer
self._put_number2(self._sequence)
self._put_bytes(self._flags)
if self._public_key != None:
self._put_number4(len(self._public_key))
self._put_bytes(self._public_key)
else:
self._put_number4(0)
if self._identifier != None:
self._put_bytes(self._identifier.bytes)
else:
                self._put_bytes(b'\x00' * 16) # Empty UUID (16 zero bytes)
nbr_frames += 1
# TODO msg
elif self._id == ZprotoExample.TYPES:
# sequence is a 2-byte integer
self._put_number2(self._sequence)
if self._client_forename != None:
self._put_string(self._client_forename)
else:
self._put_number1(0) # Empty string
if self._client_surname != None:
self._put_string(self._client_surname)
else:
self._put_number1(0) # Empty string
if self._client_mobile != None:
self._put_string(self._client_mobile)
else:
self._put_number1(0) # Empty string
if self._client_email != None:
self._put_string(self._client_email)
else:
self._put_number1(0) # Empty string
if self._supplier_forename != None:
self._put_string(self._supplier_forename)
else:
self._put_number1(0) # Empty string
if self._supplier_surname != None:
self._put_string(self._supplier_surname)
else:
self._put_number1(0) # Empty string
if self._supplier_mobile != None:
self._put_string(self._supplier_mobile)
else:
self._put_number1(0) # Empty string
if self._supplier_email != None:
self._put_string(self._supplier_email)
else:
self._put_number1(0) # Empty string
# Now send the data frame
if nbr_frames:
outsocket.send(self.struct_data, zmq.SNDMORE)
else:
outsocket.send(self.struct_data)
# no more frames so return
return
if self._id == ZprotoExample.BINARY:
opt = zmq.SNDMORE
nbr_frames -= 1
if nbr_frames == 0:
opt = 0
outsocket.send(self._address, opt)
# --------------------------------------------------------------------------
# Print contents of message to stdout
def dump(self):
        if self._id == ZprotoExample.LOG:
            logger.info("LOG:")
            logger.info(" sequence=%d" % self._sequence)
            logger.info(" version=3")
            logger.info(" level=%d" % self._level)
            logger.info(" event=%d" % self._event)
            logger.info(" node=%d" % self._node)
            logger.info(" peer=%d" % self._peer)
            logger.info(" time=%d" % self._time)
if self._host != None:
logger.info(" host='%s'\n" %self._host)
else:
logger.info(" host=")
if self._data != None:
logger.info(" data='%s'\n" %self._data)
else:
logger.info(" data=")
else:
logger.info("(NULL)")
        if self._id == ZprotoExample.STRUCTURES:
            logger.info("STRUCTURES:")
            logger.info(" sequence=%d" % self._sequence)
logger.info(" aliases={0}".format(self._aliases))
logger.info(" headers={0}".format(self._headers))
else:
logger.info("(NULL)")
        if self._id == ZprotoExample.BINARY:
            logger.info("BINARY:")
            logger.info(" sequence=%d" % self._sequence)
logger.info(" flags={0}".format(self._flags))
logger.info(" public_key={0}".format(self._public_key))
logger.info(" identifier={0}".format(self._identifier))
logger.info(" address={0}".format(self._address))
logger.info(" content={0}".format(self._content))
else:
logger.info("(NULL)")
        if self._id == ZprotoExample.TYPES:
            logger.info("TYPES:")
            logger.info(" sequence=%d" % self._sequence)
if self._client_forename != None:
logger.info(" client_forename='%s'\n" %self._client_forename)
else:
logger.info(" client_forename=")
if self._client_surname != None:
logger.info(" client_surname='%s'\n" %self._client_surname)
else:
logger.info(" client_surname=")
if self._client_mobile != None:
logger.info(" client_mobile='%s'\n" %self._client_mobile)
else:
logger.info(" client_mobile=")
if self._client_email != None:
logger.info(" client_email='%s'\n" %self._client_email)
else:
logger.info(" client_email=")
if self._supplier_forename != None:
logger.info(" supplier_forename='%s'\n" %self._supplier_forename)
else:
logger.info(" supplier_forename=")
if self._supplier_surname != None:
logger.info(" supplier_surname='%s'\n" %self._supplier_surname)
else:
logger.info(" supplier_surname=")
if self._supplier_mobile != None:
logger.info(" supplier_mobile='%s'\n" %self._supplier_mobile)
else:
logger.info(" supplier_mobile=")
if self._supplier_email != None:
logger.info(" supplier_email='%s'\n" %self._supplier_email)
else:
logger.info(" supplier_email=")
else:
logger.info("(NULL)")
# --------------------------------------------------------------------------
# Get/set the message routing id
def routing_id(self):
return self._routing_id
def set_routing_id(self, routing_id):
self._routing_id = routing_id
# --------------------------------------------------------------------------
# Get/set the zproto_example id
    def id(self):
return self._id
def set_id(self, id):
self._id = id
# --------------------------------------------------------------------------
# Return a printable command string
def command(self):
if self._id == ZprotoExample.LOG:
return "LOG"
if self._id == ZprotoExample.STRUCTURES:
return "STRUCTURES"
if self._id == ZprotoExample.BINARY:
return "BINARY"
if self._id == ZprotoExample.TYPES:
return "TYPES"
return "?";
# --------------------------------------------------------------------------
# Get/set the sequence field
def sequence(self):
return self._sequence;
def set_sequence(self, sequence):
self._sequence = sequence
# --------------------------------------------------------------------------
# Get/set the level field
def level(self):
return self._level;
def set_level(self, level):
self._level = level
# --------------------------------------------------------------------------
# Get/set the event field
def event(self):
return self._event;
def set_event(self, event):
self._event = event
# --------------------------------------------------------------------------
# Get/set the node field
def node(self):
return self._node;
def set_node(self, node):
self._node = node
# --------------------------------------------------------------------------
# Get/set the peer field
def peer(self):
return self._peer;
def set_peer(self, peer):
self._peer = peer
# --------------------------------------------------------------------------
# Get/set the time field
def time(self):
return self._time;
def set_time(self, time):
self._time = time
# --------------------------------------------------------------------------
# Get/set the host field
def host(self):
return self._host;
def set_host(self, host):
self._host = host
# --------------------------------------------------------------------------
# Get/set the data field
def data(self):
return self._data;
def set_data(self, data):
self._data = data
# --------------------------------------------------------------------------
# Get the aliases field
def aliases(self):
return self._aliases
def get_aliases(self):
return self._aliases
def set_aliases(self, aliases):
self._aliases = aliases
# --------------------------------------------------------------------------
# Get the headers field
def headers(self):
return self._headers
def get_headers(self):
return self._headers
def set_headers(self, headers):
self._headers = headers
# --------------------------------------------------------------------------
# Get/set the flags field
def flags(self):
return self._flags;
def set_flags(self, flags):
self._flags = flags
# --------------------------------------------------------------------------
# Get the public_key field
def public_key(self):
return self._public_key
def get_public_key(self):
return self._public_key
def set_public_key(self, public_key):
self._public_key = public_key
# --------------------------------------------------------------------------
# Get the identifier field
def identifier(self):
return self._identifier
def get_identifier(self):
return self._identifier
def set_identifier(self, identifier):
self._identifier = identifier
# --------------------------------------------------------------------------
# Get the address field
def address(self):
return self._address
def get_address(self):
return self._address
def set_address(self, address):
self._address = address
# --------------------------------------------------------------------------
# Get the content field
def content(self):
return self._content
def get_content(self):
return self._content
def set_content(self, content):
self._content = content
# --------------------------------------------------------------------------
# Get/set the client_forename field
def client_forename(self):
return self._client_forename;
def set_client_forename(self, client_forename):
self._client_forename = client_forename
# --------------------------------------------------------------------------
# Get/set the client_surname field
def client_surname(self):
return self._client_surname;
def set_client_surname(self, client_surname):
self._client_surname = client_surname
# --------------------------------------------------------------------------
# Get/set the client_mobile field
def client_mobile(self):
return self._client_mobile;
def set_client_mobile(self, client_mobile):
self._client_mobile = client_mobile
# --------------------------------------------------------------------------
# Get/set the client_email field
def client_email(self):
return self._client_email;
def set_client_email(self, client_email):
self._client_email = client_email
# --------------------------------------------------------------------------
# Get/set the supplier_forename field
def supplier_forename(self):
return self._supplier_forename;
def set_supplier_forename(self, supplier_forename):
self._supplier_forename = supplier_forename
# --------------------------------------------------------------------------
# Get/set the supplier_surname field
def supplier_surname(self):
return self._supplier_surname;
def set_supplier_surname(self, supplier_surname):
self._supplier_surname = supplier_surname
# --------------------------------------------------------------------------
# Get/set the supplier_mobile field
def supplier_mobile(self):
return self._supplier_mobile;
def set_supplier_mobile(self, supplier_mobile):
self._supplier_mobile = supplier_mobile
# --------------------------------------------------------------------------
# Get/set the supplier_email field
def supplier_email(self):
return self._supplier_email;
def set_supplier_email(self, supplier_email):
self._supplier_email = supplier_email
```
|
{
"source": "jemd15/k-means-taller2-md-bd",
"score": 3
}
|
#### File: jemd15/k-means-taller2-md-bd/calculate_clusters.py
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Constant
DATASET1 = "./cars.csv"
LOOPS = 20
MAX_ITERATIONS = 10
INITIALIZE_CLUSTERS = 'k-means++'
CONVERGENCE_TOLERANCE = 0.001
NUM_THREADS = 8
def dataset_to_list_points(dir_dataset):
    """
    Read a CSV file with a set of points and return a list of [x, y] pairs
    :param dir_dataset: path to the CSV file (uses columns 0 and 10)
    """
    points = list()
    with open(dir_dataset, 'rt') as reader:
        for point in reader:
            fields = point.split(",")
            points.append([int(fields[0]), int(fields[10])])
    return points
def plot_results(inertials):
x, y = zip(*[inertia for inertia in inertials])
plt.plot(x, y, 'ro-', markersize=8, lw=2)
plt.grid(True)
plt.xlabel('Num Clusters')
plt.ylabel('Inertia')
plt.show()
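# Plotting inertia against the number of clusters is the classic "elbow"
# heuristic: pick the k at which the decrease in inertia levels off.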
def select_clusters(dataset, loops, max_iterations, init_cluster, tolerance, num_threads):
# Read data set
points = dataset_to_list_points(dataset)
inertia_clusters = list()
for i in range(1, loops + 1, 1):
# Object KMeans
kmeans = KMeans(n_clusters=i, max_iter=max_iterations, init=init_cluster, tol=tolerance, n_jobs=num_threads)
# Calculate Kmeans
kmeans.fit(points)
# Obtain inertia
inertia_clusters.append([i, kmeans.inertia_])
plot_results(inertia_clusters)
select_clusters(DATASET1, LOOPS, MAX_ITERATIONS, INITIALIZE_CLUSTERS, CONVERGENCE_TOLERANCE, NUM_THREADS)
```
|
{
"source": "jemdwood/cs234_proj",
"score": 2
}
|
#### File: jemdwood/cs234_proj/kurin_dataflow_old_unbatched.py
```python
import os
import glob
import numpy as np
from tensorpack import RNGDataFlow
from record_breakout import Recorder
import glob
from scipy import misc
import gym
from cv2 import resize
FRAME_HISTORY = 4
GAMMA = 0.99
TRAIN_TEST_SPLIT = 0.8
# Timon will pass me the key
GAME_NAMES = {
'MontezumaRevenge-v0': 'revenge',
'MsPacman-v0': 'mspacman',
'SpaceInvaders-v0': 'spaceinvaders'
}
class Kurin_Reader():
def __init__(self, record_folder, gym_game_name, data_frac=1.0):
self.record_folder = record_folder
self.gym_game_name = gym_game_name
self.kurin_to_gym = self.get_kurin_to_gym_action_map()
self.data_frac = data_frac
def read_eps(self):
eps_numbers = glob.glob(os.path.join(self.record_folder, GAME_NAMES[self.gym_game_name], 'screens', GAME_NAMES[self.gym_game_name], '*'))
eps_numbers = [x.split('/')[-1] for x in eps_numbers]
eps_numbers = eps_numbers[:int(self.data_frac*len(eps_numbers))]
for eps_num in eps_numbers:
full_eps_dict = {} # needs to have 'obs', 'act', 'rew'
full_eps_dict['obs'] = self.read_obs(eps_num)
full_eps_dict['act'], full_eps_dict['rew'] = self.read_act_reward(eps_num)
#print np.prod(full_eps_dict['obs'].shape)
#print full_eps_dict['obs'].dtype
yield full_eps_dict
def read_obs(self, eps_num): # [?, 84, 84, 3]
obs = None
num_screens = len(glob.glob(os.path.join(self.record_folder, GAME_NAMES[self.gym_game_name], 'screens', GAME_NAMES[self.gym_game_name], str(eps_num), '*png')))
screens = [] # list of screens
for i in range(1, num_screens+1): # not 0
image = misc.imread(os.path.join(self.record_folder, GAME_NAMES[self.gym_game_name], 'screens', GAME_NAMES[self.gym_game_name], str(eps_num), str(i)+'.png'))
image = resize(image, dsize = (84, 84))
screens.append(np.expand_dims(image, axis=0))
return np.concatenate(screens, axis=0)
    def read_act_reward(self, eps_num): # [[?, actions], [?, rewards]]
        acts, rewards = [], []
        with open(os.path.join(self.record_folder, GAME_NAMES[self.gym_game_name], 'trajectories', GAME_NAMES[self.gym_game_name], str(eps_num)+'.txt'), 'r') as f:
            f.readline() # ignoring headers
            f.readline() # ignoring headers
            f.readline() # ignoring headers
            for line in f:
                line = line.strip().split(',') # [frame,reward,score,terminal, action]
                line = [x.strip() for x in line]
                rewards.append(float(line[1]))
                acts.append(self.kurin_to_gym[int(line[4])])
        return np.asarray(acts), np.asarray(rewards)
def get_kurin_to_gym_action_map(self):
kurin_to_gym = {} # keys are action numbers in Kurin. Values are action numbers in OpenAI Gym. This is Game Specific!
# list of action meanings in Kurin
kurin_action_meanings = ['NOOP', 'FIRE','UP','RIGHT','LEFT','DOWN','UPRIGHT','UPLEFT',
'DOWNRIGHT','DOWNLEFT','UPFIRE','RIGHTFIRE','LEFTFIRE','DOWNFIRE',
'UPRIGHTFIRE','UPLEFTFIRE','DOWNRIGHTFIRE','DOWNLEFTFIRE']
# list of action meanings for given game in Gym
env = gym.make(self.gym_game_name)
gym_action_meanings = env.unwrapped.get_action_meanings()
for i in range(len(kurin_action_meanings)):
try:
ind = gym_action_meanings.index(kurin_action_meanings[i])
kurin_to_gym[i] = ind
except ValueError:
kurin_to_gym[i] = gym_action_meanings.index('NOOP') # NOOP in gym
return kurin_to_gym
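    # For example (hypothetical, game-dependent): if gym reports the action
    # meanings ['NOOP', 'FIRE', 'RIGHT', 'LEFT'], Kurin action 3 ('RIGHT')
    # maps to gym index 2, and any Kurin action missing from the gym list
    # falls back to the gym index of 'NOOP'.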
class KurinDataFlow(RNGDataFlow):
"""
Produces [state, action, reward] of human demonstrations,
state is 84x84x12 in the range [0,255], action is an int.
"""
def __init__(self, mode, record_folder=None, gym_game_name=None, data_frac=1.0):
"""
Args:
train_or_test (str): either 'train' or 'test'
shuffle (bool): shuffle the dataset
"""
if record_folder is None:
record_folder = '/Users/kalpit/Desktop/CS234/cs234_proj/spaceinvaders'
if gym_game_name is None:
gym_game_name = 'spaceinvaders'
assert mode in ['train', 'test', 'all']
self.mode = mode
self.shuffle = mode in ['train', 'all']
states = []
actions = []
rewards = []
scores = []
rec = Kurin_Reader(record_folder=record_folder, gym_game_name=gym_game_name, data_frac=data_frac)
eps_counter = 0
for eps in rec.read_eps():
s = eps['obs']
a = eps['act']
r = eps['rew']
# process states
s = np.pad(s, ((FRAME_HISTORY-1,FRAME_HISTORY), (0,0), (0,0), (0,0)), 'constant')
s = np.concatenate([s[i:-(FRAME_HISTORY-i)] for i in range(FRAME_HISTORY)], axis=-1)
s = s[:-(FRAME_HISTORY-1)]
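            # The pad-then-shift concatenation above builds, for each step t,
            # the channel-stacked state [s_{t-3}, s_{t-2}, s_{t-1}, s_t], i.e.
            # 84x84x12 arrays for FRAME_HISTORY=4 RGB frames.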
states.append(s)
# actions
actions.append(a)
# human score
scores.append(np.sum(r))
            # process rewards just like in tensorpack: discounted return-to-go,
            # accumulated from the end of the episode backwards
            R = 0
            r = np.sign(r)
            for idx in reversed(range(len(r))):
                R = r[idx] + GAMMA * R
                r[idx] = R
            rewards.append(r)
eps_counter += 1
print('eps_counter: %d' % eps_counter)
self.avg_human_score = np.mean(scores)
self.num_episodes = eps_counter
self.states = np.concatenate(states, axis=0)
self.actions = np.concatenate(actions, axis=0)
self.rewards = np.concatenate(rewards, axis=0)
num = self.size()
if mode != 'all':
idxs = list(range(self.size()))
# shuffle the same way every time
np.random.seed(1)
np.random.shuffle(idxs)
self.states = self.states[idxs]
self.actions = self.actions[idxs]
self.rewards = self.rewards[idxs]
if mode == 'train':
self.states = self.states[:int(TRAIN_TEST_SPLIT*num)]
self.actions = self.actions[:int(TRAIN_TEST_SPLIT*num)]
self.rewards = self.rewards[:int(TRAIN_TEST_SPLIT*num)]
elif mode == 'test':
self.states = self.states[int(TRAIN_TEST_SPLIT*num):]
self.actions = self.actions[int(TRAIN_TEST_SPLIT*num):]
self.rewards = self.rewards[int(TRAIN_TEST_SPLIT*num):]
def size(self):
return self.states.shape[0]
def get_data(self):
idxs = list(range(self.size()))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
state = self.states[k]
action = self.actions[k]
reward = self.rewards[k]
yield [state, action, reward]
if __name__=='__main__':
gym_game_name = 'SpaceInvaders-v0'
data_frac = 1.0
##def KurinDataFlow(mode, record_folder=None, game_name=None, read_from_npy=True)
#rdf = KurinDataFlow('train', 9, record_folder='/Users/kalpit/Desktop/CS234/cs234_proj/mspacman', game_name='mspacman')
rdf = KurinDataFlow('train', gym_game_name=gym_game_name, data_frac=data_frac)
```
|
{
"source": "jemejones/django-movies",
"score": 3
}
|
#### File: django-movies/movies_app/models.py
```python
from django.db import models
from django.urls import reverse
# Create your models here.
class Person(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50)
birthday = models.DateField()
class Meta:
abstract = True
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
class Actor(Person):
def get_absolute_url(self):
return reverse('actor-detail', kwargs={'pk': self.pk})
class Director(Person):
def get_absolute_url(self):
return reverse('director-detail', kwargs={'pk': self.pk})
class Movie(models.Model):
name = models.CharField(max_length=250)
year = models.IntegerField()
RATINGS = [
('G', 'G - General Audiences'),
('PG', 'PG - Parental Guidance Suggested'),
('PG-13', 'PG-13 - Parents Strongly Cautioned'),
('R', 'R - Restricted'),
('NC-17', 'NC-17 - No One Under 17 Admitted'),
]
rating = models.CharField(max_length=7, choices=RATINGS)
    director = models.ForeignKey(Director, related_name='movies', on_delete=models.CASCADE)  # on_delete is required on Django 2.0+
actors = models.ManyToManyField(Actor, related_name='movies')
def __str__(self):
return '{} ({})'.format(self.name, self.year)
def get_absolute_url(self):
return reverse('movie-detail', kwargs={'pk': self.pk})
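# Example usage (hypothetical data, e.g. from a Django shell), assuming the
# URLconf defines the 'movie-detail' and 'director-detail' names:
#   d = Director.objects.create(first_name='Sofia', last_name='Coppola',
#                               slug='sofia-coppola', birthday='1971-05-14')
#   m = Movie.objects.create(name='Lost in Translation', year=2003,
#                            rating='R', director=d)
#   m.get_absolute_url()  # resolves via reverse('movie-detail', kwargs={'pk': m.pk})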
```
|
{
"source": "jemejones/super-waffle",
"score": 4
}
|
#### File: super-waffle/001_sum/run_001_sum.py
```python
def calculate_sum(number_list):
"""return the calculated sum of a list of numbers
Arguments:
number_list -- a list of numbers (ints, floats)
"""
    return sum(number_list)
```
|
{
"source": "jemenake/LogicProjectTools",
"score": 3
}
|
#### File: jemenake/LogicProjectTools/folderpolicy.py
```python
import os
import os.path
import re
import fnmatch
import time
POLICYFILE_NAME = ".folderpolicy"
POLICY_REGEXP = "^(DENY|ACCEPT) +(.*)$"
policy_matcher = re.compile(POLICY_REGEXP)
# How many seconds to wait before doing the next folder
THROTTLING = 0.03
violations = list()
def read_policy(policyfile_path):
policy = list()
with open(policyfile_path, "rt") as f:
for line in f.readlines():
line = line.rstrip()
if policy_matcher.match(line) is not None:
policy.append(line)
return policy
def meets_policy(pathname, policy):
basename = os.path.basename(pathname)
for policy_item in policy:
m = policy_matcher.match(policy_item)
if m is not None:
action = m.group(1)
wildcard = m.group(2)
if fnmatch.fnmatch(basename, wildcard):
# We found a matching line
if action == "DENY":
return False
if action == "ACCEPT":
return True
else:
print("Unknown action: {0}".format(action))
return True
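# Example .folderpolicy contents (hypothetical):
#   DENY *.tmp
#   ACCEPT *
# Rules are matched top-down; the first wildcard that matches the basename
# decides, and a name with no matching rule is accepted by default. Policies
# are inherited: subfolders without their own .folderpolicy are checked
# against the nearest ancestor's rules.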
def walk_folder(folder, policy=[]):
time.sleep(THROTTLING)
print("Trying {0}".format(folder))
policyfile_path = os.path.join(folder, POLICYFILE_NAME)
if os.path.isfile(policyfile_path):
policy = read_policy(policyfile_path)
try:
for name in os.listdir(folder):
fullpath = os.path.join(folder, name)
if os.path.islink(fullpath):
                continue
if not meets_policy(fullpath, policy):
violations.append("{0} is not allowed in {1}".format(name, folder))
continue
# If we made it this far, it meets the policy. If it is a folder, go into it.
if os.path.isdir(fullpath):
walk_folder(fullpath, policy)
except OSError as e:
# Probably a permission problem
pass
SENDER = "<EMAIL>"
### Mail-sending stuff
def send_report_to(recipient):
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
msg = MIMEText("\n".join(violations))
    # SENDER is the sender's address; recipient is the destination address
msg['Subject'] = "You have some files in places they shouldn't be"
    msg['From'] = SENDER
msg['To'] = recipient
s = smtplib.SMTP('localhost')
s.sendmail(SENDER, [recipient], msg.as_string())
s.quit()
walk_folder("/Users/jonanderson")
for line in violations:
    print(line)
```
#### File: jemenake/LogicProjectTools/LogicProjectMonitor.py
```python
from os import walk
import os.path
import notify # For notifying the user (email, log file, etc...)
TARGET_FOLDERS = ('/Users/jemenake',)
IGNORE_FOLDERS = ('/Users/jemenake/Music/GarageBand', "/Users/jemenake/all_hashed_audio")
#TARGET_FOLDERS = ('/Users', '/Volumes/Old Macintosh HD', '/Volumes/Iomega HDD', '/Volumes/Logic Project Archives')
LOGIC9_TARGET_EXT = ".logic"
LOGICX_TARGET_EXT = ".logicx"
AUDIO_FOLDER = "Audio Files"
AUDIO_FILE_PREFIXES = "KURZ", "MOTIF", "VOICE", "YAMAHA", "KORG", "CASIO", "AUDIO", "TRACK"
# Should we look through the .logic/.logicx project files to find out what audio files they expect to see?
INSPECT_LOGIC_FILES = False
logic9_projects = list()
logicx_projects = list()
logic_projects = list()
audio_folders = list()
audio_files = list()
#
# Scans a .logic project for all of the audio files it references
#
# Takes: full pathname to a "*.logic" project folder
# Returns: list of full pathnames to audio files
def get_linked_logic9_audio_filenames(project_path):
pathnames = list()
data_pathname = project_path + "/" + "LgDoc" + "/documentData"
# print "Opening " + data_pathname
try:
with open(data_pathname, "r") as myfile:
data = myfile.read()
offset = 0
while True:
offset = data.find("LFUA", offset)
if offset == -1:
break
filename_start = offset + 10
filename_end = data.find("\0", filename_start)
path_start = offset + 138
path_end = data.find("\0", path_start)
filename = data[filename_start:filename_end]
path = data[path_start:path_end]
pathname = path + "/" + filename
pathnames.append(pathname)
offset = path_end
    except (IOError, OSError):
pass
# print "Problem reading " + data_pathname
return pathnames
##########################################
##########################################
# Scan all target folders, finding all .logic and .logicx folders and save what you find in global vars
#
def scan_target_folders(folders):
for folder in folders:
# print "Searching for intact Logic projects in " + folder
# print
for (dirpath, dirnames, filenames) in walk(folder):
for ignore_folder in IGNORE_FOLDERS:
if dirpath[:len(ignore_folder)] == ignore_folder:
# print "Ignoring " + dirpath
break
else:
logic9_projects.extend([dirpath + "/" + a for a in dirnames if a[-len(LOGIC9_TARGET_EXT):] == LOGIC9_TARGET_EXT])
logicx_projects.extend([dirpath + "/" + a for a in dirnames if a[-len(LOGICX_TARGET_EXT):] == LOGICX_TARGET_EXT])
audio_folders.extend([dirpath + "/" + a for a in dirnames if a == AUDIO_FOLDER])
audio_files.extend([dirpath + "/" + a for a in filenames if os.path.basename(a).upper().startswith(AUDIO_FILE_PREFIXES) and a.lower().endswith(".aif")])
logic_projects.extend(logic9_projects)
logic_projects.extend(logicx_projects)
return
# candidate_folders = sorted(candidate_folders, key=lambda x: len(x), reverse=True)
#
# found_folders = dict()
#
# while len(candidate_folders) > 0:
# candidate = candidate_folders[0]
# # Remove the candidate from the list
# candidate_folders = candidate_folders[1:]
#
# # Is this a "*.logic" folder
# if candidate[-len(LOGIC9_TARGET_EXT):] == LOGIC9_TARGET_EXT:
# parent_path = os.path.dirname(candidate)
# parent_name = os.path.basename(parent_path)
# candidate_without_ext = os.path.basename(candidate)[:-len(LOGIC9_TARGET_EXT)]
#
# # Remove any other projects whose parent folders also hold this
# candidate_folders = [a for a in candidate_folders if not parent_path.startswith(os.path.dirname(a))]
#
# # Are there any other "*.logic" folders in the same folder we're in?
# other_folders = [a for a in candidate_folders if a.startswith(parent_path) and a != parent_path and a[-len(LOGIC9_TARGET_EXT):] == LOGIC9_TARGET_EXT]
# if len(other_folders) == 0:
# # Is the folder named the same thing as we are?
# if parent_name == candidate_without_ext:
# # Is there an "Audio Files" folder in here?
# if os.path.isdir(parent_path + "/Audio Files"):
# found_folders[candidate] = pathsize(parent_path)
# print "0:FOUND:" + candidate
# else:
# print "1:NOAUDIOFILES:" + candidate
# else:
# print "2:MISMATCHEDPARENT" + candidate
#
# else:
# # There are other .logic folders in here. Remove all of them from candidates (but don't remove the *parent*, itself)
# candidate_folders = [ a for a in candidate_folders if not a.startswith(parent_path) and a != parent_path]
#
# # print
# for name in [ a for a in sorted(found_folders.keys(), key=lambda x: found_folders[x], reverse=True) ]:
# print str(found_folders[name]) + ":" + name
##########################################
##########################################
def pathsize(path):
size = 0
for (dirpath, dirnames, filenames) in walk(path):
for filename in filenames:
            size += os.lstat(dirpath + "/" + filename).st_size
return size
#########################################
#########################################
def run_checks():
problems = []
from collections import Counter
# Check to see if a .logic project has other .logic projects in the same folder
project_counts = Counter([os.path.dirname(a) for a in logic_projects])
for k in project_counts.keys():
if project_counts[k] != 1:
problems.append(k + " has " + str(project_counts[k]) + " logic projects in it")
# Check to see if any audio files aren't in Audio Files folders
folders_with_audio_files = set([os.path.dirname(a) for a in audio_files])
for foldername in folders_with_audio_files:
if not foldername in audio_folders:
problems.append(foldername + " contains audio files, but it's not an Audio Files folder")
# Check to see that all Audio Files folders are in a folder with ONE .logic project
for parent_foldername in [os.path.dirname(a) for a in audio_folders]:
if parent_foldername in project_counts.keys():
if project_counts[parent_foldername] != 1:
problems.append(parent_foldername + " has an Audio Files folder, and MULTIPLE Logic projects")
else:
# There were ZERO Logic projects with this folder
problems.append(parent_foldername + " has an Audio Files folder, but no Logic project")
if INSPECT_LOGIC_FILES:
# Check to make sure that all linked audio files exist
for project in logic_projects:
missing_audio = list()
project_audio_files = get_linked_logic9_audio_filenames(project)
for audio_file in project_audio_files:
if not os.path.isfile(audio_file):
missing_audio.append(audio_file)
if(len(missing_audio) > 0):
problems.append(project + " is missing the following audio files:")
for a in missing_audio:
problems.append(" " + a)
# Check to make sure that all linked audio files are in their assigned "Audio Files" folder
for project in logic_projects:
wayward_audio = list()
project_audio_files = get_linked_logic9_audio_filenames(project)
for audio_file in project_audio_files:
if os.path.dirname(project) + "/Audio Files" != os.path.dirname(audio_file):
wayward_audio.append(audio_file)
if(len(wayward_audio) > 0):
problems.append(project + " references a wayward audio file:")
for a in wayward_audio:
problems.append(" " + a)
# Check for any audio in Audio Files that isn't referenced in the project
pass
return problems
#########################################
#########################################
def main():
scan_target_folders(TARGET_FOLDERS)
problems = run_checks()
if len(problems) > 0:
# We found some problems
notify.notify("\n".join(problems))
if __name__ == "__main__":
main()
```
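The duplicate-project check in run_checks reduces to counting projects per parent folder with collections.Counter; a self-contained sketch of that idea (the paths are made up for illustration):
```python
# Hedged, standalone illustration of the Counter-based check in run_checks above.
import os.path
from collections import Counter

projects = ["/a/song1/song1.logic", "/a/song1/alt.logic", "/b/song2/song2.logic"]
project_counts = Counter(os.path.dirname(p) for p in projects)
for folder, count in project_counts.items():
    if count != 1:
        print(folder + " has " + str(count) + " logic projects in it")
# -> /a/song1 has 2 logic projects in it
```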
|
{
"source": "Jemeni11/Fic-Retriever",
"score": 3
}
|
#### File: Fic-Retriever/embed_messages/SH_Embed.py
```python
from discord import Embed
from scrapers.scribblehub import ScribbleHub
import re
from dateutil import tz
from datetime import datetime
def ScribbleHubEmbed(URL: str):
    # Compute the timestamp per call so the embed footer isn't frozen at import time.
    now = datetime.now(tz=tz.tzlocal())
    SHinstance = ScribbleHub(URL)
try:
if re.search(r"(^https://www\.scribblehub\.com/(series|read))/\d+", URL, re.IGNORECASE):
SHReply = SHinstance.SHWork()
# Create the initial embed object #
# Description has a limit of 4096, but I'm setting this to 350 characters.
DESCRIPTION = SHReply['SYNOPSIS'] if len(SHReply['SYNOPSIS']) < 350 else f"{SHReply['SYNOPSIS'][:345]} ..."
embed = Embed(
title=SHReply['STORY_TITLE'],
url=URL,
description=DESCRIPTION,
color=0xE09319)
# Add author, thumbnail, fields, and footer to the embed
embed.set_author(
name=SHReply['AUTHOR'],
url=SHReply['AUTHOR_PROFILE_LINK'],
icon_url=SHReply['AUTHOR_AVATAR_LINK']
)
embed.set_thumbnail(url=SHReply['COVER_IMAGE'])
if SHReply['FANDOM'] != 'N/A':
embed.add_field(name="Fandom", value=SHReply['FANDOM'], inline=False)
if SHReply['CONTENT_WARNING'] != 'N/A':
embed.add_field(name="Content Warning", value=SHReply['CONTENT_WARNING'], inline=False)
embed.add_field(
name="Stats",
value=
f"""{SHReply['VIEWS']} • {SHReply['FAVOURITES']} • {SHReply['CHAPTER_COUNT']} • {SHReply['READERS']}""",
inline=False)
embed.add_field(name="Genres", value=SHReply['GENRES'], inline=False)
embed.set_footer(text=f"Info retrieved by Summarium on {now.strftime('%a %-d at %X')}")
return embed
elif re.search(r"(^https://www\.scribblehub\.com/profile)/\d+/(\w+)*", URL, re.IGNORECASE):
SHReply = SHinstance.SHProfile()
# Create the initial embed object #
            # Description is left empty for profile embeds.
embed = Embed(
title="ScribbleHub Profile",
url=URL,
description='',
color=0xE09319
)
# Add author, thumbnail, fields, and footer to the embed
embed.set_author(
name=SHReply['AUTHOR_NAME'],
url=URL,
icon_url=SHReply['PROFILE_PHOTO']
)
embed.set_thumbnail(url=SHReply['PROFILE_PHOTO'])
if SHReply['ABOUT'] != '':
embed.add_field(name="About", value=SHReply['ABOUT'], inline=False)
if SHReply['HOME_PAGE'] != 'N/A':
embed.add_field(name="Home Page", value=SHReply['HOME_PAGE'], inline=False)
embed.add_field(name="Last Active", value=SHReply['LAST_ACTIVE'], inline=True)
embed.add_field(name="Followers", value=SHReply['FOLLOWERS'], inline=True)
embed.add_field(name="Following", value=SHReply['FOLLOWING'], inline=True)
embed.add_field(
name="Stats",
value=
f"""
Joined: {SHReply['JOINED']} • Readers: {SHReply['READERS']} • Series: {SHReply['NUMBER_OF_SERIES']}
""",
inline=False)
embed.set_footer(text=f"Info retrieved by Summarium on {now.strftime('%a %-d at %X')}")
return embed
    except Exception:
embed = Embed(
title="Summarium Error",
url=URL,
            description=f"Cannot get ScribbleHub URL {URL}",
color=0x6A0DAD
)
return embed
```
#### File: Jemeni11/Fic-Retriever/main.py
```python
import re
import os
import discord
from discord.ext import commands
from embed_messages.SH_Embed import ScribbleHubEmbed
from embed_messages.AO3_Embed import ArchiveOfOurOwnEmbed
from embed_messages.FF_Embed import FanFictionDotNetEmbed
from embed_messages.FL_Embed import FictionDotLiveEmbed
from dotenv import load_dotenv
load_dotenv()
BOT_TOKEN = os.getenv('TOKEN')
description = """A bot that replies to fan-fiction links (ScribbleHub, AO3,
FanFiction.net, fiction.live) with summary embeds."""
intents = discord.Intents.default()
intents.members = True
# intents.message_content = True
"""
This worked perfectly about an hour ago and now it throws the following error:
(virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$ python3 main.py
Traceback (most recent call last):
File "main.py", line 25, in <module>
intents.message_content = True
AttributeError: 'Intents' object has no attribute 'message_content'
(virtualenv) nonso@HPEnvy:~/Documents/Code/Projects/Summarium$
So I commented that line out and ran my code again and it worked
somehow even though it shouldn't.
Putting this comment here in case it causes chaos later on.
"""
bot = commands.Bot(command_prefix="?", description=description, intents=intents)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user} (ID: {bot.user.id})")
print("____________________________________________")
@bot.event
async def on_message(message):
if message.author.id == bot.user.id:
return
if message.author.bot:
return # Do not reply to other bots
# Pulling out all URLs
URLs = re.findall(
r"""
\b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6})
|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]
|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}
[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]
{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::
[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]
{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}
|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]
{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)
|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4})
{0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}
(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:)
{1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25
[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]
{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?
(?:/[\w\.-]*)*/?)\b
""",
message.content, re.VERBOSE)
for i in URLs:
if re.search(r"(^https://www\.scribblehub\.com/(series|read|profile))/\d+", i, re.IGNORECASE):
await message.reply(embed=ScribbleHubEmbed(i))
elif re.search(r"^https://archiveofourown\.org/(\bseries\b|\bworks\b|\bcollections\b)/", i, re.IGNORECASE):
await message.reply(embed=ArchiveOfOurOwnEmbed(i))
        elif re.search(r"^https://(www|m)\.(\bfanfiction\b\.\bnet\b)/s/\d+/\d+/\w*", i, re.IGNORECASE):
            ff_file, ff_embed = FanFictionDotNetEmbed(i)  # scrape once rather than twice
            await message.reply(file=ff_file, embed=ff_embed)
elif re.search(r'^https?://fiction\.live/(?:stories|Sci-fi)/[^\/]+/([0-9a-zA-Z\-]+)/?.*', i, re.IGNORECASE):
await message.reply(embed=FictionDotLiveEmbed(i))
bot.run(BOT_TOKEN)
```
#### File: Fic-Retriever/scrapers/fictionlive.py
```python
import requests
from bs4 import BeautifulSoup
from datetime import datetime
def fictiondotlive(url: str):
STORY_ID = url.split("/")[5]
response = requests.get(f'https://fiction.live/api/node/{STORY_ID}').json()
TITLE = response['t'][:-4] if '<br>' == response['t'][-4:] else response['t']
AUTHOR = response['u'][0]['n']
    if 'a' not in response['u'][0].keys():
AUTHOR_IMAGE = 'https://ddx5i92cqts4o.cloudfront.net/images/1e1nvq5tm_fllogo.png'
else:
AUTHOR_IMAGE = response['u'][0]['a']
AUTHOR_LINK = f"https://fiction.live/user/{AUTHOR}"
STORY_STATUS = None if 'storyStatus' not in response.keys() else response['storyStatus']
CONTENT_RATING = None if 'contentRating' not in response.keys() else response['contentRating']
if CONTENT_RATING == 'nsfw':
CONTENT_RATING = 'NSFW'
if 'i' not in response.keys():
COVER_IMAGE = 'https://ddx5i92cqts4o.cloudfront.net/images/1e1nvq5tm_fllogo.png'
else:
COVER_IMAGE = response['i'][0]
AUTHOR_NOTE = ' ' if 'd' not in response.keys() else response['d']
AUTHOR_NOTEVar = str(AUTHOR_NOTE).replace('<br>', '\r\n')
AUTHOR_NOTEVar2 = str(AUTHOR_NOTEVar).replace('</p><p>', '</p><p>\r\n</p><p>')
AUTHOR_NOTE = BeautifulSoup(AUTHOR_NOTEVar2, 'lxml').get_text()
DESCRIPTION = ' ' if 'b' not in response.keys() else response['b']
DESCRIPTIONVar = str(DESCRIPTION).replace('<br>', '\r\n')
DESCRIPTIONVar2 = str(DESCRIPTIONVar).replace('</p><p>', '</p><p>\r\n</p><p>')
DESCRIPTION = BeautifulSoup(DESCRIPTIONVar2, 'lxml').get_text()
TAGS = ' ' if 'ta' not in response.keys() else response['ta']
NOS_OF_CHAPTERS = ' ' if 'bm' not in response.keys() else f"{len(response['bm']):,}"
NOS_OF_WORDS = ' ' if 'w' not in response.keys() else f"{response['w']:,}"
if 'cht' not in response.keys():
UPDATED = ' '
else:
UPDATED = str(datetime.fromtimestamp(response['cht'] / 1000.0, None))[:10]
if 'rt' not in response.keys():
PUBLISHED = ' '
else:
PUBLISHED = str(datetime.fromtimestamp(response['rt'] / 1000.0, None))[:10]
return {
'TITLE': TITLE,
'AUTHOR': AUTHOR,
'AUTHOR_IMAGE': AUTHOR_IMAGE,
'AUTHOR_LINK': AUTHOR_LINK,
'STORY_STATUS': STORY_STATUS,
'CONTENT_RATING': CONTENT_RATING,
'COVER_IMAGE': COVER_IMAGE,
'AUTHOR_NOTE': AUTHOR_NOTE,
'DESCRIPTION': DESCRIPTION,
'TAGS': TAGS,
'NOS_OF_CHAPTERS': NOS_OF_CHAPTERS,
'NOS_OF_WORDS': NOS_OF_WORDS,
'UPDATED': UPDATED,
'PUBLISHED': PUBLISHED
}
```
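A hedged usage sketch for the scraper above; the URL is hypothetical and a live request to fiction.live is required:
```python
# Hypothetical call to fictiondotlive; replace the placeholder with a real
# fiction.live story URL. Requires network access.
if __name__ == "__main__":
    info = fictiondotlive("https://fiction.live/stories/Example-Story/abc123def456/home")
    print(info['TITLE'], "-", info['AUTHOR'])
```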
|
{
"source": "Jemeni11/Lyric-Translator_",
"score": 4
}
|
#### File: Jemeni11/Lyric-Translator_/Caller.py
```python
from translathor import translator
from apicalls import *
def caller():
    intention = input(
        "To get songs by an artist use (F)\nTo get music lyrics use (G)\nUse (H) to get lyrics and translate -->")
    if intention.lower() not in ['f', 'g', 'h']:
exit("Lmao, get serious abeg")
elif intention.lower() == "g":
for text in get_lyrics():
print(text)
elif intention.lower() == "f":
get_songsby()
else:
print(translator(get_lyrics()))
if __name__ == "__main__":
    caller()
```
|
{
"source": "Jemesson/nlp-conceptual-model",
"score": 3
}
|
#### File: Jemesson/nlp-conceptual-model/Api.py
```python
import json
from ModelGenerationApi import ModelGenerationApi
from flask import Flask, request, Response, jsonify
app = Flask(__name__)
OPTION_USER_STORIES = 'user-stories'
OPTION_ONTOLOGY = 'ontology'
OPTION_PROLOG = 'prolog'
@app.route('/')
def index():
    return 'Hello =]. Please use the /user-stories endpoint.'
@app.route('/user-stories', methods=['POST'])
def api_user_stories():
    if not request.files.get('stories'):
return Response(json.dumps({'error': 'Upload a user story.'}), status=409, mimetype='application/json')
stream = request.files['stories'].stream
if request.files['stories'].content_type == 'text/csv':
messages = stream.read().decode("UTF8").split('\r\n')
else:
messages = stream.read().decode('UTF-8').split('\n')
weights = [1, 1, 1, 0.7, 0.5, 0.66]
conceptual_model = ModelGenerationApi('system-name', 1, 1, weights, messages)
result = conceptual_model.gen_concept_model()
option = None
if request.args.get('option'):
option = request.args.get('option')
if option is None or option == OPTION_USER_STORIES:
user_stories_json = [str(us.toJSON()) for us in result['stories']]
return "\n".join(user_stories_json)
elif option == OPTION_ONTOLOGY:
return result['ontology']
elif option == OPTION_PROLOG:
return result['prolog']
else:
return Response(json.dumps({'error': 'Invalid option.'}), status=409, mimetype='application/json')
if __name__ == '__main__':
app.run()
```
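A hedged smoke test of the endpoint above using Flask's built-in test client; the user story text is invented, and ModelGenerationApi must be importable for the request to succeed:
```python
# Hypothetical smoke test for /user-stories via Flask's test client.
import io

client = app.test_client()
story = b"As a user, I want to upload stories, so that I can get a model.\n"
response = client.post(
    '/user-stories?option=user-stories',
    data={'stories': (io.BytesIO(story), 'stories.txt', 'text/plain')},
    content_type='multipart/form-data',
)
print(response.status_code)
```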
#### File: nlp-conceptual-model/libs/NLPLoader.py
```python
import spacy
class NLPLoader:
"""
Loads NLP Library
"""
def __init__(self, message, language):
self.message = message
self.language = language
self.nlp = spacy.load(self.language)
def __str__(self):
return f'Message: {self.message} Language: {self.language}'
@property
def nlp_tool(self):
return self.nlp
```
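A hedged usage sketch; it assumes the spaCy model en_core_web_sm is installed (python -m spacy download en_core_web_sm):
```python
# Hypothetical use of NLPLoader above; the sentence is illustrative only.
loader = NLPLoader("The system shall export reports.", "en_core_web_sm")
doc = loader.nlp_tool(loader.message)
print([(token.text, token.pos_) for token in doc])
```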
|
{
"source": "jemfinch/finitd",
"score": 2
}
|
#### File: finitd/test/test_conf.py
```python
import grp
from finitd import conf
from finitd.test import *
from hieropt.test.test_hieropt import assert_write_then_read_equivalence
def test_Uid():
uid = conf.Uid('uid')
uid.setFromString('root')
assert_equals(uid(), 0)
assert_equals(str(uid), 'root')
def test_Gid():
gid = conf.Gid('gid')
# Can't test for 'root' here because BSD doesn't have a root group.
gid.setFromString('daemon')
assert_equals(gid(), grp.getgrnam('daemon').gr_gid)
assert_equals(str(gid), 'daemon')
def test_Signal():
sig = conf.Signal('sig')
sig.setFromString('SIGKILL')
assert_equals(sig(), 9)
assert_equals(str(sig), 'SIGKILL')
sig.setFromString('SIGTERM')
assert_equals(sig(), 15)
assert_equals(str(sig), 'SIGTERM')
def test_conf_config():
assert_write_then_read_equivalence(conf.config)
def test_watcher_pidfile():
assert_equals(conf.config.options.pidfile(), None)
assert_equals(conf.config.watcher.pidfile(), None)
conf.config.options.pidfile.set('pid')
assert_equals(conf.config.watcher.pidfile(), 'pid.watcher')
```
|
{
"source": "jemfinch/hieropt",
"score": 2
}
|
#### File: hieropt/hieropt/__init__.py
```python
import os
import re
import textwrap
from OrderedDict import OrderedDict
from optparse import OptionParser, OptionValueError
class InvalidSyntax(Exception):
def __init__(self, lineno, msg):
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s (on line %s)' % (self.msg, self.lineno)
class MissingName(InvalidSyntax):
def __init__(self, lineno):
InvalidSyntax.__init__(self, lineno, 'Could not find variable name')
class UnregisteredName(InvalidSyntax):
def __init__(self, lineno, name):
InvalidSyntax.__init__(self, lineno, 'Unregistered name: %r' % name)
class GroupExpectsNoValue(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return 'Group expects no value: %r' % self.name
def wrap(comment):
return textwrap.wrap(' '.join(comment.split()))
def writeComment(fp, comment):
for line in wrap(comment):
fp.write('# %s\n' % line)
def OptparseCallback(option, optString, valueString, parser, Value):
try:
Value.setFromString(valueString)
except ValueError, e:
raise OptionValueError('%s option expected %s, received %r (%s)' %
(optString, Value.type(), valueString, e))
class IgnoreValue(object):
    """Used by non-strict Groups to ignore the value in readfp."""
def expectsValue(self):
return True
def setFromString(self, s):
return
class Group(object):
"""All configuration variables are groups, that is, all configuration variables can have other groups and variables registered under them. Experience (from the very similar configuration in Supybot) has shown that making non-group variables is simply not worth the trouble and inconsistency."""
def __init__(self, name, comment=None, Child=None, strict=True):
"""
@param name: The name for this group. An argument could be made for making the group itself name-agnostic and only giving it a name upon registration with another group, but that would cripple unregistered groups.
@param comment: A helpful comment indicating the usage/meaning of a particular group. This comment will be written to configuration files and used as the help text of the optparse OptionParser the group can generate.
@param Child: A callable (usually a class) which, if not None, will be used in the get() method to create a requested child rather than raising KeyError.
"""
# All of these are prefixed with underscores so they won't conflict with
# registered children.
if name.startswith('_'):
raise ValueError('Names beginning with an underscore are forbidden: %r'%name)
self._name = name
self._parent = None
self._Child = Child
self._strict = strict
self._comment = comment
self._children = OrderedDict()
def get(self, name):
"""Returns the child variable with the given name. If no such variable exists and the Child argument was given to __init__, a new variable will be created and returned.
@param name: The name of the child to retrieve.
"""
try:
return self._children[name]
except KeyError:
if self._Child is not None:
child = self._Child(name)
self.register(child)
return child
else:
raise
def __getattr__(self, name):
if name.startswith('_'):
return object.__getattr__(self, name)
try:
return self.get(name)
except KeyError:
raise AttributeError(name)
def __call__(self):
# Having this here offers a better exception/error message than __getattr__'s
# AttributeError.
raise GroupExpectsNoValue(self._fullname())
def register(self, child):
"""Registers the given child with this group. Any previously-registered children
with the same name are replaced.
@param child: The child to register.
"""
self._children[child._name] = child
child._parent = self
return child
def _fullname(self, parentName=None, childName=None):
if childName is None:
childName = self._name
if parentName is None and self._parent is not None:
parentName = self._parent._fullname()
if not parentName:
return childName
else:
return '%s.%s' % (parentName, childName)
def writefp(self, fp, annotate=True, parentName=None):
"""Writes this configuration group and its children in their current state to the given file(-like) object.
@param fp: The file(-like) object to write.
@param annotate: Flag determining whether to write comments to the given file object. Default values are still written, but commented out.
@param parentName: The name of the parent to prefix to this group's own name and the name of its children.
"""
if self._comment and annotate:
writeComment(fp, self._comment)
fp.write('\n')
myname = self._fullname(parentName)
for child in self.children():
child.writefp(fp, annotate=annotate, parentName=myname)
_sepRe = re.compile(r'\s*[:=]\s*')
def readfp(self, fp):
"""Reads the given file object, setting the state of this configuration group and its children appropriately. Comment lines and blank lines are ignored; comment lines are those which begin (apart from leading whitespace) with a '#' character. Comments cannot be initiated part way through a line: e.g., a line 'foo: bar # baz' gives the 'foo' configuration variable the literal value 'bar # baz'. Non-comment lines consist of a configuration variable name followed by optional whitespace, a separator of ':' or '=', more optional whitespace, and finally the value of that variable in string form."""
lineno = 0
for line in fp:
lineno += 1
line = line.strip()
if not line or line.startswith('#'):
continue
try:
(name, value) = self._sepRe.split(line, 1)
except ValueError:
raise MissingName(lineno)
value = value.strip()
parts = name.split('.')
if parts.pop(0) != self._name:
if not self._strict:
continue # Just ignore other names.
raise UnregisteredName(lineno, name)
group = self
for part in parts:
try:
group = group.get(part)
                except KeyError:
                    if not self._strict:
                        group = IgnoreValue()
                    else:
                        raise UnregisteredName(lineno, name)
if not group.expectsValue():
raise InvalidSyntax(lineno, '%s expects no value' % name)
group.setFromString(value)
def read(self, filename):
"""Calls readfp with a file object opened with the given name."""
fp = open(filename)
try:
self.readfp(fp)
finally:
fp.close()
def readenv(self, environ=None):
"""Reads the given environment dictionary, setting the state of this configuration group and its children appropriately. Unrecognized env variable names are ignored. Environment variables are expected to be capitalized, parts separated by underscores. For instance, if you would access the configuration variable via 'foo.bar.baz' in Python, the environment variable expected would be FOO_BAR_BAZ.
@param environ: The environment dictionary. Defaults to os.environ.
@type environ: dict
"""
if environ is None:
environ = os.environ
for (name, variable) in self:
if not variable.expectsValue():
continue
envName = name.replace('.', '_').upper()
try:
variable.setFromString(environ[envName])
except KeyError:
continue
except ValueError, e:
raise ValueError('Invalid environment variable %s: %s' % (envName, e))
def __iter__(self):
"""Generates a series of (fullname, configuration variable) pairs for this Group
and its children."""
yield (self._name, self)
for child in self.children():
for (childname, grandchild) in child:
yield (self._fullname(self._name, childname), grandchild)
def toOptionParser(self, parser=None, **kwargs):
"""Modifies or produces an optparse.OptionParser which will set the appropriate variables in this configuration tree when certain options are given. Options are converted to lowercase and separated by dashes, in accordance with the common practice for long options in *nix. For instance, if you would access the configuration variable via 'foo.bar.baz' in Python, the command line option associated with that variable would be --foo-bar-baz."""
if parser is None:
parser = OptionParser(**kwargs)
for (name, variable) in self:
if not variable.expectsValue():
continue
optionName = name.replace('.', '-').lower()
parser.add_option('', '--' + optionName, action="callback",
type="string", callback=OptparseCallback,
metavar=variable.type().upper(), help=variable._comment,
callback_args=(variable,))
return parser
def children(self):
return self._children.values()
def expectsValue(self):
return False
parent = object()  # sentinel: a Value created with default=parent inherits its parent's value
class Value(Group):
def __init__(self, name, default=None, **kwargs):
Group.__init__(self, name, **kwargs)
self._value = None
self._default = default
@property
def default(self):
if callable(self._default):
return self._default()
elif self._default is parent:
return self._parent()
else:
return self._default
def __call__(self):
if self._value is None:
return self.default
else:
return self._value
@classmethod
def type(cls):
if cls is Value:
return 'string'
else:
return cls.__name__.lower()
def set(self, v):
self._value = v
def setFromString(self, s):
self.set(self.fromString(s))
def fromString(self, s):
return s
def toString(self, v):
return str(v)
def __str__(self):
return self.toString(self())
def writefp(self, fp, annotate=True, parentName=None):
myname = self._fullname(parentName)
if self._comment is not None and annotate:
writeComment(fp, self._comment)
if self._value is None:
fp.write('# ') # Document the default value, but comment it out.
if self() is None:
stringValue = '(no default)'
else:
stringValue = str(self)
fp.write('%s: %s\n' % (myname, stringValue))
if annotate:
fp.write('\n') # Extra newline makes comments more easily distinguishable.
for child in self.children():
child.writefp(fp, annotate=annotate, parentName=myname)
def expectsValue(self):
return True
def isSet(self):
return self._value is not None or self.default is not None
def isDefault(self):
return self._value is None
def reset(self):
self._value = None
class Bool(Value):
def fromString(self, s):
if s.lower() in ['true', 'on', '1', 'yes']:
return True
elif s.lower() in ['false', 'off', '0', 'no']:
return False
else:
raise ValueError('%r cannot be converted to bool' % s)
class Int(Value):
def fromString(self, s):
if s.startswith('0x'):
return int(s[2:], 16)
elif s.startswith('0'):
return int(s, 8)
else:
return int(s)
class Float(Value):
fromString = float
```
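A minimal sketch of assembling and serializing a configuration tree with the classes above (names are illustrative; like the module itself it targets Python 2, though the print calls are written to work on either):
```python
# Hedged example of a small hieropt config tree, round-tripped to stdout.
import sys

config = Group('app', comment='Example application settings')
config.register(Int('port', default=8080, comment='Port to listen on'))
config.register(Bool('verbose', default=False, comment='Chatty logging'))

config.port.setFromString('0x50')  # Int.fromString accepts hex -> 80
print(config.port())               # -> 80
config.writefp(sys.stdout)         # writes annotated 'app.port: 80' style lines
```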
|
{
"source": "Jemgoss/pychromecast",
"score": 3
}
|
#### File: pychromecast/pychromecast/config.py
```python
import json
from urllib.request import urlopen
APP_BACKDROP = "E8C28D3C"
APP_YOUTUBE = "233637DE"
APP_MEDIA_RECEIVER = "CC1AD845"
APP_PLEX = "06ee44ee-e7e3-4249-83b6-f5d0b6f07f34_1"
APP_DASHCAST = "84912283"
APP_HOMEASSISTANT_LOVELACE = "A078F6B0"
APP_HOMEASSISTANT_MEDIA = "B45F4572"
APP_SUPLA = "A41B766D"
APP_YLEAREENA = "A9BCCB7C"
APP_BUBBLEUPNP = "3927FA74"
APP_BBCSOUNDS = "03977A48"
APP_BBCIPLAYER = "5E81F6DB"
def get_possible_app_ids():
"""Returns all possible app ids."""
try:
with urlopen("https://clients3.google.com/cast/chromecast/device/baseconfig") as response:
            response.read(4)  # skip the 4-byte XSSI guard prefix before the JSON body
data = json.load(response)
return [app["app_id"] for app in data["applications"]] + data["enabled_app_ids"]
except ValueError:
# If json fails to parse
return []
def get_app_config(app_id):
"""Get specific configuration for 'app_id'."""
try:
with urlopen(f"https://clients3.google.com/cast/chromecast/device/app?a={app_id}") as response:
if response.status == 200:
                response.read(4)  # skip the 4-byte XSSI guard prefix
data = json.load(response)
else:
data = {}
return data
except ValueError:
# If json fails to parse
return {}
```
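A hedged usage sketch; both helpers hit Google's cast endpoints, so network access is required and offline runs will return empty results:
```python
# Hypothetical use of the config helpers above (requires network access).
if __name__ == "__main__":
    app_ids = get_possible_app_ids()
    print(len(app_ids), "app ids known")
    print(get_app_config(APP_MEDIA_RECEIVER))
```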
|
{
"source": "Jemhunter/Invoke",
"score": 4
}
|
#### File: Invoke/invokeReactive/invokeInterpreter.py
```python
import random
#all the characters that are considered valid
validChars = ["Q", "W", "E", "R", "I", ">", "<", "V", "^"]
def convertToValid (fileInput):
'''Convert file data to array of valid characters only'''
#convert to upper case
fileInput = fileInput.upper()
#create array to hold data in
data = []
#split into lines
lines = fileInput.split("\n")
#to store the max length
maxLength = -1
#iterate lines
for line in lines:
#if this line is longer than the current maximum
if len(line) > maxLength:
#update the maximum
maxLength = len(line)
#iterate lines
for line in lines:
#create list to hold the data for this line
lineData = []
        #iterate characters
for char in line:
#if it is a valid character
if char in validChars:
#add it to the list
lineData.append(char)
else:
#add a blank
lineData.append("")
#iterate for extra items that are needed
for i in range(maxLength - len(lineData)):
#add blanks to fill to square
lineData.append("")
#add line to data
data.append(lineData)
#return the array
return data
def getFile ():
'''Get all the data from the file given'''
#get the path from the user
path = input("Enter path to Invoke file:")
#initialize data as none - if no file can be obtained it won't proceed
fileData = None
#check for correct extension
if path.endswith(".inv"):
try:
#attempt to open and read the file
invokeFile = open(path, "r")
#store data in variable
fileData = invokeFile.read()
#close the file
invokeFile.close()
        except OSError:
#if something went wrong the file wasn't found
print("File not found or invalid, please ensure you typed the path correctly")
else:
#if it wasn't a .inv file
print("The file must be in the format filename.inv")
#return the data
return fileData
def whichCommand (parts):
'''Determine which command is being executed from the 3 parts'''
#list of valid parts
validParts = ["Q", "W", "E"]
#if there aren't 3
if len(parts) < 3:
#return no command
return -1
#if all are valid
if parts[0] in validParts and parts[1] in validParts and parts[2] in validParts:
#1st is Q
if parts[0] == "Q":
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQQ
return 6
#3rd is W
elif parts[2] == "W":
#QQW
return 3
#3rd is E
else:
#QQE
return 2
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#QQW
return 3
#3rd is W
elif parts[2] == "W":
#WWQ
return 1
#3rd is E
else:
#QWE
return 9
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#QQE
return 2
#3rd is W
elif parts[2] == "W":
#QWE
return 9
#3rd is E
else:
#EEQ
return 5
#1st is W
elif parts[0] == "W":
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQW
return 3
#3rd is W
elif parts[2] == "W":
#WWQ
return 1
#3rd is E
else:
#QWE
return 9
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#WWQ
return 1
#3rd is W
elif parts[2] == "W":
#WWW
return 7
#3rd is E
else:
#WWE
return 0
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#QWE
return 9
#3rd is W
elif parts[2] == "W":
#WWE
return 0
#3rd is E
else:
#EEW
return 4
#1st is E
else:
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQE
return 2
#3rd is W
elif parts[2] == "W":
#QWE
return 9
#3rd is E
else:
#EEQ
return 5
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#QWE
return 9
#3rd is W
elif parts[2] == "W":
#WWE
return 0
#3rd is E
else:
#EEW
return 4
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#EEQ
return 5
#3rd is W
elif parts[2] == "W":
#EEW
return 4
#3rd is E
else:
#EEE
return 8
#if no other command was found
return -1
def addMana (pots, amount, addPosition, startDir):
'''Add mana to a specific pot - with overflow'''
#if addPosition is within pots
if addPosition < len(pots) and addPosition > -1:
#get the current pot amount
currentPotAmount = pots[addPosition]
#if there will not be overflow
if currentPotAmount + amount <= 255:
#add to the pot
pots[addPosition] = pots[addPosition] + amount
else:
#there is overflow
#fill the current pot
pots[addPosition] = 255
#remove amount added from amount that needs to be added
amount = amount - (255 - currentPotAmount)
#split evenly into left and right overflow
rightPart = amount // 2
leftPart = amount // 2
#if it was an odd number
if amount % 2 != 0:
#if right first
if startDir == 0:
#add excess to right
rightPart = rightPart + 1
else:
#add excess to left
leftPart = leftPart + 1
#for the pot on the left of the current to 0
for i in range(addPosition - 1, -1, -1):
#if there is still mana to add on the left
if leftPart > 0:
#if the current pot is not full
if pots[i] < 255:
#get the amount in the pot
potAmount = pots[i]
#if there will not be further overflow
if potAmount + leftPart < 255:
#add left part remaining to the pot
pots[i] = pots[i] + leftPart
#no left part remaining
leftPart = 0
else:
#fill the pot
pots[i] = 255
#remove the amount added from the left part
leftPart = leftPart - (255 - potAmount)
#if there is still a left part
if leftPart > 0:
#add it to the right part (it bounced off the left wall)
rightPart = rightPart + leftPart
#start position is one right of the add position
currentPot = addPosition + 1
#until the right part runs out
while rightPart > 0:
#if there is not a current pot
if currentPot >= len(pots):
#add a new empty pot
pots.append(0)
#if the pot is not full
if pots[currentPot] < 255:
#if there will not be overflow
if rightPart <= (255 - pots[currentPot]):
#add remaining amount to the pot
pots[currentPot] = pots[currentPot] + rightPart
#no right part remaining
rightPart = 0
else:
#get the pot amount
potAmount = pots[currentPot]
#fill the pot
pots[currentPot] = 255
#reduce the right part by the amount added
rightPart = rightPart - (255 - potAmount)
#move to the next pot
currentPot = currentPot + 1
#return the updated pots
return pots
def outputChar(number):
'''Print a single character with no return character'''
print(chr(number), end="")
def outputNumber(number, needLine):
'''Print a number with a leading new line if needed'''
#if a new line is needed
if needLine:
#print the number with new line
print("\n" + str(number))
else:
#print just the number
print(number)
def getInput ():
'''Get user input'''
#get input
inp = input(">")
try:
#attempt convert to int
inp = int(inp)
#return number (if positive)
        if inp >= 0:
return inp
    except ValueError:
#if it isn't a number
#if it has a length of 1
if len(inp) == 1:
#return the unicode value for it (mod 256, so it is in ascii range)
return (ord(inp) % 256)
#if a number or single character was not entered
return 0
def runProgram (code, debug):
'''Run a program, code is the program code and debug is if the debug prints are on'''
#create mana pots
manaPots = [0]
#create empty phial
manaPhial = 0
#the currently selected pot
currentPot = 0
#the parts of a command currently in use
parts = ["", "", ""]
#where to replace the part next
partToReplace = 0
#if the program is running
running = True
#current instruction pointers
pointerY = 0
pointerX = 0
#how to change the pointers after each instruction (x,y)
pointerChange = [1, 0]
#what the current direction of overflow is
direction = random.randint(0,1)
#if a new line is needed (has a character been printed most recently)
needLine = False
#valid code parts
validParts = ["Q", "W", "E"]
#single command characters
otherCommands = [">", "<", "V", "^", "I", "R"]
#until the program stops
while running:
#wrap the pointer if it goes off the right side
if pointerX >= len(code[0]):
pointerX = 0
#wrap the pointer if it goes off the left side
if pointerX < 0:
pointerX = len(code[0]) - 1
#wrap the pointer if it goes off the bottom
if pointerY >= len(code):
pointerY = 0
#wrap the pointer if it goes off the top
if pointerY < 0:
pointerY = len(code) - 1
#the amount the pointer is being boosted by (jumps)
pointerBoost = [0,0]
#the currently selected character/instruction
currentChar = code[pointerY][pointerX]
#if in debug mode
if debug:
#output the character
print(currentChar)
#if it is a command or code character
if currentChar in validParts or currentChar in otherCommands:
#if the character is the invoke command
if currentChar == "I":
#get which command the code makes up
command = whichCommand(parts)
#if debug is on
if debug:
#print the command id
print(command)
if command == 0:
#right 1 pot
#increase selected pot
currentPot = currentPot + 1
#if this exceeds where there are currently pots
if currentPot >= len(manaPots):
#add a new empty pot
manaPots.append(0)
#get a new random direction
direction = random.randint(0,1)
elif command == 1:
#left 1 pot
#if not on the left wall
if currentPot != 0:
#get new random direction
direction = random.randint(0,1)
#move to the left
currentPot = currentPot - 1
#if at the left wall
if currentPot < 0:
#stay on first pot
currentPot = 0
elif command == 2:
#add 1 to current pot
#call for 1 mana to be added to the current pot
manaPots = addMana(manaPots, 1, currentPot, direction)
#invert the direction
if direction == 0:
direction = 1
else:
direction = 0
elif command == 3:
#remove 1 from current pot
#if there is mana in the pot
if manaPots[currentPot] > 0:
#take 1 mana from the pot
manaPots[currentPot] = manaPots[currentPot] - 1
elif command == 4:
#output value of current pot
#output the number
outputNumber(manaPots[currentPot], needLine)
#a new line is no longer needed
needLine = False
elif command == 5:
#output character of current pot
#output character
outputChar(manaPots[currentPot])
#a new line is needed when a number is printed
needLine = True
elif command == 6:
#Input and add to current pot
#get amount to add
toAdd = getInput()
#call for it to be added to the pot
manaPots = addMana(manaPots, toAdd, currentPot, direction)
#if the amount added was odd
if toAdd % 2 != 0:
#invert the direction
if direction == 0:
direction = 1
else:
direction = 0
elif command == 7:
#draw as much as possible to fill phial
#get the maximum amount that can be taken
maximum = 511 - manaPhial
#if the maximum is greater than or equal to the amount in the current pot
if maximum >= manaPots[currentPot]:
#take all into phial
manaPhial = manaPhial + manaPots[currentPot]
#set pot to empty
manaPots[currentPot] = 0
else:
#fill phial
manaPhial = 511
#remove maximum from current pot
manaPots[currentPot] = manaPots[currentPot] - maximum
elif command == 8:
#pour all from phial into pot
#add the amount in the phial to the current pot
manaPots = addMana(manaPots, manaPhial, currentPot, direction)
#if it was an odd amount
if manaPhial % 2 != 0:
#invert direction
if direction == 0:
direction = 1
else:
direction = 0
#empty phial
manaPhial = 0
elif command == 9:
#stop
running = False
elif currentChar == "R":
#revoke - jump if empty pot
#if the current pot is empty
if manaPots[currentPot] == 0:
                    #move the pointer 1 further in its current direction
pointerBoost[0] = pointerChange[0]
pointerBoost[1] = pointerChange[1]
elif currentChar == ">":
#switch to move right
pointerChange = [1, 0]
elif currentChar == "<":
#switch to move left
pointerChange = [-1, 0]
elif currentChar == "V":
#switch to move down
pointerChange = [0, 1]
elif currentChar == "^":
#switch to move up
pointerChange = [0, -1]
elif currentChar in validParts:
#it is Q,W or E
                #replace the next part to replace with this character
parts[partToReplace] = currentChar
#move to replace on one
partToReplace = partToReplace + 1
#if it is beyond the end
if partToReplace > 2:
#move back to start
partToReplace = 0
#adjust instruction pointer
pointerX = pointerX + pointerChange[0] + pointerBoost[0]
pointerY = pointerY + pointerChange[1] + pointerBoost[1]
#if debug is on
if debug:
            #print the mana pots, the current pot position, the phial and current parts
print(manaPots)
print("Current:" + str(currentPot))
print("Phial:" + str(manaPhial))
print(parts)
#if a line is needed
if needLine:
#print complete message with leading new line
print("\nExecution Complete")
else:
#print complete message
print("Execution Complete")
def execute(debug):
'''Get the code and run it'''
#get file data
fileData = getFile()
#if there was some data
    if fileData is not None:
#convert to valid
data = convertToValid(fileData)
#if there is code in y axis (at least 1 row)
if len(data) > 0:
#if there is code in x axis (at least 1 column)
            if len(data[0]) > 0:
#run the program
runProgram(data, debug)
else:
#no columns
print("No code found")
else:
#no rows
print("No code found")
#Main Program
#run the program
execute(False)
#hold the terminal open until enter is pressed
input()
```
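A hedged demonstration of addMana's overflow rules (fill the pot, split the excess, bounce the left half off the wall, append pots on the right as needed); it assumes the functions above are in scope, e.g. pasted into a REPL, since importing the module would run execute(False):
```python
# Overflow walkthrough for addMana: pot 0 holds 250, we add 10 with the
# overflow direction starting to the right (startDir=0).
pots = [250]
pots = addMana(pots, 10, 0, 0)
# Pot 0 fills to 255; the remaining 5 splits 3 right / 2 left, the left part
# bounces off the wall, and a new pot is created to hold all 5.
print(pots)  # -> [255, 5]
```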
#### File: Invoke/invokeUnreactive/invokeDebugger.pyw
```python
import tkinter
import time
from tkinter import N, E, S, W, HORIZONTAL, END, LEFT, RIGHT, INSERT, BOTTOM, TOP, Y, X, BOTH, WORD
from tkinter import scrolledtext
import random
import ToolTips
class manaScroll (tkinter.Frame):
'''Class to hold pots scroll window and manage numbers/sprites'''
def __init__ (self, parent):
'''Constructor'''
#list of buttons - each one is a pot
self.buttons = []
#initialize the frame for this object
tkinter.Frame.__init__(self, parent)
#create canvas to hold scrolling frame
self.canvas = tkinter.Canvas(parent, height=75, borderwidth=0)
#create frame to hold buttons
self.frame = tkinter.Frame(self.canvas)
#create scroll bar
self.scrollBar = tkinter.Scrollbar(parent, orient="horizontal", command=self.canvas.xview)
#attach scroll bar to canvas
self.canvas.configure(xscrollcommand=self.scrollBar.set)
#place scroll bar and canvas
self.scrollBar.grid(row=1, column=0, sticky=(N,E,S,W))
self.canvas.grid(row=0, column=0, sticky=(N,E,S,W))
#create a window to display the frame widget
self.canvas.create_window((0,0), window = self.frame, anchor="nw", tags="self.frame")
#bind reconfiguring size to call for configure
self.frame.bind("<Configure>", self.onFrameConfigure)
#get all pot images from files
self.potImages = [tkinter.PhotoImage(file="resources/beakerEmpty.png"),
tkinter.PhotoImage(file="resources/beakerLevel1.png"),
tkinter.PhotoImage(file="resources/beakerLevel2.png"),
tkinter.PhotoImage(file="resources/beakerLevel3.png"),
tkinter.PhotoImage(file="resources/beakerLevel4.png"),
tkinter.PhotoImage(file="resources/beakerLevel5.png")]
#colour for active buttons
self.buttonActive = "#f49fef"
#add a starting pot
self.addPot()
#get default button colour
self.buttonDefault = self.buttons[0].cget("background")
#activate first button
self.buttons[0].configure(relief="sunken", bg=self.buttonActive)
#set the first as selected
self.selected = 0
def addPot(self):
'''Add a pot to the list'''
#create a label string for the button
label = str(len(self.buttons)) + " : 0"
#create the button
button = tkinter.Button(self.frame,bd=5,disabledforeground="#000000",image=self.potImages[0],text=label,height=60,width=60,state="disabled",compound="bottom")
#add to list of buttons
self.buttons.append(button)
#pack the button
button.pack(side=LEFT)
def onFrameConfigure(self, event):
'''When the frame is resized - reconfigure the scroll area'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def changeSelected(self, toSelect):
'''Change which pot is currently selected'''
#if there is a selected pot and it is in range
if self.selected > -1 and self.selected < len(self.buttons):
#deselect that pot
self.buttons[self.selected].config(relief="raised", bg=self.buttonDefault)
#if to select exists
if toSelect > -1 and toSelect < len(self.buttons):
#set selected
self.selected = toSelect
#select that pot
self.buttons[self.selected].configure(relief="sunken", bg=self.buttonActive)
#get the position to move the scroll to
pos = (toSelect - 3.5)/len(self.buttons)
#if it is less than 0 set it to 0
if pos < 0:
pos = 0
#scroll the canvas to focus on the selected pot
self.canvas.xview_moveto(pos)
else:
#no selected pot
self.selected = -1
def changeValue(self, which, amount):
'''Change the value of a specific pot. which - index of pot to change, amount - what to set it to'''
#if the pot exists in the list of pots
if which >= 0 and which < len(self.buttons):
#create the label text
labelText = str(which) + " : " + str(amount)
#set the buttons text
self.buttons[which].config(text=labelText)
#set to correct sprite
if amount == 0:
self.buttons[which].config(image=self.potImages[0])
elif amount <= 50:
self.buttons[which].config(image=self.potImages[1])
elif amount <= 100:
self.buttons[which].config(image=self.potImages[2])
elif amount <= 150:
self.buttons[which].config(image=self.potImages[3])
elif amount <= 200:
self.buttons[which].config(image=self.potImages[4])
else:
self.buttons[which].config(image=self.potImages[5])
class codeGrid (tkinter.Frame):
'''Grid object to hold all the code inside of'''
def __init__ (self, parent, code):
'''Constructor'''
#create frame
tkinter.Frame.__init__(self, parent)
#create canvas to hold grid, that will scroll
self.canvas = tkinter.Canvas(parent, borderwidth=0)
#create frame to hold characters
self.frame = tkinter.Frame(self.canvas, bg="#000000")
#create scroll bars
self.scrollBarH = tkinter.Scrollbar(parent, orient="horizontal", command=self.canvas.xview)
self.scrollBarV = tkinter.Scrollbar(parent, orient="vertical", command=self.canvas.yview)
self.canvas.configure(xscrollcommand=self.scrollBarH.set, yscrollcommand=self.scrollBarV.set)
#locate items
self.scrollBarH.grid(row=1, column=0, sticky=(N,E,S,W))
self.scrollBarV.grid(row=0, column=1, sticky=(N,E,S,W))
self.canvas.grid(row=0, column=0, sticky=(N,E,S,W))
#create window to display frame
self.canvas.create_window((0,0), window = self.frame, anchor="nw", tags="self.frame")
#bind the window configure to recalculating the bounds
self.frame.bind("<Configure>", self.onFrameConfigure)
#nothing selected yet
self.selectedX = -1
self.selectedY = -1
#2d array for each code element
self.codeParts = []
#iterate for rows
for i in range(0, len(code)):
#add row
self.frame.grid_rowconfigure(i, weight=1)
#iterate for columns
for i in range(0, len(code[0])):
#add column
self.frame.grid_columnconfigure(i, weight=1)
#current row variable
r = 0
#for each of the lines
for line in code:
#current column variable
c = 0
#list of code elements
codes = []
#iterate for each character
for item in line:
#create a label with that character
l = tkinter.Label(self.frame, text = item, bg="#000000", fg="#ffffff", bd=3, font="TKFixedFont")
#locate in grid
l.grid(row=r, column=c, sticky=(N,E,S,W))
#bind clicking on the label to code clicked function
l.bind("<Button-1>", self.codeClicked)
#add label to list
codes.append(l)
#increment column
c = c + 1
#add row to list
self.codeParts.append(codes)
#increment row
r = r + 1
#select first character
self.changeSelection(0, 0)
def codeClicked (self, event):
'''When a code letter is clicked - set as breakpoint'''
#get the character's colour
colour = event.widget.cget("foreground")
#if there is text in the label
if event.widget.cget("text") != " ":
#if it is white (no break)
if colour == "#ffffff":
#set to cyan - break point on
event.widget.config(fg="#00ffff")
else:
#set to white - break point off
event.widget.config(fg="#ffffff")
def checkBreakPoint (self, x, y):
'''Check if there is a break point on a certain character'''
#if both x and y are valid list elements for the code
if x > -1 and x < len(self.codeParts[0]) and y > -1 and y < len(self.codeParts):
#if the colour of the text is cyan
if self.codeParts[y][x].cget("foreground") == "#00ffff":
#break point is present
return True
#no break point
return False
def onFrameConfigure(self, event):
'''When the frame resizes, recalculate the scroll region'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def changeSelection(self, x, y):
'''Change which character is selected'''
#if there is a selected character and it is in the code range
if self.selectedX > -1 and self.selectedX < len(self.codeParts[0]) and self.selectedY > -1 and self.selectedY < len(self.codeParts):
#change background colour back to black (unselected)
self.codeParts[self.selectedY][self.selectedX].config(bg="#000000")
#none currently selected (reset both to -1)
self.selectedX = -1
self.selectedY = -1
#if the given values for selection are in the code range
if x > -1 and x < len(self.codeParts[0]) and y > -1 and y < len(self.codeParts):
#set selection values
self.selectedX = x
self.selectedY = y
#highlight the character (magenta colour)
self.codeParts[self.selectedY][self.selectedX].config(bg="#ff00ff")
class codeWindow (tkinter.Toplevel):
'''Class to hold the code window'''
def __init__(self, code, *args, **kwargs):
'''Constructor'''
tkinter.Toplevel.__init__(self, *args, **kwargs)
#set the title and default geometry
self.title("Invoke Unreactive: Code")
self.geometry("500x500")
#set the icon
self.iconbitmap(r"resources\icon.ico")
#configure rows and columns for grid and scroll bars
self.grid_rowconfigure(0,weight=10)
self.grid_rowconfigure(1,minsize=20)
self.grid_columnconfigure(0,weight=10)
self.grid_columnconfigure(1,minsize=20)
#create code grid
self.gridCode = codeGrid(self, code)
class program ():
'''Class to hold and process the instructions and related data'''
def __init__(self, code, manaWindow, outputWindow, partsOutput, phialOutput, inputEntry, inputButton, codeWindow, partsFrame, runButton, pauseButton, textOutputDebug):
'''Constructor - passes all tkinter UI parts needed and creates program data in default state'''
self.code = code
self.manaOutput = manaWindow
self.textOutput = outputWindow
self.partsOutput = partsOutput
self.phialOutput = phialOutput
self.inputEntry = inputEntry
self.inputButton = inputButton
self.codeWindow = codeWindow
self.partsFrame = partsFrame
self.textOutputDebug = textOutputDebug
#move default output to top
self.textOutput.tkraise()
#assign buttons
self.runButton = runButton
self.pauseButton = pauseButton
#debug output variables
self.debug = False
self.currentColour = 0
self.currentColourDebug = 0
#if the program is about to restart (after this pass)
self.needToRestart = False
#create mana pots
self.manaPots = [0]
#create empty phial
self.manaPhial = 0
#the currently selected pot
self.currentPot = 0
#the parts of a command currently in use
self.parts = ["", "", ""]
#where to replace the part next
self.partToReplace = 0
#if the program is running
self.running = True
#current instruction pointers
self.pointerY = 0
self.pointerX = 0
#how to change the pointers after each instruction (x,y)
self.pointerChange = [1, 0]
#if a new line is needed (has a character been printed most recently)
self.needLine = False
self.needLineDebug = False
#default button colour
self.defaultColour = self.inputButton.cget("background")
#if the program is running
self.running = False
#if a single step should execute
self.step = False
#if input is needed
self.inputNeeded = False
#if the program is finished
self.programComplete = False
#initialize state of the run and pause buttons
self.runButton.config(state = "normal")
self.pauseButton.config(state = "disabled")
#default tooltip for the command
self.tip = ToolTips.ToolTips([self.partsFrame], ["No Command"])
def runPressed (self):
'''When run is pressed toggle into run mode'''
self.running = True
self.step = False
def pausePressed (self):
'''When pause is pressed toggle into paused mode'''
self.running = False
self.step = False
def stepPressed (self):
'''When step is pressed, set variable stating a single step is needed'''
self.step = True
def whichCommand (self, parts):
'''Determine which command is being executed from the 3 parts'''
#list of valid parts
validParts = ["Q", "W", "E"]
#if there aren't 3
if len(parts) < 3:
#return no command
return -1
#if all are valid
if parts[0] in validParts and parts[1] in validParts and parts[2] in validParts:
#1st is Q
if parts[0] == "Q":
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQQ
return 6
#3rd is W
elif parts[2] == "W":
#QQW
return 3
#3rd is E
else:
#QQE
return 2
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#QQW
return 3
#3rd is W
elif parts[2] == "W":
#WWQ
return 1
#3rd is E
else:
#QWE
return 9
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#QQE
return 2
#3rd is W
elif parts[2] == "W":
#QWE
return 9
#3rd is E
else:
#EEQ
return 5
#1st is W
elif parts[0] == "W":
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQW
return 3
#3rd is W
elif parts[2] == "W":
#WWQ
return 1
#3rd is E
else:
#QWE
return 9
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#WWQ
return 1
#3rd is W
elif parts[2] == "W":
#WWW
return 7
#3rd is E
else:
#WWE
return 0
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#QWE
return 9
#3rd is W
elif parts[2] == "W":
#WWE
return 0
#3rd is E
else:
#EEW
return 4
#1st is E
else:
#2nd is Q
if parts[1] == "Q":
#3rd is Q
if parts[2] == "Q":
#QQE
return 2
#3rd is W
elif parts[2] == "W":
#QWE
return 9
#3rd is E
else:
#EEQ
return 5
#2nd is W
elif parts[1] == "W":
#3rd is Q
if parts[2] == "Q":
#QWE
return 9
#3rd is W
elif parts[2] == "W":
#WWE
return 0
#3rd is E
else:
#EEW
return 4
#2nd is E
else:
#3rd is Q
if parts[2] == "Q":
#EEQ
return 5
#3rd is W
elif parts[2] == "W":
#EEW
return 4
#3rd is E
else:
#EEE
return 8
#if no other command was found
return -1
def addMana (self, pots, amount, addPosition):
        '''Add mana to a specific pot - any overflow beyond 255 is discarded'''
#if addPosition is within pots
if addPosition < len(pots) and addPosition > -1:
#get the current pot amount
currentPotAmount = pots[addPosition]
#if there will not be overflow
if currentPotAmount + amount <= 255:
#add to the pot
pots[addPosition] = pots[addPosition] + amount
else:
#fill the current pot
#excess is lost
pots[addPosition] = 255
#return the updated pots
return pots
def writeToText (self, data, colour):
'''Write text to the default output'''
#create the tag name
tag = "colour" + str(self.currentColour)
#configure tag
self.textOutput.tag_configure(tag, foreground = colour)
#increment colour counter
self.currentColour = self.currentColour + 1
#activate text output
self.textOutput.config(state="normal")
#write line in tagged colour
self.textOutput.insert(END, data, tag)
#deactivate text output - prevents user typing
self.textOutput.config(state="disabled")
#move to bottom of output
self.textOutput.yview_moveto(1)
def writeToTextDebug (self, data, colour):
'''Write text to debug output window'''
#create debug tag name
tag = "colour" + str(self.currentColourDebug)
#configure tag
self.textOutputDebug.tag_configure(tag, foreground = colour)
#increment debug colour counter
self.currentColourDebug = self.currentColourDebug + 1
#activate debug output
self.textOutputDebug.config(state="normal")
#write line in tagged colour
self.textOutputDebug.insert(END, data, tag)
#deactivate debug output - prevents user typing
self.textOutputDebug.config(state="disabled")
#move to bottom of debug output
self.textOutputDebug.yview_moveto(1)
def outputInt(self, value):
'''Output an integer to the console'''
#create message
message = str(value) + "\n"
#if a new line is needed (after char output)
if self.needLine:
#add new line to front of message
message = "\n" + message
#a new line is not needed next
self.needLine = False
#write the message
self.writeToText(message, "#000000")
#duplication of above but for debug output (prevents extra new lines where not necessary)
message = str(value) + "\n"
if self.needLineDebug:
message = "\n" + message
self.needLineDebug = False
self.writeToTextDebug(message, "#000000")
def outputChar(self, value):
'''Output a single character to both outputs'''
self.writeToText(str(chr(value)), "#000000")
self.writeToTextDebug(str(chr(value)), "#000000")
def outputDebug(self, message):
'''Output a string to the debug output only (in debug colour too)'''
#create message
message = str(message) + "\n"
#if a new line is needed
if self.needLineDebug:
#add new line
message = "\n" + message
#another line is not yet needed (for next time)
self.needLineDebug = False
#write the message to the debug output
self.writeToTextDebug(message, "#0000ff")
def outputInput(self, data, char):
'''Output an input event to the outputs'''
#create message
message = "Input > " + str(data)
#if it was a single character
if char:
#add integer representation to message
message = message + " (" + str(ord(data)) + ")"
#add new line to end of message
message = message + "\n"
#if another new line is needed (on the front)
if self.needLine:
#add new line
message = "\n" + message
#another new line is not needed next time
self.needLine = False
#write the message to the output
self.writeToText(message, "#00ff00")
#duplicate of above but for debug output (prevents excess or missing new lines)
message = "Input > " + str(data)
if char:
message = message + " (" + str(ord(data)) + ")"
message = message + "\n"
if self.needLineDebug:
message = "\n" + message
self.needLineDebug = False
self.writeToTextDebug(message, "#00ff00")
def endMessage(self):
'''Add the message to both outputs that the program has terminated'''
#create message
message = "Execution complete\n"
#if a new line is needed
if self.needLine:
#add new line
message = "\n" + message
self.needLine = False
#write message to the output
self.writeToText(message, "#ff0000")
#duplicate of above but for debug output
message = "Execution complete\n"
if self.needLineDebug:
message = "\n" + message
self.needLineDebug = False
self.writeToTextDebug(message, "#ff0000")
def movePointer(self):
'''Move the pointer on the code grid'''
try:
#attempt (in case user closed the grid)
#change selected character
self.codeWindow.changeSelection(self.pointerX, self.pointerY)
except:
#do nothing as code window was closed so no action needed
pass
def inputGiven (self, value):
'''Check if an input is valid and process it'''
number = -1
try:
#attempt convert to int
value = int(value)
#if it is a positive number
if value >= 0:
#use the value
number = value
#output that an integer was input
self.outputInput(number, False)
except:
#if it isn't a number
#if it has a length of 1
if len(value) == 1:
#use the unicode value for it (mod 256, so it is in ascii range)
number = ord(value) % 256
#output that a character was input
self.outputInput(value, True)
#if a valid input was given
if number >= 0:
#disable input button
self.inputButton.config(state="disabled")
#empty entry box
self.inputEntry.delete(0, END)
#call for it to be added to the pot
self.manaPots = self.addMana(self.manaPots, number, self.currentPot)
#move pointer to next position
self.pointerX = self.pointerX + self.pointerChange[0]
self.pointerY = self.pointerY + self.pointerChange[1]
#visually move the pointer
self.movePointer()
try:
#if a break point was reached
if self.codeWindow.checkBreakPoint(self.pointerX, self.pointerY):
#stop running
self.running = False
#switch buttons to paused
self.pauseButton.config(state = "disabled")
self.runButton.config(state = "normal")
#debug output
self.outputDebug("Breakpoint reached, pausing")
except:
pass
#input is no longer needed
self.inputNeeded = False
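# Examples (hedged): inputGiven("65") adds 65 mana to the current pot, while
# inputGiven("A") adds ord("A") % 256 = 65; either amount is clamped at 255
# by addMana above.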
def singleExecute (self):
'''Run a single command'''
returnValue = 0
#if the program is running or stepping - not awaiting input and not complete
if (self.running or self.step) and not self.inputNeeded and not self.programComplete:
#if stepping
if self.step:
#wait for next button press
self.step = False
#valid code parts
validParts = ["Q", "W", "E"]
#other commands
otherCommands = ["I", "R", ">", "<", "^", "V"]
#wrap the pointer if it goes off the right side
if self.pointerX >= len(self.code[0]):
self.pointerX = 0
#wrap the pointer if it goes off the left side
if self.pointerX < 0:
self.pointerX = len(self.code[0]) - 1
#wrap the pointer if it goes off the bottom
if self.pointerY >= len(self.code):
self.pointerY = 0
#wrap the pointer if it goes off the top
if self.pointerY < 0:
self.pointerY = len(self.code) - 1
#the amount the pointer is being boosted by (jumps)
pointerBoost = [0,0]
#the currently selected character/instruction
currentChar = self.code[self.pointerY][self.pointerX]
#if it is a command or code character
if currentChar in validParts or currentChar in otherCommands:
#if the character is the invoke command
if currentChar == "I":
#get which command the code makes up
command = self.whichCommand(self.parts)
if command == 0:
#right 1 pot
#increase selected pot
self.currentPot = self.currentPot + 1
#if this exceeds where there are currently pots
if self.currentPot >= len(self.manaPots):
#add a new empty pot
self.manaPots.append(0)
self.manaOutput.addPot()
#select the correct pot
self.manaOutput.changeSelected(self.currentPot)
self.outputDebug("Moving right 1 pot")
elif command == 1:
#left 1 pot
#move to the left
self.currentPot = self.currentPot - 1
#if at the left wall
if self.currentPot < 0:
#stay on first pot
self.currentPot = 0
#select the correct pot
self.manaOutput.changeSelected(self.currentPot)
self.outputDebug("Moving left 1 pot")
elif command == 2:
#add 1 to current pot
#call for 1 mana to be added to the current pot
self.manaPots = self.addMana(self.manaPots, 1, self.currentPot)
self.outputDebug("Adding 1 to pot " + str(self.currentPot))
elif command == 3:
#remove 1 from current pot
#if there is mana in the pot
if self.manaPots[self.currentPot] > 0:
#take 1 mana from the pot
self.manaPots[self.currentPot] = self.manaPots[self.currentPot] - 1
self.outputDebug("Taking 1 from pot " + str(self.currentPot))
elif command == 4:
#output value of current pot
#output the number
self.outputDebug("Outputting pot " + str(self.currentPot) + " value")
self.outputInt(self.manaPots[self.currentPot])
elif command == 5:
#output character of current pot
#output character
self.outputDebug("Outputting pot " + str(self.currentPot) + " character")
self.outputChar(self.manaPots[self.currentPot])
#a new line is needed after a character has been printed
self.needLine = True
self.needLineDebug = True
elif command == 6:
#Input and add to current pot
#get amount to add
self.inputNeeded = True
self.inputButton.config(state="normal")
#focus the input
self.inputEntry.focus_set()
self.outputDebug("Getting user input (store value or character value in pot " + str(self.currentPot)+ ")")
elif command == 7:
#draw as much as possible to fill phial
#get the maximum amount that can be taken
maximum = 511 - self.manaPhial
#if the maximum is greater than or equal to the amount in the current pot
if maximum >= self.manaPots[self.currentPot]:
#take all into phial
self.manaPhial = self.manaPhial + self.manaPots[self.currentPot]
#set pot to empty
self.manaPots[self.currentPot] = 0
else:
#fill phial
self.manaPhial = 511
#remove maximum from current pot
self.manaPots[self.currentPot] = self.manaPots[self.currentPot] - maximum
self.outputDebug("Drawing from " + str(self.currentPot) + " to fill phial")
elif command == 8:
#pour all from phial into pot
#add the amount in the phial to the current pot
self.manaPots = self.addMana(self.manaPots, self.manaPhial, self.currentPot)
#empty phial
self.manaPhial = 0
self.outputDebug("Pouring phial into pot " + str(self.currentPot))
elif command == 9:
#stop
self.programComplete = True
self.outputDebug("Program Stop")
elif currentChar == "R":
#revoke - jump if empty pot
#if the current pot is empty
if self.manaPots[self.currentPot] == 0:
#move the pointer 1 further in its current direction
pointerBoost[0] = self.pointerChange[0]
pointerBoost[1] = self.pointerChange[1]
self.outputDebug("Pot " + str(self.currentPot) +" is empty, jumping over")
else:
self.outputDebug("Pot " + str(self.currentPot) +" is not empty ("+ str(self.manaPots[self.currentPot]) +"), no jump")
elif currentChar == ">":
#switch to move right
self.pointerChange = [1, 0]
self.outputDebug("Moving right")
elif currentChar == "<":
#switch to move left
self.pointerChange = [-1, 0]
self.outputDebug("Moving left")
elif currentChar == "V":
#switch to move down
self.pointerChange = [0, 1]
self.outputDebug("Moving down")
elif currentChar == "^":
#switch to move up
self.pointerChange = [0, -1]
self.outputDebug("Moving up")
elif currentChar in validParts:
#it is Q,W or E
#replace the next part to replace with this character
self.parts[self.partToReplace] = currentChar
#advance the replacement index by one
self.partToReplace = self.partToReplace + 1
#if it is beyond the end
if self.partToReplace > 2:
#move back to start
self.partToReplace = 0
else:
returnValue = 1
#adjust instruction pointer
if not self.inputNeeded and not self.programComplete:
self.pointerX = self.pointerX + self.pointerChange[0] + pointerBoost[0]
self.pointerY = self.pointerY + self.pointerChange[1] + pointerBoost[1]
self.movePointer()
#button colours for parts
quasColour = "#103be8"
wexColour = "#e810ae"
exortColour = "#dd8006"
colours = []
#fill list with colours based on parts
for i in self.parts:
if i == "Q":
colours.append(quasColour)
elif i == "W":
colours.append(wexColour)
elif i == "E":
colours.append(exortColour)
else:
colours.append(self.defaultColour)
#iterate for the parts
for i in range(0, 3):
#set the colours and text
self.partsOutput[i].config(text=self.parts[i], bg=colours[i])
#set the phial output
self.phialOutput.config(text=self.manaPhial)
#iterate for the pots
for i in range(0, len(self.manaPots)):
#set the pot values
self.manaOutput.changeValue(i, self.manaPots[i])
#get the command for the currently set parts
comm = self.whichCommand(self.parts)
#set the tooltip to be correct for the command
if comm == -1:
    self.tip.tooltip_text = ["No Command"]
elif comm == 0:
self.tip.tooltip_text = ["Move right one pot"]
elif comm == 1:
self.tip.tooltip_text = ["Move left one pot"]
elif comm == 2:
self.tip.tooltip_text = ["Add 1 mana to current pot"]
elif comm == 3:
self.tip.tooltip_text = ["Take 1 mana from current pot"]
elif comm == 4:
self.tip.tooltip_text = ["Output number in current pot"]
elif comm == 5:
self.tip.tooltip_text = ["Output character of current pot"]
elif comm == 6:
self.tip.tooltip_text = ["Input value and add that mana to current pot"]
elif comm == 7:
self.tip.tooltip_text = ["Draw as much mana as possible from current pot to fill phial"]
elif comm == 8:
self.tip.tooltip_text = ["Pour all of phial into current pot"]
elif comm == 9:
self.tip.tooltip_text = ["Stop program"]
try:
#attempt to check if there is a break point
if self.codeWindow.checkBreakPoint(self.pointerX, self.pointerY):
#pause if there is
self.running = False
self.pauseButton.config(state = "disabled")
self.runButton.config(state = "normal")
#debug output
self.outputDebug("Breakpoint reached, pausing")
except:
#failed - code window is closed - don't do anything
pass
#if the program finished
if self.programComplete:
#output the end message
self.endMessage()
#-1 - means program ended
return -1
#return the default return value 0 - normal character, other - blank
return returnValue
def openMenu (root):
'''Create and run the default menu - to input the file path'''
#set the title and geometry
root.title("Invoke: File Open")
root.geometry("300x30")
#setup the grid
root.grid_rowconfigure(0, minsize=300)
root.grid_columnconfigure(0, minsize=30)
#add a frame to hold the input
mainFrame = tkinter.Frame(root)
mainFrame.grid(row=0, column=0, sticky=(N,E,S,W))
#create a stringvar to hold the path
path = tkinter.StringVar()
def convertToValid (fileInput):
'''Convert file data to array of valid characters only'''
validChars = ["Q", "W", "E", "R", "I", ">", "<", "V", "^"]
#convert to upper case
fileInput = fileInput.upper()
#create array to hold data in
data = []
#split into lines
lines = fileInput.split("\n")
#to store the max length
maxLength = -1
#iterate lines
for line in lines:
#if this line is longer than the current maximum
if len(line) > maxLength:
#update the maximum
maxLength = len(line)
#iterate lines
for line in lines:
#create list to hold the data for this line
lineData = []
#iterate characters
for char in line:
#if it is a valid character
if char in validChars:
#add it to the list
lineData.append(char)
else:
#add a blank
lineData.append(" ")
#iterate for extra items that are needed
for i in range(maxLength - len(lineData)):
#add blanks to fill to square
lineData.append(" ")
#add line to data
data.append(lineData)
#return the array
return data
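# Example (hedged): convertToValid("qw\nE") upper-cases the input and pads
# every line to the longest one, yielding [["Q", "W"], ["E", " "]];
# characters outside the instruction set become blanks.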
def load (pathString):
    '''Read the .inv file at the given path and return its contents, or None on failure'''
#initialize data as none - if no file can be obtained it won't proceed
fileData = None
#check for correct extension
if pathString.endswith(".inv"):
try:
#attempt to open and read the file
invokeFile = open(pathString, "r")
#store data in variable
fileData = invokeFile.read()
#close the file
invokeFile.close()
except:
#if something went wrong the file wasn't found
return None
return fileData
def attemptLoad ():
'''Load has been pressed so attempt a load'''
#get the path
pathData = path.get()
#load the files data
data = load(pathData)
#if there is some data
if data is not None:
#convert to valid grid
data = convertToValid(data)
#if there is data to run
if len(data) > 0:
if len(data[0]) > 0:
#quit the path input menu
mainFrame.quit()
#open the main program
openProgram(root, data)
#once the program returns quit the main - prevents hanging
root.quit()
#setup the frame grid - part for entry, part for button
mainFrame.grid_rowconfigure(0, minsize=30)
mainFrame.grid_columnconfigure(0, minsize=240)
mainFrame.grid_columnconfigure(1, minsize=60)
#create entry widget (assign path stringvar)
fileInput = tkinter.Entry(mainFrame, bd=5, textvariable=path)
#locate the entry in the grid
fileInput.grid(row=0, column=0, sticky=(N,E,S,W))
#create button which calls an attempt load when pressed
invokeButton = tkinter.Button(mainFrame, bd=5, text="Load", command=attemptLoad)
#locate the button in the grid
invokeButton.grid(row=0, column=1, sticky=(N,E,S,W))
#move the focus to the entry at the start
fileInput.focus_set()
#begin the update loop of the UI
root.mainloop()
def openProgram(root, code):
'''Create the interface for the main program'''
def quitProgram():
'''End the program completely'''
root.destroy()
#create window to store the UI in
programWindow = tkinter.Frame(root)
#locate the program window into the root
programWindow.grid(row=0, column=0, sticky=(N,E,S,W))
#set the title and geometry for the window
root.title("Invoke Unreactive: Program")
root.geometry("600x450")
#set up the rows to hold all the UI
programWindow.grid_rowconfigure(0,minsize=90)
programWindow.grid_rowconfigure(1,minsize=5)
programWindow.grid_rowconfigure(2,minsize=60)
programWindow.grid_rowconfigure(3,minsize=5)
programWindow.grid_rowconfigure(4,minsize=30)
programWindow.grid_rowconfigure(5,minsize=5)
programWindow.grid_rowconfigure(6,minsize=255)
programWindow.grid_columnconfigure(0,minsize=600)
#create frame to hold the memory scroll area
memoryFrame = tkinter.Frame(programWindow)
memoryFrame.grid(row=0, column=0, sticky=(N,E,S,W))
#setup the grid in the memory frame to hold the scroll area and scroll bar
memoryFrame.grid_rowconfigure(0,minsize=60)
memoryFrame.grid_rowconfigure(1,minsize=15)
memoryFrame.grid_columnconfigure(0,minsize=600)
#create the scroll object
memoryScroll = manaScroll(memoryFrame)
#frame to hold the information about the phial and parts
infoFrame = tkinter.Frame(programWindow)
infoFrame.grid(row=2, column=0, sticky=(N,E,S,W))
#setup columns in the info frame
infoFrame.grid_rowconfigure(0, minsize=70)
infoFrame.grid_columnconfigure(0,minsize=210)
infoFrame.grid_columnconfigure(1,minsize=320)
infoFrame.grid_columnconfigure(2,minsize=70)
#create frame to hold the parts
partsFrame = tkinter.Frame(infoFrame)
partsFrame.grid(row=0, column=0, sticky=(N,E,S,W))
#setup the columns for each of the parts in the part holder
partsFrame.grid_rowconfigure(0, minsize=70)
partsFrame.grid_columnconfigure(0, minsize=70)
partsFrame.grid_columnconfigure(1, minsize=70)
partsFrame.grid_columnconfigure(2, minsize=70)
#get the image of the phial
phialImage = tkinter.PhotoImage(file="resources/phial.png")
#setup the parts and phial outputs
part1 = tkinter.Button(partsFrame,bd=5,disabledforeground="#000000",text="",state="disabled")
part2 = tkinter.Button(partsFrame,bd=5,disabledforeground="#000000",text="",state="disabled")
part3 = tkinter.Button(partsFrame,bd=5,disabledforeground="#000000",text="",state="disabled")
phial = tkinter.Button(infoFrame,bd=5,disabledforeground="#000000",image=phialImage,text="0",state="disabled",compound="bottom")
#locate parts and phial into the grid
part1.grid(row=0, column=0, sticky=(N,E,S,W))
part2.grid(row=0, column=1, sticky=(N,E,S,W))
part3.grid(row=0, column=2, sticky=(N,E,S,W))
phial.grid(row=0, column=2, sticky=(N,E,S,W))
#store the parts in a list
partsOutput = [part1, part2, part3]
#create frame to hold the inputs
infoInput = tkinter.Frame(infoFrame)
infoInput.grid(row=0, column=1, sticky=(N,E,S,W))
#configure input grid
infoInput.grid_rowconfigure(0,minsize=30)
infoInput.grid_rowconfigure(1,minsize=30)
infoInput.grid_columnconfigure(0,minsize=210)
infoInput.grid_columnconfigure(1,minsize=110)
#variables to hold input and debug
inputText = tkinter.StringVar()
debug = tkinter.IntVar()
#create entry, input button and debug check box
entryInput = tkinter.Entry(infoInput, bd=5, textvariable=inputText)
entryInputButton = tkinter.Button(infoInput, bd=5, text="Input", state="disabled")
debugCheck = tkinter.Checkbutton(infoInput, text="Debug Output", variable=debug, offvalue=0, onvalue=1)
#locate input elements
entryInput.grid(row=0,column=0,sticky=(N,E,S,W))
entryInputButton.grid(row=0,column=1,sticky=(N,E,S,W))
debugCheck.grid(row=1,column=0,sticky=(N,E,S,W))
#create a frame to hold the controls
controlsFrame = tkinter.Frame(programWindow)
controlsFrame.grid(row=4, column=0, sticky=(N,E,S,W))
#setup controls grid - for each of the buttons
controlsFrame.grid_rowconfigure(0,minsize=30)
controlsFrame.grid_columnconfigure(0,minsize=120)
controlsFrame.grid_columnconfigure(1,minsize=120)
controlsFrame.grid_columnconfigure(2,minsize=120)
controlsFrame.grid_columnconfigure(3,minsize=120)
controlsFrame.grid_columnconfigure(4,minsize=120)
#create all the control buttons
runButton = tkinter.Button(controlsFrame,text="Run")
pauseButton = tkinter.Button(controlsFrame,text="Pause",state="disabled")
stepButton = tkinter.Button(controlsFrame,text="Step")
restartButton = tkinter.Button(controlsFrame,text="Restart")
endButton = tkinter.Button(controlsFrame,text="Exit", command=quitProgram)
#locate the control buttons
runButton.grid(row=0,column=0,sticky=(N,E,S,W))
pauseButton.grid(row=0,column=1,sticky=(N,E,S,W))
stepButton.grid(row=0,column=2,sticky=(N,E,S,W))
restartButton.grid(row=0,column=3,sticky=(N,E,S,W))
endButton.grid(row=0,column=4,sticky=(N,E,S,W))
#create and locate holder for the output
outputFrame = tkinter.Frame(programWindow)
outputFrame.grid(row=6, column=0, sticky=(N,E,S,W))
#create grid for output holder
outputFrame.grid_rowconfigure(0, minsize=240)
outputFrame.grid_columnconfigure(0, minsize=600)
#do not allow internal components to change the frames size
outputFrame.grid_propagate(False)
#create the normal output holder
outputNormalHolder = tkinter.Frame(outputFrame)
#locate holder
outputNormalHolder.grid(row = 0, column = 0, sticky = (N,E,S,W))
#configure grid of holder
outputNormalHolder.grid_rowconfigure(0, minsize=240)
outputNormalHolder.grid_columnconfigure(0, minsize=600)
#create the debug output holder
outputDebugHolder = tkinter.Frame(outputFrame)
#locate holder
outputDebugHolder.grid(row = 0, column = 0, sticky = (N,E,S,W))
#configure grid of holder
outputDebugHolder.grid_rowconfigure(0, minsize=240)
outputDebugHolder.grid_columnconfigure(0, minsize=600)
#create and locate normal scroll text output
textOutput = scrolledtext.ScrolledText(outputNormalHolder, wrap=WORD, width=0, height=0, state="disabled")
textOutput.grid(row=0, column=0, sticky=(N,E,S,W))
#create and locate debug scroll text output
textOutputDebug = scrolledtext.ScrolledText(outputDebugHolder, wrap=WORD, width=0, height=0, state="disabled")
textOutputDebug.grid(row=0, column=0, sticky=(N,E,S,W))
#move normal output to the top
outputNormalHolder.tkraise()
#create the program display window
programWindowOutput = codeWindow(code)
#create the program controller - passing all necessary components
programControl = program(code, memoryScroll, textOutput, partsOutput, phial, entryInput, entryInputButton, programWindowOutput.gridCode, partsFrame, runButton, pauseButton, textOutputDebug)
def runButtonPressed ():
'''Run the program'''
runButton.config(state="disabled")
pauseButton.config(state="normal")
programControl.runPressed()
def pauseButtonPressed ():
'''Pause the program'''
runButton.config(state="normal")
pauseButton.config(state="disabled")
programControl.pausePressed()
def inputButtonPressed ():
'''Attempt input of data from entry widget'''
programControl.inputGiven(inputText.get())
def stepButtonPressed ():
'''Perform a single step'''
programControl.stepPressed()
def debugChecked ():
'''Invert the debug state'''
#if debug is on
if debug.get() == 1:
#change the variable in the program controller
programControl.debug = True
#move the debug output to the top
outputDebugHolder.tkraise()
#scroll to same position in debug as in normal
textOutputDebug.yview_moveto(textOutput.yview()[0])
else:
#change the variable in the program controller
programControl.debug = False
#move the normal output to the top
outputNormalHolder.tkraise()
#scroll to the same position in normal as in debug
textOutput.yview_moveto(textOutputDebug.yview()[0])
def restart(programControl, programWindowOutput):
'''Reload the program'''
#destroy the program controller
del programControl
#destroy the program windows
programWindow.destroy()
programWindowOutput.destroy()
#recreate the programs
openProgram(root, code)
def restartPressed():
'''When restart is pressed turn flag on in the program so at the end of next execution it restarts'''
programControl.needToRestart = True
#if the program is over - restart now
if programControl.programComplete:
restart(programControl, programWindowOutput)
#bind control buttons to associated functions
runButton.config(command=runButtonPressed)
pauseButton.config(command=pauseButtonPressed)
entryInputButton.config(command=inputButtonPressed)
stepButton.config(command=stepButtonPressed)
debugCheck.config(command=debugChecked)
restartButton.config(command=restartPressed)
def runningLoop ():
'''Repeats while the program is running'''
#run the next instruction (if possible)
#will check if running or stepping or inputting first
value = programControl.singleExecute()
#if restart is required
if programControl.needToRestart:
#restart the program
restart(programControl, programWindowOutput)
#if the program hasn't ended and doesn't need to restart
if value != -1 and not programControl.needToRestart:
#normal case
if value == 0:
#repeat with normal delay
root.after(150, runningLoop)
#no code executed (blank)
else:
#repeat with short delay
root.after(30, runningLoop)
#start the running loop
runningLoop()
#start the UI update loop
root.mainloop()
#create the program root
root = tkinter.Tk()
#set the icon for the window
root.iconbitmap(r"resources\icon.ico")
#cannot resize the window
root.resizable(False, False)
#start the program by opening the load menu
openMenu(root)
```
|
{
"source": "Jemie-Wang/ECE5725_Project-Reptile_Monitoring_System",
"score": 3
}
|
#### File: ECE5725_Project-Reptile_Monitoring_System/logSensor/logDHT.py
```python
import time
import sqlite3
import board
import adafruit_dht
import RPi.GPIO as GPIO
import os
dbname='../sensorData.db'
sampleFreq = 10 # time in seconds
dhtDevice = adafruit_dht.DHT11(board.D6)
# get data from DHT sensor
def getDHTdata():
# Read the sensor and print the values to standard output
temp=None
hum =None
try:
temp = dhtDevice.temperature
hum = dhtDevice.humidity
if hum is not None and temp is not None:
hum = round(hum)
temp = round(temp, 1)
print(
"Temp: {:.1f} C Humidity: {}% ".format(
temp, hum
)
)
if temp>25 or hum<5:
os.system('python /home/pi/project/logSensor/textMessage/sensorText.py')
    except RuntimeError as error:
        # Errors happen fairly often, DHT's are hard to read, just keep going
        time.sleep(2.0)
    except OverflowError as error:
        # must come before the generic Exception handler, or it is unreachable
        print("overflow error: " + str(error))
    except Exception as error:
        dhtDevice.exit()
        raise error
    return temp, hum
# log sensor data on database
def logData (temp, hum):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
curs.execute("INSERT INTO DHT_data values(datetime('now','localtime'), (?), (?))", (temp, hum))
conn.commit()
conn.close()
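# Assumed schema for the database above (a sketch; the table itself is
# created elsewhere, e.g. in a separate setup script):
#   CREATE TABLE DHT_data (timestamp DATETIME, temp NUMERIC, hum NUMERIC);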
# main function
def main():
while True:
temp, hum = getDHTdata()
if temp is None or hum is None:
#print("The DHT failed to work!!!!!!!!!")
continue
logData (temp, hum)
time.sleep(sampleFreq)
# ------------ Execute program
main()
GPIO.cleanup()  # note: never reached while main() loops forever
```
|
{
"source": "jemik/tm-v1-api-cookbook",
"score": 2
}
|
#### File: check-incident-details/python/check_incident_details.py
```python
import argparse
import datetime
import json
import os
import requests
# default settings
V1_TOKEN = os.environ.get('TMV1_TOKEN', '')
V1_URL = os.environ.get('TMV1_URL', 'https://api.xdr.trendmicro.com')
def check_datetime_aware(d):
return (d.tzinfo is not None) and (d.tzinfo.utcoffset(d) is not None)
class TmV1Client:
base_url_default = V1_URL
WB_STATUS_IN_PROGRESS = 1
def __init__(self, token, base_url=None):
if not token:
raise ValueError('Authentication token missing')
self.token = token
self.base_url = base_url or TmV1Client.base_url_default
def make_headers(self):
return {
'Authorization': 'Bearer ' + self.token,
'Content-Type': 'application/json;charset=utf-8'
}
def get(self, path, **kwargs):
kwargs.setdefault('headers', {}).update(self.make_headers())
r = requests.get(self.base_url + path, **kwargs)
if ((200 == r.status_code)
and ('application/json' in r.headers.get('Content-Type', ''))):
return r.json()
raise RuntimeError(f'Request unsuccessful (GET {path}):'
f' {r.status_code} {r.text}')
def put(self, path, **kwargs):
kwargs.setdefault('headers', {}).update(self.make_headers())
r = requests.put(self.base_url + path, **kwargs)
if ((200 == r.status_code)
and ('application/json' in r.headers.get('Content-Type', ''))):
return r.json()
raise RuntimeError(f'Request unsuccessful (PUT {path}):'
f' {r.status_code} {r.text}')
def get_workbench_histories(self, start, end, offset=None, size=None):
if not check_datetime_aware(start):
start = start.astimezone()
if not check_datetime_aware(end):
end = end.astimezone()
start = start.astimezone(datetime.timezone.utc)
end = end.astimezone(datetime.timezone.utc)
start = start.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
end = end.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
# API returns data in the range of [offset, offset+limit)
return self.get(
'/v2.0/xdr/workbench/workbenchHistories',
params=dict([('startTime', start), ('endTime', end),
('sortBy', '-createdTime')]
+ ([('offset', offset)] if offset is not None else [])
+ ([('limit', size)] if size is not None else [])
))['data']['workbenchRecords']
def update_workbench(self, workbench_id, status):
return self.put(
f'/v2.0/xdr/workbench/workbenches/{workbench_id}',
json={'investigationStatus': status})
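    # Usage sketch (hedged; 'token' and 'wb_id' are placeholder values):
    #   v1 = TmV1Client(token)
    #   v1.update_workbench(wb_id, TmV1Client.WB_STATUS_IN_PROGRESS)
    # marks one alert as "in progress" via the workbench endpoint above.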
def fetch_workbench_alerts(v1, start, end):
"""
    Retrieve all workbench alerts by paging through the API, advancing
    'offset' (with page size 'size') until an empty page is returned.
"""
offset = 0
size = 100
alerts = []
while True:
gotten = v1.get_workbench_histories(start, end, offset, size)
if not gotten:
break
print(f'Workbench alerts ({offset} {offset+size}): {len(gotten)}')
alerts.extend(gotten)
offset = len(alerts)
return alerts
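# Pagination sketch: each call returns at most 'size' records starting at
# 'offset', i.e. pages [0, 100), [100, 200), ... until an empty page arrives.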
def main(start, end, days, v1_token, v1_url):
if end is None:
end = datetime.datetime.now(datetime.timezone.utc)
else:
end = datetime.datetime.fromisoformat(end)
if start is None:
start = end + datetime.timedelta(days=-days)
else:
start = datetime.datetime.fromisoformat(start)
v1 = TmV1Client(v1_token, v1_url)
wb_records = fetch_workbench_alerts(v1, start, end)
if wb_records:
print('')
print('Target Workbench alerts:')
print(json.dumps([x['workbenchId'] for x in wb_records], indent=2))
print('')
records_list = []
for record in wb_records:
wb_id = record['workbenchId']
records_list.append(record)
if record['investigationStatus'] == 0:
v1.update_workbench(wb_id, TmV1Client.WB_STATUS_IN_PROGRESS)
print('Details of target Workbench alerts:')
print(json.dumps(records_list, indent=2))
else:
print('No Workbench alerts found')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Modify alert status after checking alert details',
epilog=(f'Example: python {os.path.basename(__file__)} '
'-e 2021-04-12T14:28:00.123456+00:00 -d 5'))
parser.add_argument(
'-s', '--start',
help=('Timestamp in ISO 8601 format that indicates the start of'
' the data retrieval time range'))
parser.add_argument(
'-e', '--end',
help=('Timestamp in ISO 8601 format that indicates the end of the data'
' retrieval time range. The default value is the current time'))
parser.add_argument(
'-d', '--days', type=int, default=5,
help=('Number of days before the end time of the data retrieval'
' time range. The default value is 5.'))
parser.add_argument(
'-t', '--v1-token', default=V1_TOKEN,
help=('Authentication token of your Trend Micro Vision One'
' user account'))
parser.add_argument(
'-r', '--v1-url', default=TmV1Client.base_url_default,
help=('URL of the Trend Micro Vision One server for your region.'
f' The default value is "{TmV1Client.base_url_default}"'))
main(**vars(parser.parse_args()))
```
|
{
"source": "jemil-butt/kernel_inference",
"score": 3
}
|
#### File: kernel_inference/Figures/Comparison_table.py
```python
import sys
sys.path.append("..")
import numpy as np
import numpy.linalg as lina
import matplotlib.pyplot as plt
import scipy as sc
import scipy.optimize as scopt
plt.rcParams.update({'font.size': 6})
K_nondiag_mercer_1=np.load("../Data/K_nondiag_Mercer_1.npy")
K_nondiag_mercer_2=np.load("../Data/K_nondiag_Mercer_2.npy")
# ii) Auxiliary quantities
n=300
n_sample=5
n_simu=1
n_test=1000
t=np.linspace(0,1,n)
sample_index=np.round(np.linspace(n/4,3*n/4,n_sample))
t_sample=t[sample_index.astype(int)]
np.random.seed(0)
tol=10**(-6)
"""
2. Create covariance matrices
"""
# i) Define parameters
d_sqexp=0.2
d_exp=0.5
n_exp_Bochner=10
# ii) Create covariance functions
def cov_fun_sqexp_true(t1,t2):
return np.exp(-(lina.norm(t1-t2)/d_sqexp)**2)
def cov_fun_exp_true(t1,t2):
return np.exp(-(lina.norm(t1-t2)/d_exp))
def cov_fun_bb_true(t1,t2):
return np.min((t1,t2))-t1*t2
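# In formulas, the three true covariance models above are:
#   squared exponential: k(s, t) = exp(-(|s - t| / d_sqexp)^2)
#   exponential:         k(s, t) = exp(-|s - t| / d_exp)
#   Brownian bridge:     k(s, t) = min(s, t) - s * t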
# iii) Assemble matrices
K_sqexp=np.zeros([n,n])
K_exp=np.zeros([n,n])
K_bb=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp[k,l]=cov_fun_sqexp_true(t[k],t[l])
K_exp[k,l]=cov_fun_exp_true(t[k],t[l])
K_bb[k,l]=cov_fun_bb_true(t[k],t[l])
"""
3. Simulation for model training -----------------------------------------
"""
# i) Initialization
x_simu_sqexp=np.zeros([n,n_simu])
x_simu_exp=np.zeros([n,n_simu])
x_simu_bb=np.zeros([n,n_simu])
x_simu_ndm_1=np.zeros([n,n_simu])
x_simu_ndm_2=np.zeros([n,n_simu])
# ii) Simulate
for k in range(n_simu):
x_simu_sqexp[:,k]=np.random.multivariate_normal(np.zeros([n]),K_sqexp)
x_simu_exp[:,k]=np.random.multivariate_normal(np.zeros([n]),K_exp)
x_simu_bb[:,k]=np.random.multivariate_normal(np.zeros([n]),K_bb)
x_simu_ndm_1[:,k]=np.random.multivariate_normal(np.zeros([n]),K_nondiag_mercer_1)
x_simu_ndm_2[:,k]=np.random.multivariate_normal(np.zeros([n]),K_nondiag_mercer_2)
# iii) Empirical covariances
K_emp_sqexp=(1/n_simu)*x_simu_sqexp@x_simu_sqexp.T
K_emp_exp=(1/n_simu)*x_simu_exp@x_simu_exp.T
K_emp_bb=(1/n_simu)*x_simu_bb@x_simu_bb.T
K_emp_ndm_1=(1/n_simu)*x_simu_ndm_1@x_simu_ndm_1.T
K_emp_ndm_2=(1/n_simu)*x_simu_ndm_2@x_simu_ndm_2.T
"""
4. Infer best model parameters: Parametric -------------------------------
"""
# i) Create empirical correlogram
Dist_mat=np.zeros([n,n])
for k in range(n):
for l in range(n):
Dist_mat[k,l]=np.abs(t[k]-t[l])
t_diff=1*t
n_t_diff=np.zeros([n,1])
correlogram_sqexp=np.zeros([n,1])
correlogram_exp=np.zeros([n,1])
correlogram_bb=np.zeros([n,1])
correlogram_ndm_1=np.zeros([n,1])
correlogram_ndm_2=np.zeros([n,1])
for k in range(n):
Ind_mat=np.isclose(Dist_mat,t[k])
n_t_diff[k]=np.sum(Ind_mat)
correlogram_sqexp[k]=np.sum(K_emp_sqexp[Ind_mat])/n_t_diff[k]
correlogram_exp[k]=np.sum(K_emp_exp[Ind_mat])/n_t_diff[k]
correlogram_bb[k]=np.sum(K_emp_bb[Ind_mat])/n_t_diff[k]
correlogram_ndm_1[k]=np.sum(K_emp_ndm_1[Ind_mat])/n_t_diff[k]
correlogram_ndm_2[k]=np.sum(K_emp_ndm_2[Ind_mat])/n_t_diff[k]
# ii) Define model covariances
def cov_fun_sqexp_model(t1,t2,sigma,d):
return sigma*np.exp(-(lina.norm(t1-t2)/d)**2)
def cov_fun_exp_model(t1,t2,sigma,d):
return sigma*np.exp(-(lina.norm(t1-t2)/d))
# iii) Define objective functions
# First: sqexp dataset
def sqexp_sqexp_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_sqexp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_sqexp)
return RMSE
def exp_sqexp_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_exp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_sqexp)
return RMSE
# Second: exp dataset
def sqexp_exp_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_sqexp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_exp)
return RMSE
def exp_exp_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_exp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_exp)
return RMSE
# Third: bb dataset
def sqexp_bb_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_sqexp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_bb)
return RMSE
def exp_bb_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_exp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_bb)
return RMSE
# Fourth: ndm_1 dataset
def sqexp_ndm_1_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_sqexp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_ndm_1)
return RMSE
def exp_ndm_1_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_exp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_ndm_1)
return RMSE
# Fifth: dnm_2 dataset
def sqexp_ndm_2_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_sqexp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_ndm_2)
return RMSE
def exp_ndm_2_objective(x):
cov_predicted=np.zeros([n,1])
for k in range(n):
cov_predicted[k]=cov_fun_exp_model(t[0],t[k],x[0],x[1])
RMSE=(1/n)*lina.norm(cov_predicted-correlogram_ndm_2)
return RMSE
# iv) Optimize parameters
# First sqexp dataset
params_optimal_sqexp_sqexp=(scopt.minimize(sqexp_sqexp_objective,[0,1])).x
params_optimal_exp_sqexp=(scopt.minimize(exp_sqexp_objective,[0,1])).x
# Second exp dataset
params_optimal_sqexp_exp=(scopt.minimize(sqexp_exp_objective,[0,1])).x
params_optimal_exp_exp=(scopt.minimize(exp_exp_objective,[0,1])).x
# Third bb dataset
params_optimal_sqexp_bb=(scopt.minimize(sqexp_bb_objective,[0,1])).x
params_optimal_exp_bb=(scopt.minimize(exp_bb_objective,[0,1])).x
# Fourth ndm_1 dataset
params_optimal_sqexp_ndm_1=(scopt.minimize(sqexp_ndm_1_objective,[0,1])).x
params_optimal_exp_ndm_1=(scopt.minimize(exp_ndm_1_objective,[0,1])).x
# Fifth dnm_2 dataset
params_optimal_sqexp_ndm_2=(scopt.minimize(sqexp_ndm_2_objective,[0,1])).x
params_optimal_exp_ndm_2=(scopt.minimize(exp_ndm_2_objective,[0,1])).x
"""
5. Infer best model parameters: Bochner ----------------------------------
"""
# i) Calculate the basis functions
n_exp_Bochner=30
omega=np.logspace(-1,1,n_exp_Bochner)
def complex_exp(t1,t2,omega):
return np.exp(2*np.pi*(1j)*omega*(t1-t2))
basis_vectors=np.zeros([n,n_exp_Bochner])+0j*np.zeros([n,n_exp_Bochner])
for k in range(n):
for l in range(n_exp_Bochner):
basis_vectors[k,l]=(complex_exp(t[k],0,omega[l]))
# ii) Parameter estimation
Bochner_Psi_mat=np.zeros([n,n_exp_Bochner])
for k in range(n):
for l in range(n_exp_Bochner):
Bochner_Psi_mat[k,l]=complex_exp(0,t_diff[k],omega[l])
weight_fun_sqexp=(scopt.nnls(Bochner_Psi_mat,np.reshape(correlogram_sqexp,[n])))[0]
weight_fun_exp=(scopt.nnls(Bochner_Psi_mat,np.reshape(correlogram_exp,[n])))[0]
weight_fun_bb=(scopt.nnls(Bochner_Psi_mat,np.reshape(correlogram_bb,[n])))[0]
weight_fun_ndm_1=(scopt.nnls(Bochner_Psi_mat,np.reshape(correlogram_ndm_1,[n])))[0]
weight_fun_ndm_2=(scopt.nnls(Bochner_Psi_mat,np.reshape(correlogram_ndm_2,[n])))[0]
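# Design note: nonnegative least squares keeps every spectral weight >= 0, so
# by Bochner's theorem the kernels assembled below as mixtures of complex
# exponentials are valid (positive semidefinite) covariance functions.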
# iii) Assemble to covariance function
K_Bochner_sqexp=np.real(basis_vectors@np.diag(weight_fun_sqexp)@basis_vectors.conj().T)
K_Bochner_exp=np.real(basis_vectors@np.diag(weight_fun_exp)@basis_vectors.conj().T)
K_Bochner_bb=np.real(basis_vectors@np.diag(weight_fun_bb)@basis_vectors.conj().T)
K_Bochner_ndm_1=np.real(basis_vectors@np.diag(weight_fun_ndm_1)@basis_vectors.conj().T)
K_Bochner_ndm_2=np.real(basis_vectors@np.diag(weight_fun_ndm_2)@basis_vectors.conj().T)
"""
6. Kernel inference ------------------------------------------------------
"""
# i) Prepare auxiliary quantities
r=2
n_exp=10
d_prior=0.4
def cov_fun_prior(t1,t2):
return np.exp(-(lina.norm(t1-t2)/d_prior)**2)
K_prior=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_prior[k,l]=cov_fun_prior(t[k],t[l])
[U_p,Lambda_p,V_p]=lina.svd(K_prior)
U_p_cut=U_p[:,:n_exp]
Lambda_p_cut=np.diag(Lambda_p[:n_exp])
Psi=U_p_cut
# ii) Perform kernel inference
import KI
beta, mu, gamma_sqexp, C_gamma_sqexp, KI_logfile_sqexp=KI.Kernel_inference_homogeneous(x_simu_sqexp,Lambda_p_cut,Psi,r)
beta, mu, gamma_exp, C_gamma_exp, KI_logfile_exp=KI.Kernel_inference_homogeneous(x_simu_exp,Lambda_p_cut,Psi,r)
beta, mu, gamma_bb, C_gamma_bb, KI_logfile_bb=KI.Kernel_inference_homogeneous(x_simu_bb,Lambda_p_cut,Psi,r)
beta, mu, gamma_ndm_1, C_gamma_ndm_1, KI_logfile_ndm_1=KI.Kernel_inference_homogeneous(x_simu_ndm_1,Lambda_p_cut,Psi,r)
beta, mu, gamma_ndm_2, C_gamma_ndm_2, KI_logfile_ndm_2=KI.Kernel_inference_homogeneous(x_simu_ndm_2,Lambda_p_cut,Psi,r)
"""
7. Simulation and RMSE ---------------------------------------------------
"""
# i) Set up Matrices for interpolation
# First: sqexp Dataset
K_sqexp_sqexp=np.zeros([n,n])
K_exp_sqexp=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp_sqexp[k,l]=cov_fun_sqexp_model(t[k],t[l],params_optimal_sqexp_sqexp[0],params_optimal_sqexp_sqexp[1])
K_exp_sqexp[k,l]=cov_fun_exp_model(t[k],t[l],params_optimal_exp_sqexp[0],params_optimal_exp_sqexp[1])
K_KI_sqexp=C_gamma_sqexp
K_t_sqexp_sqexp=K_sqexp_sqexp[:,sample_index.astype(int)]
K_t_exp_sqexp=K_exp_sqexp[:,sample_index.astype(int)]
K_t_Bochner_sqexp=K_Bochner_sqexp[:,sample_index.astype(int)]
K_t_KI_sqexp=K_KI_sqexp[:,sample_index.astype(int)]
K_ij_sqexp_sqexp=K_sqexp_sqexp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_exp_sqexp=K_exp_sqexp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_Bochner_sqexp=K_Bochner_sqexp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_KI_sqexp=K_KI_sqexp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# Second: exp Dataset
K_sqexp_exp=np.zeros([n,n])
K_exp_exp=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp_exp[k,l]=cov_fun_sqexp_model(t[k],t[l],params_optimal_sqexp_exp[0],params_optimal_sqexp_exp[1])
K_exp_exp[k,l]=cov_fun_exp_model(t[k],t[l],params_optimal_exp_exp[0],params_optimal_exp_exp[1])
K_KI_exp=C_gamma_exp
K_t_sqexp_exp=K_sqexp_exp[:,sample_index.astype(int)]
K_t_exp_exp=K_exp_exp[:,sample_index.astype(int)]
K_t_Bochner_exp=K_Bochner_exp[:,sample_index.astype(int)]
K_t_KI_exp=K_KI_exp[:,sample_index.astype(int)]
K_ij_sqexp_exp=K_sqexp_exp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_exp_exp=K_exp_exp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_Bochner_exp=K_Bochner_exp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_KI_exp=K_KI_exp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# Third: bb Dataset
K_sqexp_bb=np.zeros([n,n])
K_exp_bb=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp_bb[k,l]=cov_fun_sqexp_model(t[k],t[l],params_optimal_sqexp_bb[0],params_optimal_sqexp_bb[1])
K_exp_bb[k,l]=cov_fun_exp_model(t[k],t[l],params_optimal_exp_bb[0],params_optimal_exp_bb[1])
K_KI_bb=C_gamma_bb
K_t_sqexp_bb=K_sqexp_bb[:,sample_index.astype(int)]
K_t_exp_bb=K_exp_bb[:,sample_index.astype(int)]
K_t_Bochner_bb=K_Bochner_bb[:,sample_index.astype(int)]
K_t_KI_bb=K_KI_bb[:,sample_index.astype(int)]
K_ij_sqexp_bb=K_sqexp_bb[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_exp_bb=K_exp_bb[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_Bochner_bb=K_Bochner_bb[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_KI_bb=K_KI_bb[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# Fourth: ndm_1 Dataset
K_sqexp_ndm_1=np.zeros([n,n])
K_exp_ndm_1=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp_ndm_1[k,l]=cov_fun_sqexp_model(t[k],t[l],params_optimal_sqexp_ndm_1[0],params_optimal_sqexp_ndm_1[1])
K_exp_ndm_1[k,l]=cov_fun_exp_model(t[k],t[l],params_optimal_exp_ndm_1[0],params_optimal_exp_ndm_1[1])
K_KI_ndm_1=C_gamma_ndm_1
K_t_sqexp_ndm_1=K_sqexp_ndm_1[:,sample_index.astype(int)]
K_t_exp_ndm_1=K_exp_ndm_1[:,sample_index.astype(int)]
K_t_Bochner_ndm_1=K_Bochner_ndm_1[:,sample_index.astype(int)]
K_t_KI_ndm_1=K_KI_ndm_1[:,sample_index.astype(int)]
K_ij_sqexp_ndm_1=K_sqexp_ndm_1[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_exp_ndm_1=K_exp_ndm_1[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_Bochner_ndm_1=K_Bochner_ndm_1[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_KI_ndm_1=K_KI_ndm_1[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# Fifth: ndm_2 Dataset
K_sqexp_ndm_2=np.zeros([n,n])
K_exp_ndm_2=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_sqexp_ndm_2[k,l]=cov_fun_sqexp_model(t[k],t[l],params_optimal_sqexp_ndm_2[0],params_optimal_sqexp_ndm_2[1])
K_exp_ndm_2[k,l]=cov_fun_exp_model(t[k],t[l],params_optimal_exp_ndm_2[0],params_optimal_exp_ndm_2[1])
K_KI_ndm_2=C_gamma_ndm_2
K_t_sqexp_ndm_2=K_sqexp_ndm_2[:,sample_index.astype(int)]
K_t_exp_ndm_2=K_exp_ndm_2[:,sample_index.astype(int)]
K_t_Bochner_ndm_2=K_Bochner_ndm_2[:,sample_index.astype(int)]
K_t_KI_ndm_2=K_KI_ndm_2[:,sample_index.astype(int)]
K_ij_sqexp_ndm_2=K_sqexp_ndm_2[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_exp_ndm_2=K_exp_ndm_2[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_Bochner_ndm_2=K_Bochner_ndm_2[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_KI_ndm_2=K_KI_ndm_2[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# Sixth: True underlying covariances
K_t_true_sqexp=K_sqexp[:,sample_index.astype(int)]
K_t_true_exp=K_exp[:,sample_index.astype(int)]
K_t_true_bb=K_bb[:,sample_index.astype(int)]
K_t_true_ndm_1=K_nondiag_mercer_1[:,sample_index.astype(int)]
K_t_true_ndm_2=K_nondiag_mercer_2[:,sample_index.astype(int)]
K_ij_true_sqexp=K_sqexp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_true_exp=K_exp[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_true_bb=K_bb[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_true_ndm_1=K_nondiag_mercer_1[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_ij_true_ndm_2=K_nondiag_mercer_2[np.ix_(sample_index.astype(int),sample_index.astype(int))]
# ii) Perform estimation
# First: sqexp dataset
invtol=10**(-3)
sqn=np.sqrt(n)
RMSE_sqexp_sqexp=np.zeros([n_test,1])
RMSE_exp_sqexp=np.zeros([n_test,1])
RMSE_Bochner_sqexp=np.zeros([n_test,1])
RMSE_KI_sqexp=np.zeros([n_test,1])
RMSE_true_sqexp=np.zeros([n_test,1])
for k in range(n_test):
x_temp_sqexp=np.random.multivariate_normal(np.zeros([n]),K_sqexp)
x_temp_meas_sqexp=x_temp_sqexp[sample_index.astype(int)]
[email protected](K_ij_sqexp_sqexp,rcond=invtol)@x_temp_meas_sqexp
[email protected](K_ij_exp_sqexp,rcond=invtol)@x_temp_meas_sqexp
    x_temp_Bochner_sqexp=K_t_Bochner_sqexp@lina.pinv(K_ij_Bochner_sqexp,rcond=invtol)@x_temp_meas_sqexp
[email protected](K_ij_KI_sqexp,rcond=invtol)@x_temp_meas_sqexp
[email protected](K_ij_true_sqexp,rcond=invtol)@x_temp_meas_sqexp
RMSE_sqexp_sqexp[k]=lina.norm(x_temp_sqexp-x_temp_sqexp_sqexp)
RMSE_exp_sqexp[k]=lina.norm(x_temp_sqexp-x_temp_exp_sqexp)
RMSE_Bochner_sqexp[k]=lina.norm(x_temp_sqexp-x_temp_Bochner_sqexp)
RMSE_KI_sqexp[k]=lina.norm(x_temp_sqexp-x_temp_KI_sqexp)
RMSE_true_sqexp[k]=lina.norm(x_temp_sqexp-x_temp_true_sqexp)
RMSE_sqexp_sqexp_mean=np.mean(RMSE_sqexp_sqexp)/sqn
RMSE_exp_sqexp_mean=np.mean(RMSE_exp_sqexp)/sqn
RMSE_Bochner_sqexp_mean=np.mean(RMSE_Bochner_sqexp)/sqn
RMSE_KI_sqexp_mean=np.mean(RMSE_KI_sqexp)/sqn
RMSE_true_sqexp_mean=np.mean(RMSE_true_sqexp)/sqn
print('Sqexp simulations done!')
# Second: exp dataset
RMSE_sqexp_exp=np.zeros([n_test,1])
RMSE_exp_exp=np.zeros([n_test,1])
RMSE_Bochner_exp=np.zeros([n_test,1])
RMSE_KI_exp=np.zeros([n_test,1])
RMSE_true_exp=np.zeros([n_test,1])
for k in range(n_test):
x_temp_exp=np.random.multivariate_normal(np.zeros([n]),K_exp)
x_temp_meas_exp=x_temp_exp[sample_index.astype(int)]
[email protected](K_ij_sqexp_exp,rcond=invtol)@x_temp_meas_exp
[email protected](K_ij_exp_exp,rcond=invtol)@x_temp_meas_exp
[email protected](K_ij_Bochner_exp,rcond=invtol)@x_temp_meas_exp
[email protected](K_ij_KI_exp,rcond=invtol)@x_temp_meas_exp
[email protected](K_ij_true_exp,rcond=invtol)@x_temp_meas_exp
RMSE_sqexp_exp[k]=lina.norm(x_temp_exp-x_temp_sqexp_exp)/sqn
RMSE_exp_exp[k]=lina.norm(x_temp_exp-x_temp_exp_exp)/sqn
RMSE_Bochner_exp[k]=lina.norm(x_temp_exp-x_temp_Bochner_exp)/sqn
RMSE_KI_exp[k]=lina.norm(x_temp_exp-x_temp_KI_exp)/sqn
RMSE_true_exp[k]=lina.norm(x_temp_exp-x_temp_true_exp)
RMSE_sqexp_exp_mean=np.mean(RMSE_sqexp_exp)
RMSE_exp_exp_mean=np.mean(RMSE_exp_exp)
RMSE_Bochner_exp_mean=np.mean(RMSE_Bochner_exp)
RMSE_KI_exp_mean=np.mean(RMSE_KI_exp)
RMSE_true_exp_mean=np.mean(RMSE_true_exp)/sqn
print('Exp simulations done!')
# Third: bb dataset
RMSE_sqexp_bb=np.zeros([n_test,1])
RMSE_exp_bb=np.zeros([n_test,1])
RMSE_Bochner_bb=np.zeros([n_test,1])
RMSE_KI_bb=np.zeros([n_test,1])
RMSE_true_bb=np.zeros([n_test,1])
for k in range(n_test):
x_temp_bb=np.random.multivariate_normal(np.zeros([n]),K_bb)
x_temp_meas_bb=x_temp_bb[sample_index.astype(int)]
[email protected](K_ij_sqexp_bb,rcond=invtol)@x_temp_meas_bb
[email protected](K_ij_exp_bb,rcond=invtol)@x_temp_meas_bb
[email protected](K_ij_Bochner_bb,rcond=invtol)@x_temp_meas_bb
[email protected](K_ij_KI_bb,rcond=invtol)@x_temp_meas_bb
[email protected](K_ij_true_bb,rcond=invtol)@x_temp_meas_bb
RMSE_sqexp_bb[k]=lina.norm(x_temp_bb-x_temp_sqexp_bb)/sqn
RMSE_exp_bb[k]=lina.norm(x_temp_bb-x_temp_exp_bb)/sqn
RMSE_Bochner_bb[k]=lina.norm(x_temp_bb-x_temp_Bochner_bb)/sqn
RMSE_KI_bb[k]=lina.norm(x_temp_bb-x_temp_KI_bb)/sqn
RMSE_true_bb[k]=lina.norm(x_temp_bb-x_temp_true_bb)
RMSE_sqexp_bb_mean=np.mean(RMSE_sqexp_bb)
RMSE_exp_bb_mean=np.mean(RMSE_exp_bb)
RMSE_Bochner_bb_mean=np.mean(RMSE_Bochner_bb)
RMSE_KI_bb_mean=np.mean(RMSE_KI_bb)
RMSE_true_bb_mean=np.mean(RMSE_true_bb)/sqn
print('Brownian bridge simulations done!')
# Fourth: ndm_1 dataset
RMSE_sqexp_ndm_1=np.zeros([n_test,1])
RMSE_exp_ndm_1=np.zeros([n_test,1])
RMSE_Bochner_ndm_1=np.zeros([n_test,1])
RMSE_KI_ndm_1=np.zeros([n_test,1])
RMSE_true_ndm_1=np.zeros([n_test,1])
for k in range(n_test):
x_temp_ndm_1=np.random.multivariate_normal(np.zeros([n]),K_nondiag_mercer_1)
x_temp_meas_ndm_1=x_temp_ndm_1[sample_index.astype(int)]
[email protected](K_ij_sqexp_ndm_1,rcond=invtol)@x_temp_meas_ndm_1
[email protected](K_ij_exp_ndm_1,rcond=invtol)@x_temp_meas_ndm_1
[email protected](K_ij_Bochner_ndm_1,rcond=invtol)@x_temp_meas_ndm_1
[email protected](K_ij_KI_ndm_1,rcond=invtol)@x_temp_meas_ndm_1
[email protected](K_ij_true_ndm_1,rcond=invtol)@x_temp_meas_ndm_1
RMSE_sqexp_ndm_1[k]=lina.norm(x_temp_ndm_1-x_temp_sqexp_ndm_1)/sqn
RMSE_exp_ndm_1[k]=lina.norm(x_temp_ndm_1-x_temp_exp_ndm_1)/sqn
RMSE_Bochner_ndm_1[k]=lina.norm(x_temp_ndm_1-x_temp_Bochner_ndm_1)/sqn
RMSE_KI_ndm_1[k]=lina.norm(x_temp_ndm_1-x_temp_KI_ndm_1)/sqn
RMSE_true_ndm_1[k]=lina.norm(x_temp_ndm_1-x_temp_true_ndm_1)
RMSE_sqexp_ndm_1_mean=np.mean(RMSE_sqexp_ndm_1)
RMSE_exp_ndm_1_mean=np.mean(RMSE_exp_ndm_1)
RMSE_Bochner_ndm_1_mean=np.mean(RMSE_Bochner_ndm_1)
RMSE_KI_ndm_1_mean=np.mean(RMSE_KI_ndm_1)
RMSE_true_ndm_1_mean=np.mean(RMSE_true_ndm_1)/sqn
print('Nondiagonal Mercer 1 simulations done!')
# Fifth: ndm_2 dataset
RMSE_sqexp_ndm_2=np.zeros([n_test,1])
RMSE_exp_ndm_2=np.zeros([n_test,1])
RMSE_Bochner_ndm_2=np.zeros([n_test,1])
RMSE_KI_ndm_2=np.zeros([n_test,1])
RMSE_true_ndm_2=np.zeros([n_test,1])
for k in range(n_test):
x_temp_ndm_2=np.random.multivariate_normal(np.zeros([n]),K_nondiag_mercer_2)
x_temp_meas_ndm_2=x_temp_ndm_2[sample_index.astype(int)]
[email protected](K_ij_sqexp_ndm_2,rcond=invtol)@x_temp_meas_ndm_2
[email protected](K_ij_exp_ndm_2,rcond=invtol)@x_temp_meas_ndm_2
[email protected](K_ij_Bochner_ndm_2,rcond=invtol)@x_temp_meas_ndm_2
[email protected](K_ij_KI_ndm_2,rcond=invtol)@x_temp_meas_ndm_2
[email protected](K_ij_true_ndm_2,rcond=invtol)@x_temp_meas_ndm_2
RMSE_sqexp_ndm_2[k]=lina.norm(x_temp_ndm_2-x_temp_sqexp_ndm_2)/sqn
RMSE_exp_ndm_2[k]=lina.norm(x_temp_ndm_2-x_temp_exp_ndm_2)/sqn
RMSE_Bochner_ndm_2[k]=lina.norm(x_temp_ndm_2-x_temp_Bochner_ndm_2)/sqn
RMSE_KI_ndm_2[k]=lina.norm(x_temp_ndm_2-x_temp_KI_ndm_2)/sqn
RMSE_true_ndm_2[k]=lina.norm(x_temp_ndm_2-x_temp_true_ndm_2)
RMSE_sqexp_ndm_2_mean=np.mean(RMSE_sqexp_ndm_2)
RMSE_exp_ndm_2_mean=np.mean(RMSE_exp_ndm_2)
RMSE_Bochner_ndm_2_mean=np.mean(RMSE_Bochner_ndm_2)
RMSE_KI_ndm_2_mean=np.mean(RMSE_KI_ndm_2)
RMSE_true_ndm_2_mean=np.mean(RMSE_true_ndm_2)/sqn
print('Nondiagonal Mercer 2 simulations done!')
"""
8. Plots and illustrations -----------------------------------------------
"""
# i) Example interpolations
circle_size=200
zero_line=np.zeros([n,1])
zero_line_illu=np.zeros([n,1])
# First row : Sqexp covariance
w,h=plt.figaspect(0.3)
fig1 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs1 = fig1.add_gridspec(1, 5)
f1_ax1 = fig1.add_subplot(gs1[0,0])
f1_ax1.plot(t,x_temp_sqexp,linestyle='dashed',color='0.7', label='Ground truth')
f1_ax1.plot(t,x_temp_sqexp_sqexp,linestyle='solid',color='0.0',label='Estimation')
f1_ax1.scatter(t_sample,x_temp_meas_sqexp,facecolors='none',edgecolors='0',label='Observations',s=circle_size)
f1_ax1.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax1.set_title('Sqexp cov for sqexp process')
f1_ax1.legend(loc='upper left')
f1_ax2 = fig1.add_subplot(gs1[0,1])
f1_ax2.plot(t,x_temp_exp,linestyle='dashed',color='0.7')
f1_ax2.plot(t,x_temp_sqexp_exp,linestyle='solid',color='0.0')
f1_ax2.scatter(t_sample,x_temp_meas_exp,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f1_ax2.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax2.set_title('Sqexp cov for exp process')
f1_ax3 = fig1.add_subplot(gs1[0,2])
f1_ax3.plot(t,x_temp_bb,linestyle='dashed',color='0.7')
f1_ax3.plot(t,x_temp_sqexp_bb,linestyle='solid',color='0.0')
f1_ax3.scatter(t_sample,x_temp_meas_bb,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f1_ax3.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax3.set_title('Sqexp cov for Brownian bridge process')
f1_ax4 = fig1.add_subplot(gs1[0,3])
f1_ax4.plot(t,x_temp_ndm_1,linestyle='dashed',color='0.7')
f1_ax4.plot(t,x_temp_sqexp_ndm_1,linestyle='solid',color='0.0')
f1_ax4.scatter(t_sample,x_temp_meas_ndm_1,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f1_ax4.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax4.set_title('Sqexp cov for random cov process 1')
f1_ax5 = fig1.add_subplot(gs1[0,4])
f1_ax5.plot(t,x_temp_ndm_2,linestyle='dashed',color='0.7')
f1_ax5.plot(t,x_temp_sqexp_ndm_2,linestyle='solid',color='0.0')
f1_ax5.scatter(t_sample,x_temp_meas_ndm_2,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f1_ax5.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax5.set_title('Sqexp cov for random cov process 2')
# Second row : Exp covariance
w,h=plt.figaspect(0.3)
fig2 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs2 = fig2.add_gridspec(1, 5)
f2_ax6 = fig2.add_subplot(gs2[0,0])
f2_ax6.plot(t,x_temp_sqexp,linestyle='dashed',color='0.7', label='Ground truth')
f2_ax6.plot(t,x_temp_exp_sqexp,linestyle='solid',color='0.0', label='Estimation')
f2_ax6.scatter(t_sample,x_temp_meas_sqexp,facecolors='none',edgecolors='0',label='Observations',s=circle_size)
f2_ax6.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax6.set_title('Exp cov for sqexp process')
f2_ax6.legend(loc='upper left')
f2_ax7 = fig2.add_subplot(gs2[0,1])
f2_ax7.plot(t,x_temp_exp,linestyle='dashed',color='0.7')
f2_ax7.plot(t,x_temp_exp_exp,linestyle='solid',color='0.0')
f2_ax7.scatter(t_sample,x_temp_meas_exp,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f2_ax7.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax7.set_title('Exp cov for exp process')
f2_ax8 = fig2.add_subplot(gs2[0,2])
f2_ax8.plot(t,x_temp_bb,linestyle='dashed',color='0.7')
f2_ax8.plot(t,x_temp_exp_bb,linestyle='solid',color='0.0')
f2_ax8.scatter(t_sample,x_temp_meas_bb,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f2_ax8.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax8.set_title('Exp cov for Brownian bridge process')
f2_ax9 = fig2.add_subplot(gs2[0,3])
f2_ax9.plot(t,x_temp_ndm_1,linestyle='dashed',color='0.7')
f2_ax9.plot(t,x_temp_exp_ndm_1,linestyle='solid',color='0.0')
f2_ax9.scatter(t_sample,x_temp_meas_ndm_1,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f2_ax9.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax9.set_title('Exp cov for random cov process 1')
f2_ax10 = fig2.add_subplot(gs2[0,4])
f2_ax10.plot(t,x_temp_ndm_2,linestyle='dashed',color='0.7')
f2_ax10.plot(t,x_temp_exp_ndm_2,linestyle='solid',color='0.0')
f2_ax10.scatter(t_sample,x_temp_meas_ndm_2,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f2_ax10.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax10.set_title('Exp cov for random cov process 2')
# Third row : Bochner covariance
w,h=plt.figaspect(0.3)
fig3 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs3 = fig3.add_gridspec(1, 5)
f3_ax11 = fig3.add_subplot(gs3[0,0])
f3_ax11.plot(t,x_temp_sqexp,linestyle='dashed',color='0.7', label='Ground truth')
f3_ax11.plot(t,x_temp_Bochner_sqexp,linestyle='solid',color='0.0', label='Estimation')
f3_ax11.scatter(t_sample,x_temp_meas_sqexp,facecolors='none',edgecolors='0',label='Observations',s=circle_size)
f3_ax11.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax11.set_title('Bochner cov for sqexp process')
f3_ax11.legend(loc='upper left')
f3_ax12 = fig3.add_subplot(gs3[0,1])
f3_ax12.plot(t,x_temp_exp,linestyle='dashed',color='0.7')
f3_ax12.plot(t,x_temp_Bochner_exp,linestyle='solid',color='0.0')
f3_ax12.scatter(t_sample,x_temp_meas_exp,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f3_ax12.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax12.set_title('Bochner cov for exp process')
f3_ax13 = fig3.add_subplot(gs3[0,2])
f3_ax13.plot(t,x_temp_bb,linestyle='dashed',color='0.7')
f3_ax13.plot(t,x_temp_Bochner_bb,linestyle='solid',color='0.0')
f3_ax13.scatter(t_sample,x_temp_meas_bb,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f3_ax13.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax13.set_title('Bochner cov for Brownian bridge process')
f3_ax14 = fig3.add_subplot(gs3[0,3])
f3_ax14.plot(t,x_temp_ndm_1,linestyle='dashed',color='0.7')
f3_ax14.plot(t,x_temp_Bochner_ndm_1,linestyle='solid',color='0.0')
f3_ax14.scatter(t_sample,x_temp_meas_ndm_1,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f3_ax14.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax14.set_title('Bochner cov for random cov process 1')
f3_ax15 = fig3.add_subplot(gs3[0,4])
f3_ax15.plot(t,x_temp_ndm_2,linestyle='dashed',color='0.7')
f3_ax15.plot(t,x_temp_Bochner_ndm_2,linestyle='solid',color='0.0')
f3_ax15.scatter(t_sample,x_temp_meas_ndm_2,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f3_ax15.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax15.set_title('Bochner cov for random cov process 2')
# Fourth row : KI covariance
w,h=plt.figaspect(0.3)
fig4 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs4 = fig4.add_gridspec(1, 5)
f4_ax16 = fig4.add_subplot(gs4[0,0])
f4_ax16.plot(t,x_temp_sqexp,linestyle='dashed',color='0.7',label='Ground truth')
f4_ax16.plot(t,x_temp_KI_sqexp,linestyle='solid',color='0.0', label='Estimation')
f4_ax16.scatter(t_sample,x_temp_meas_sqexp,facecolors='none',edgecolors='0',label='Observations',s=circle_size)
f4_ax16.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f4_ax16.set_title('KI cov for sqexp process')
f4_ax16.legend(loc='upper left')
f4_ax17 = fig4.add_subplot(gs4[0,1])
f4_ax17.plot(t,x_temp_exp,linestyle='dashed',color='0.7')
f4_ax17.plot(t,x_temp_KI_exp,linestyle='solid',color='0.0')
f4_ax17.scatter(t_sample,x_temp_meas_exp,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f4_ax17.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f4_ax17.set_title('KI cov for exp process')
f4_ax18 = fig4.add_subplot(gs4[0,2])
f4_ax18.plot(t,x_temp_bb,linestyle='dashed',color='0.7')
f4_ax18.plot(t,x_temp_KI_bb,linestyle='solid',color='0.0')
f4_ax18.scatter(t_sample,x_temp_meas_bb,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f4_ax18.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f4_ax18.set_title('KI cov for Brownian bridge process')
f4_ax19 = fig4.add_subplot(gs4[0,3])
f4_ax19.plot(t,x_temp_ndm_1,linestyle='dashed',color='0.7')
f4_ax19.plot(t,x_temp_KI_ndm_1,linestyle='solid',color='0.0')
f4_ax19.scatter(t_sample,x_temp_meas_ndm_1,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f4_ax19.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f4_ax19.set_title('KI cov for random cov process 1')
f4_ax20 = fig4.add_subplot(gs4[0,4])
f4_ax20.plot(t,x_temp_ndm_2,linestyle='dashed',color='0.7')
f4_ax20.plot(t,x_temp_KI_ndm_2,linestyle='solid',color='0.0')
f4_ax20.scatter(t_sample,x_temp_meas_ndm_2,facecolors='none',edgecolors='0',label='Noise',s=circle_size)
f4_ax20.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f4_ax20.set_title('KI cov for random cov process 2')
# ii) Example covariance fits
# Calculations for ground truth
circle_size=100
cov_true_sqexp=np.zeros([n])
cov_true_exp=np.zeros([n])
cov_true_bb=np.zeros([n])
for k in range(n):
cov_true_sqexp[k]=cov_fun_sqexp_true(0, t[k])
cov_true_exp[k]=cov_fun_exp_true(0, t[k])
cov_true_bb[k]=cov_fun_bb_true(0, t[k])
cov_sqexp_sqexp=np.zeros([n])
cov_sqexp_exp=np.zeros([n])
cov_sqexp_bb=np.zeros([n])
cov_sqexp_ndm_1=np.zeros([n])
cov_sqexp_ndm_2=np.zeros([n])
cov_exp_sqexp=np.zeros([n])
cov_exp_exp=np.zeros([n])
cov_exp_bb=np.zeros([n])
cov_exp_ndm_1=np.zeros([n])
cov_exp_ndm_2=np.zeros([n])
cov_Bochner_sqexp=np.zeros([n])
cov_Bochner_exp=np.zeros([n])
cov_Bochner_bb=np.zeros([n])
cov_Bochner_ndm_1=np.zeros([n])
cov_Bochner_ndm_2=np.zeros([n])
for k in range(n):
cov_sqexp_sqexp[k]=cov_fun_sqexp_model(t[k],0,params_optimal_sqexp_sqexp[0],params_optimal_sqexp_sqexp[1])
cov_sqexp_exp[k]=cov_fun_sqexp_model(t[k],0,params_optimal_sqexp_exp[0],params_optimal_sqexp_exp[1])
cov_sqexp_bb[k]=cov_fun_sqexp_model(t[k],0,params_optimal_sqexp_bb[0],params_optimal_sqexp_bb[1])
cov_sqexp_ndm_1[k]=cov_fun_sqexp_model(t[k],0,params_optimal_sqexp_ndm_1[0],params_optimal_sqexp_ndm_1[1])
cov_sqexp_ndm_2[k]=cov_fun_sqexp_model(t[k],0,params_optimal_sqexp_ndm_2[0],params_optimal_sqexp_ndm_2[1])
cov_exp_sqexp[k]=cov_fun_exp_model(t[k],0,params_optimal_exp_sqexp[0],params_optimal_exp_sqexp[1])
cov_exp_exp[k]=cov_fun_exp_model(t[k],0,params_optimal_exp_exp[0],params_optimal_exp_exp[1])
cov_exp_bb[k]=cov_fun_exp_model(t[k],0,params_optimal_exp_bb[0],params_optimal_exp_bb[1])
cov_exp_ndm_1[k]=cov_fun_exp_model(t[k],0,params_optimal_exp_ndm_1[0],params_optimal_exp_ndm_1[1])
cov_exp_ndm_2[k]=cov_fun_exp_model(t[k],0,params_optimal_exp_ndm_2[0],params_optimal_exp_ndm_2[1])
cov_Bochner_sqexp=K_Bochner_sqexp[:,0]
cov_Bochner_exp=K_Bochner_exp[:,0]
cov_Bochner_bb=K_Bochner_bb[:,0]
cov_Bochner_ndm_1=K_Bochner_ndm_1[:,0]
cov_Bochner_ndm_2=K_Bochner_ndm_2[:,0]
# First row : Sqexp covariance
w,h=plt.figaspect(0.3)
fig1 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs1 = fig1.add_gridspec(1, 3)
f1_ax1 = fig1.add_subplot(gs1[0,0])
f1_ax1.plot(t,cov_true_sqexp,linestyle='dashed',color='0.7', label='True covariance')
f1_ax1.plot(t,cov_sqexp_sqexp,linestyle='solid',color='0.0',label='Estimation')
f1_ax1.plot(t,correlogram_sqexp,linestyle='dotted',color='0.0',label='Correlogram')
f1_ax1.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax1.set_title('Sqexp cov fun for sqexp process')
f1_ax1.legend(loc='upper left')
f1_ax2 = fig1.add_subplot(gs1[0,1])
f1_ax2.plot(t,cov_true_exp,linestyle='dashed',color='0.7', label='True covariance')
f1_ax2.plot(t,cov_sqexp_exp,linestyle='solid',color='0.0',label='Estimation')
f1_ax2.plot(t,correlogram_exp,linestyle='dotted',color='0.0',label='Correlogram')
f1_ax2.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax2.set_title('Sqexp cov fun for exp process')
f1_ax3 = fig1.add_subplot(gs1[0,2])
#f1_ax3.plot(t,cov_true_bb,linestyle='dashed',color='0.7', label='True covariance')
f1_ax3.plot(t,cov_sqexp_bb,linestyle='solid',color='0.0',label='Estimation')
f1_ax3.plot(t,correlogram_bb,linestyle='dotted',color='0.0',label='Correlogram')
f1_ax3.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax3.set_title('Sqexp cov fun for Brownian bridge process')
# Second row : Exp covariance
w,h=plt.figaspect(0.3)
fig2 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs2 = fig2.add_gridspec(1, 3)
f2_ax1 = fig2.add_subplot(gs2[0,0])
f2_ax1.plot(t,cov_true_sqexp,linestyle='dashed',color='0.7', label='True covariance')
f2_ax1.plot(t,cov_exp_sqexp,linestyle='solid',color='0.0',label='Estimation')
f2_ax1.plot(t,correlogram_sqexp,linestyle='dotted',color='0.0',label='Correlogram')
f2_ax1.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax1.set_title('Exp cov fun for sqexp process')
f2_ax1.legend(loc='upper left')
f2_ax2 = fig2.add_subplot(gs2[0,1])
f2_ax2.plot(t,cov_true_exp,linestyle='dashed',color='0.7', label='True covariance')
f2_ax2.plot(t,cov_exp_exp,linestyle='solid',color='0.0',label='Estimation')
f2_ax2.plot(t,correlogram_exp,linestyle='dotted',color='0.0',label='Correlogram')
f2_ax2.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax2.set_title('Exp cov fun for exp process')
f2_ax3 = fig2.add_subplot(gs2[0,2])
#f2_ax3.plot(t,cov_true_bb,linestyle='dashed',color='0.7', label='True covariance')
f2_ax3.plot(t,cov_exp_bb,linestyle='solid',color='0.0',label='Estimation')
f2_ax3.plot(t,correlogram_bb,linestyle='dotted',color='0.0',label='Correlogram')
f2_ax3.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f2_ax3.set_title('Exp cov fun for Brownian bridge process')
# Third row : Bochner covariance
w,h=plt.figaspect(0.3)
fig3 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs3 = fig3.add_gridspec(1, 3)
f3_ax1 = fig3.add_subplot(gs3[0,0])
f3_ax1.plot(t,cov_true_sqexp,linestyle='dashed',color='0.7', label='True covariance')
f3_ax1.plot(t,cov_Bochner_sqexp,linestyle='solid',color='0.0',label='Estimation')
f3_ax1.plot(t,correlogram_sqexp,linestyle='dotted',color='0.0',label='Correlogram')
f3_ax1.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax1.set_title('Bochner cov fun for sqexp process')
f3_ax1.legend(loc='upper left')
f3_ax2 = fig3.add_subplot(gs3[0,1])
f3_ax2.plot(t,cov_true_exp,linestyle='dashed',color='0.7', label='True covariance')
f3_ax2.plot(t,cov_Bochner_exp,linestyle='solid',color='0.0',label='Estimation')
f3_ax2.plot(t,correlogram_exp,linestyle='dotted',color='0.0',label='Correlogram')
f3_ax2.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax2.set_title('Bochner cov fun for exp process')
f3_ax3 = fig3.add_subplot(gs3[0,2])
#f3_ax3.plot(t,cov_true_bb,linestyle='dashed',color='0.7', label='True covariance')
f3_ax3.plot(t,cov_Bochner_bb,linestyle='solid',color='0.0',label='Estimation')
f3_ax3.plot(t,correlogram_bb,linestyle='dotted',color='0.0',label='Correlogram')
f3_ax3.plot(t,zero_line,linestyle='dotted',color='0.75')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax3.set_title('Bochner cov fun for Brownian bridge process')
```
#### File: kernel_inference/Figures/Special_case_4_trajectories.py
```python
"""
1. Imports and definitions -----------------------------------------------
"""
# i) Imports
import numpy as np
import numpy.linalg as lina
import scipy.linalg as spla
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 12})
# ii) Definition of auxiliary quantities
n=100
n_sample=20
n_simu=100
t=np.linspace(0,1,n)
sample_index=np.round(np.linspace(0,n-1,n_sample))
t_sample=t[sample_index.astype(int)]
np.random.seed(0)
tol=10**(-4)
"""
2. Create covariance matrices --------------------------------------------
"""
# i) Define covariance functions
d_x=0.1
d_y=0.05
def cov_fun_x(t1,t2):
return 1*np.exp(-(lina.norm(t1-t2)/d_x)**2)
def cov_fun_y(t1,t2):
return 1.5*np.exp(-(lina.norm(t1-t2)/d_y)**2)
# ii) Create covariance matrices
K_x=np.zeros([n,n])
K_y=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_x[k,l]=cov_fun_x(t[k],t[l])
K_y[k,l]=cov_fun_y(t[k],t[l])
# iii) Introduce constrained behavior
# weight_mat=np.ones([n,n])
# for k in range(n):
# for l in range(n):
# if np.min([k,l])/(np.round(n/3))<=1:
# weight_mat[k,l]=np.min([k,l])/(np.round(n/3))
# K_x=weight_mat*K_x
# K_y=weight_mat*K_y
Nabla=np.delete(np.eye(n)-np.roll(np.eye(n),1,1),n-1,0)
L=np.zeros([1,n])
L[0,0]=1;
A_constraints=np.vstack((Nabla,L))
K_x_mod=np.delete(K_x,[n-1],0)
K_x_mod=np.delete(K_x_mod,[n-1],1)
K_x_constrained=spla.block_diag(K_x_mod,np.zeros([1,1]))
K_y_mod=np.delete(K_y,[n-1],0)
K_y_mod=np.delete(K_y_mod,[n-1],1)
K_y_constrained=spla.block_diag(K_y_mod,np.zeros([1,1]))
# iv) Solve A_c K_x A_c.T=K_c
K_x=lina.pinv(A_constraints)@K_x_constrained@lina.pinv(A_constraints).T
K_y=lina.pinv(A_constraints)@K_y_constrained@lina.pinv(A_constraints).T
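# Interpretation (added note): A_constraints stacks a first-difference operator
# (Nabla) with a point evaluation L at t=0, so the pseudoinverse mapping above
# yields covariances whose increments follow K_x_mod / K_y_mod while the value
# at t=0 is pinned to zero variance, i.e. A_c K A_c.T = K_constrained.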
"""
3. Simulation of autocorrelated data -------------------------------------
"""
# i) Draw from a distribution with covariance matrix K_x
x_simu=np.zeros([n,n_simu])
y_simu=np.zeros([n,n_simu])
for k in range(n_simu):
x_simu[:,k]=np.random.multivariate_normal(np.zeros([n]),K_x)
y_simu[:,k]=np.random.multivariate_normal(np.zeros([n]),K_y)
x_measured=x_simu[sample_index.astype(int),:]
y_measured=y_simu[sample_index.astype(int),:]
S_emp_x=(1/n_simu)*(x_simu@x_simu.T)
S_emp_measured_x=(1/n_simu)*(x_measured@x_measured.T)
S_emp_y=(1/n_simu)*(y_simu@y_simu.T)
S_emp_measured_y=(1/n_simu)*(y_measured@y_measured.T)
"""
4. Kernel inference ------------------------------------------------------
"""
# i) Preparation
r=2
n_exp=10
d_sqexp=0.3
def cov_fun_exp(t1,t2):
return (1/n**2)*np.exp(-(lina.norm(t1-t2)/d_sqexp)**1)
K_exp=np.zeros([n,n])
for k in range(n):
for l in range(n):
K_exp[k,l]=cov_fun_exp(t[k],t[l])
[U_p,Lambda_p,V_p]=lina.svd(K_exp,hermitian=True)
U_p_cut=U_p[:,:n_exp]
Psi=U_p_cut[sample_index.astype(int),:]
Lambda_p_cut=np.diag(Lambda_p[:n_exp])
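# The truncated SVD of the prior covariance K_exp supplies an orthonormal
# basis U_p_cut of its n_exp dominant eigenvectors; Psi restricts that basis
# to the sampled locations, and Lambda_p_cut (the leading eigenvalues) serves
# as the prior for the gamma matrix estimated by kernel inference below.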
# ii) Execute inference
import sys
sys.path.append("..")
import KI
beta_x, mu_x, gamma_x, C_gamma_x, KI_logfile_x = KI.Kernel_inference_homogeneous(x_measured,Lambda_p_cut,Psi,r,max_iter=300)
beta_y, mu_y, gamma_y, C_gamma_y, KI_logfile_y = KI.Kernel_inference_homogeneous(y_measured,Lambda_p_cut,Psi,r, max_iter=300)
"""
5. Optimal estimation ---------------------------------------------------
"""
# i) Auxiliary quantities
n_datapoints= 10
datapoint_index=np.sort(np.random.choice(range(n),size=n_datapoints))
t_datapoints=t[datapoint_index.astype(int)]
x_datapoints=x_simu[datapoint_index.astype(int),:]
y_datapoints=y_simu[datapoint_index.astype(int),:]
# ii) Interpolate x using inferred kernel
K_gamma_x=U_p_cut@gamma_x@U_p_cut.T
K_gamma_x_sample=K_gamma_x[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]
K_gamma_x_subset=K_gamma_x[:,datapoint_index.astype(int)]
x_est_K_gamma_x=K_gamma_x_subset@lina.pinv(K_gamma_x_sample,rcond=tol,hermitian=True)@x_datapoints
# iii) Interpolate y using inferred kernel
K_gamma_y=U_p_cut@gamma_y@U_p_cut.T
K_gamma_y_sample=K_gamma_y[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]
K_gamma_y_subset=K_gamma_y[:,datapoint_index.astype(int)]
y_est_K_gamma_y=K_gamma_y_subset@lina.pinv(K_gamma_y_sample,rcond=tol,hermitian=True)@y_datapoints
# iv) Interpolate using true kernel
K_x_true_sample=K_x[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]
K_x_true_subset=K_x[:,datapoint_index.astype(int)]
x_est_K_x_true=K_x_true_subset@lina.pinv(K_x_true_sample,rcond=tol,hermitian=True)@x_datapoints
K_y_true_sample=K_y[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]
K_y_true_subset=K_y[:,datapoint_index.astype(int)]
y_est_K_y_true=K_y_true_subset@lina.pinv(K_y_true_sample,rcond=tol,hermitian=True)@y_datapoints
# v) Interpolate using generic exponential covariance
K_exp_sample=K_exp[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]
K_exp_subset=K_exp[:,datapoint_index.astype(int)]
x_est_K_exp=K_exp_subset@lina.pinv(K_exp_sample,rcond=tol,hermitian=True)@x_datapoints
y_est_K_exp=K_exp_subset@lina.pinv(K_exp_sample,rcond=tol,hermitian=True)@y_datapoints
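# All interpolators above share the conditional-mean (simple kriging) form
#   x_hat = K[:, S] @ pinv(K[S, S]) @ x[S]
# with the pseudoinverse regularized via rcond=tol; only the covariance used
# differs (inferred K_gamma, true K, or the generic exponential prior K_exp).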
"""
6. Plots and illustrations -----------------------------------------------
"""
# i) Auxiliary definitions
zero_line=np.zeros([n,1])
K=spla.block_diag(K_x,K_y)
K_gamma=spla.block_diag(K_gamma_x,K_gamma_y)
S_emp=spla.block_diag(S_emp_x,S_emp_y)
K_gamma_x_sample=K_gamma_x[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_gamma_y_sample=K_gamma_y[np.ix_(sample_index.astype(int),sample_index.astype(int))]
K_gamma_sample=spla.block_diag(K_gamma_x_sample,K_gamma_y_sample)
S_emp_measured=spla.block_diag(S_emp_measured_x,S_emp_measured_y)
gamma=spla.block_diag(gamma_x,gamma_y)
# ii) Invoke figure 1
n_plot=2
w,h=plt.figaspect(0.3)
fig1 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs1 = fig1.add_gridspec(1, 3)
# Location 1,1 Underlying covariance function
f1_ax1 = fig1.add_subplot(gs1[0,0])
f1_ax1.imshow(K)
plt.ylabel('Locations x,y')
plt.xlabel('Locations x,y')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax1.set_title('Covariance function')
# Location 1,2 Example realizations
f1_ax2 = fig1.add_subplot(gs1[0,1])
f1_ax2.plot(x_simu[:,1:n_plot],y_simu[:,1:n_plot],linestyle='solid',color='0')
y_min,y_max=plt.ylim()
plt.ylabel('Function value y(t)')
plt.xlabel('Function value x(t)')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax2.set_title('Example realizations')
# Location 1,3 Plot of the empirical covariance matrix
f1_ax3 = fig1.add_subplot(gs1[0,2])
f1_ax3.imshow(S_emp)
plt.ylabel('Locations x,y')
plt.xlabel('Locations x,y')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f1_ax3.set_title('Empirical covariance')
# Save the figure
# plt.savefig('Special_case_4a_trajectories',dpi=400)
# iii) Invoke figure 2
n_plot=2
n_illu=1
w,h=plt.figaspect(0.35)
fig2 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs2 = fig2.add_gridspec(4, 6)
f2_ax1 = fig2.add_subplot(gs2[0:2, 0:2])
f2_ax1.imshow(K)
f2_ax1.set_title('True covariance function')
f2_ax1.axis('off')
f2_ax2 = fig2.add_subplot(gs2[0:2, 4:6])
f2_ax2.imshow(K_gamma)
f2_ax2.set_title('Estimated covariance function')
f2_ax2.axis('off')
f2_ax3 = fig2.add_subplot(gs2[0, 2])
f2_ax3.imshow(S_emp_measured)
f2_ax3.set_title('Empirical covariance')
f2_ax3.axis('off')
f2_ax4 = fig2.add_subplot(gs2[0, 3])
f2_ax4.imshow(K_gamma_sample)
f2_ax4.set_title('Estimated covariance')
f2_ax4.axis('off')
f2_ax5 = fig2.add_subplot(gs2[1, 2])
f2_ax5.imshow(spla.block_diag(Lambda_p_cut,Lambda_p_cut))
f2_ax5.set_title('Prior gamma')
f2_ax5.axis('off')
f2_ax6 = fig2.add_subplot(gs2[1, 3])
f2_ax6.imshow(gamma)
f2_ax6.set_title('Inferred gamma')
f2_ax6.axis('off')
# Save the figure
# plt.savefig('Special_case_4b_trajectories',dpi=400)
# iv) Invoke figure 3
w,h=plt.figaspect(0.25)
fig3 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))
gs3 = fig3.add_gridspec(1, 3)
# Location 1.2 Estimations using exponential covariance
f3_ax1 = fig3.add_subplot(gs3[0,1])
f3_ax1.scatter(x_datapoints[:,0],y_datapoints[:,0],facecolors='none',edgecolors='0',label='Data points')
for k in range(n_illu-1):
f3_ax1.scatter(x_datapoints[:,k+1],y_datapoints[:,k+1],facecolors='none',edgecolors='0')
exp_est = f3_ax1.plot(x_est_K_exp[:,:n_illu],y_est_K_exp[:,:n_illu],linestyle='solid',color='0',label='Estimate exp cov')
plt.setp(exp_est[1:], label="_")
true_est = f3_ax1.plot(x_est_K_x_true[:,:n_illu],y_est_K_y_true[:,:n_illu],linestyle='dotted',color='0.65',label='Estimate true cov')
plt.setp(true_est[1:], label="_")
f3_ax1.plot(t,zero_line,linestyle='dotted',color='0.5')
f3_ax1.plot(zero_line,t,linestyle='dotted',color='0.5')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
plt.xlabel('Locations x,y')
f3_ax1.set_title('Estimations using exp. covariance')
f3_ax1.legend(loc='lower right')
# Location 1.3 Estimations using inferred covariance
f3_ax2 = fig3.add_subplot(gs3[0,2])
f3_ax2.scatter(x_datapoints[:,0],y_datapoints[:,0],facecolors='none',edgecolors='0',label='Data points')
for k in range(n_illu-1):
f3_ax2.scatter(x_datapoints[:,k+1],y_datapoints[:,k+1],facecolors='none',edgecolors='0')
KI_est = f3_ax2.plot(x_est_K_gamma_x[:,:n_illu] ,y_est_K_gamma_y[:,:n_illu],linestyle='solid',color='0',label='Estimate KI cov')
plt.setp(KI_est[1:], label="_")
true_est = f3_ax2.plot(x_est_K_x_true[:,:n_illu],y_est_K_y_true[:,:n_illu],linestyle='dotted',color='0.65',label='Estimate true cov')
plt.setp(true_est[1:], label="_")
f3_ax2.plot(t,zero_line,linestyle='dotted',color='0.5')
f3_ax2.plot(zero_line,t,linestyle='dotted',color='0.5')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
plt.xlabel('Locations x,y')
f3_ax2.set_title('Estimations using inferred covariance')
f3_ax2.legend(loc='lower right')
# Location 1.1 Example realizations
f3_ax3 = fig3.add_subplot(gs3[0,0])
f3_ax3.plot(x_simu[:,1:n_plot],y_simu[:,1:n_plot],linestyle='solid',color='0',label='Realizations')
y_min,y_max=plt.ylim()
plt.ylabel('Locations x,y')
plt.xlabel('Locations x,y')
plt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)
plt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)
f3_ax3.set_title('Example realizations')
# Save the figure
# plt.savefig('Special_case_4c_trajectories',dpi=400)
```
|
{
"source": "jemil-butt/Optimal_Discretization_RL",
"score": 3
}
|
#### File: jemil-butt/Optimal_Discretization_RL/benchmark_beam_bending.py
```python
# i) Imports
import numpy as np
import time
from scipy.optimize import basinhopping
import class_beam_bending_env as beam
# ii) Import stable baselines
from stable_baselines3 import TD3
from stable_baselines3.common.env_checker import check_env
# iii) Initialize and check
np.random.seed(0)
beam_env=beam.Env()
beam_env.reset()
check_env(beam_env)
"""
2. Train with stable baselines
"""
# i) Train a TD3 Model
# start_time=time.time()
# model = TD3("MlpPolicy", beam_env,verbose=1,seed=0)
# model.learn(total_timesteps=100000)
# end_time=time.time()
# model.save('./Saved_models/trained_benchmark_beam_bending')
model=TD3.load('./Saved_models/trained_benchmark_beam_bending')
"""
3. Apply alternative methods
"""
# Note: all actions lie in [-1,1]; the environment maps them to [0,1],
# translating inputs from the symmetric box space [-1,1] to grid indices
# i) Grid based sampling
def grid_based_sampling(environment):
action_index=environment.round_to_index(environment.epoch/(environment.max_epoch-1)-1)
action=np.array(2*environment.x[action_index]-1)
return action
# ii) Pseudo random sampling
def pseudo_random_sampling(environment):
Halton_sequence=np.array([1/2, 1/4, 3/4, 1/8, 5/8, 3/8, 7/8])*2-np.ones([7])
action=Halton_sequence[environment.epoch]
return action
# iii) Random sampling
def random_sampling(environment):
action=np.random.uniform(-1,1,[1])
return action
# iv) Numerical integration
def quadrature_sampling(environment):
Gauss_points=np.array([-0.861, -0.34, 0.34, 0.861])
action=Gauss_points[environment.epoch]
return action
# v) Experiment design based sampling
n_average=10000
fun_table=np.zeros([n_average,beam_env.n_disc_x])
for k in range(n_average):
beam_env.reset()
fun_table[k,:]=beam_env.def_fun
def loss_fun(x_vec):
index_vec=np.zeros(beam_env.n_meas)
for k in range(beam_env.n_meas):
index_vec[k]=beam_env.round_to_index(x_vec[k]*0.5+0.5)
f_max=np.max(fun_table,axis=1)
f_obs_mat=fun_table[:,index_vec.astype(int)]
f_obs_max=np.max(f_obs_mat,axis=1)
loss_vec=np.abs(f_obs_max-f_max)
loss_val=np.mean(loss_vec)
return loss_val
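# loss_fun scores a candidate design x_vec by the average (over n_average
# simulated deflection functions) absolute gap between the largest value
# observed at the design points and the true maximum; basinhopping below
# searches for the design that minimizes this expected gap.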
x_0 = np.array([-0.7,-0.3,0.3,0.7])
x_design = basinhopping(loss_fun, x_0, disp=True)
def experiment_design_sampling(environment):
action=x_design.x[environment.epoch]
return action
"""
4. Summarize and plot results
"""
# i) Summarize results in table
n_episodes_table=1000
table=np.zeros([n_episodes_table,6])
# Evaluate each sampling strategy over n_episodes_table episodes and record
# the terminal reward in the corresponding column of the results table.
policies = [
    lambda obs: grid_based_sampling(beam_env),               # column 0
    lambda obs: pseudo_random_sampling(beam_env),            # column 1
    lambda obs: random_sampling(beam_env),                   # column 2
    lambda obs: quadrature_sampling(beam_env),               # column 3
    lambda obs: experiment_design_sampling(beam_env),        # column 4
    lambda obs: model.predict(obs, deterministic=True)[0],   # column 5: RL policy
]
for col, policy in enumerate(policies):
    for k in range(n_episodes_table):
        obs = beam_env.reset()
        done = False
        while not done:
            action = policy(obs)
            obs, reward, done, info = beam_env.step(action)
        table[k, col] = reward
# ii) Illustrate results
n_episodes=3
for k in range(n_episodes):
    obs = beam_env.reset()
    done = False
    while not done:
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = beam_env.step(action)
    beam_env.render(reward)
    # time.sleep(0.5)
mean_summary=np.mean(table,axis=0)
std_summary=np.std(table,axis=0)
print(' Reward means of different methods')
print(mean_summary)
print(' Reward standard_deviations of different methods')
print(std_summary)
# print('Time for RL procedure = ', end_time-start_time ,'sec')
```
|
{
"source": "jemilc/shap",
"score": 2
}
|
#### File: shap/benchmark/plots.py
```python
import numpy as np
from .experiments import run_experiments
from ..plots import colors
from . import models
from . import methods
import sklearn
import io
import base64
try:
import matplotlib.pyplot as pl
except ImportError:
pass
labels = {
"consistency_guarantees": {
"title": "Consistency Guarantees"
},
"local_accuracy": {
"title": "Local Accuracy"
},
"runtime": {
"title": "Runtime"
},
"remove_positive": {
"title": "Remove Positive",
"xlabel": "Max fraction of features removed",
"ylabel": "Negative mean model output"
},
"mask_remove_positive": {
"title": "Mask Remove Positive",
"xlabel": "Max fraction of features removed",
"ylabel": "Negative mean model output"
},
"remove_negative": {
"title": "Remove Negative",
"xlabel": "Max fraction of features removed",
"ylabel": "Mean model output"
},
"mask_remove_negative": {
"title": "Mask Remove Negative",
"xlabel": "Max fraction of features removed",
"ylabel": "Mean model output"
},
"keep_positive": {
"title": "Keep Positive",
"xlabel": "Max fraction of features kept",
"ylabel": "Mean model output"
},
"mask_keep_positive": {
"title": "Mask Keep Positive",
"xlabel": "Max fraction of features kept",
"ylabel": "Mean model output"
},
"keep_negative": {
"title": "Keep Negative",
"xlabel": "Max fraction of features kept",
"ylabel": "Negative mean model output"
},
"mask_keep_negative": {
"title": "Mask Keep Negative",
"xlabel": "Max fraction of features kept",
"ylabel": "Negative mean model output"
},
"batch_remove_absolute__r2": {
"title": "Batch Remove Absolute",
"xlabel": "Fraction of features removed",
"ylabel": "1 - R^2"
},
"batch_keep_absolute__r2": {
"title": "Batch Keep Absolute",
"xlabel": "Fraction of features kept",
"ylabel": "R^2"
},
"batch_remove_absolute__roc_auc": {
"title": "Batch Remove Absolute",
"xlabel": "Fraction of features removed",
"ylabel": "1 - ROC AUC"
},
"batch_keep_absolute__roc_auc": {
"title": "Batch Keep Absolute",
"xlabel": "Fraction of features kept",
"ylabel": "ROC AUC"
},
"linear_shap_corr": {
"title": "Linear SHAP (corr)"
},
"linear_shap_ind": {
"title": "Linear SHAP (ind)"
},
"coef": {
"title": "Coefficents"
},
"random": {
"title": "Random"
},
"kernel_shap_1000_meanref": {
"title": "Kernel SHAP 1000 mean ref."
},
"sampling_shap_1000": {
"title": "Sampling SHAP 1000"
},
"tree_shap": {
"title": "Tree SHAP"
},
"saabas": {
"title": "Saabas"
},
"tree_gain": {
"title": "Gain/Gini Importance"
},
"mean_abs_tree_shap": {
"title": "mean(|Tree SHAP|)"
},
"lasso_regression": {
"title": "Lasso Regression"
},
"ridge_regression": {
"title": "Ridge Regression"
},
"gbm_regression": {
"title": "Gradient Boosting Regression"
}
}
benchmark_color_map = {
"tree_shap": "#1E88E5",
"deep_shap": "#1E88E5",
"linear_shap_corr": "#1E88E5",
"linear_shap_ind": "#ff0d57",
"coef": "#13B755",
"random": "#999999",
"const_random": "#666666",
"kernel_shap_1000_meanref": "#7C52FF"
}
negated_metrics = [
"runtime",
"remove_positive",
"mask_remove_positive",
"keep_negative",
"mask_keep_negative"
]
one_minus_metrics = [
"batch_remove_absolute__r2",
"batch_remove_absolute__roc_auc"
]
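# Sign conventions: metrics in negated_metrics are "lower is better" and get
# negated, while those in one_minus_metrics are bounded scores whose
# complement is plotted, so "higher curve / larger area = better method"
# holds uniformly across all benchmark plots below.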
def plot_curve(dataset, model, metric, cmap=benchmark_color_map):
experiments = run_experiments(dataset=dataset, model=model, metric=metric)
pl.figure()
method_arr = []
for (name,(fcounts,scores)) in experiments:
_,_,method,_ = name
if metric in negated_metrics:
scores = -scores
elif metric in one_minus_metrics:
scores = 1 - scores
auc = sklearn.metrics.auc(fcounts, scores) / fcounts[-1]
method_arr.append((auc, method, scores))
for (auc,method,scores) in sorted(method_arr):
method_title = getattr(methods, method).__doc__.split("\n")[0].strip()
l = "{:6.3f} - ".format(auc) + method_title
pl.plot(fcounts / fcounts[-1], scores, label=l, color=cmap.get(method, "#000000"), linewidth=2)
pl.xlabel(labels[metric]["xlabel"])
pl.ylabel(labels[metric]["ylabel"])
pl.title(labels[metric]["title"])
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
ahandles, alabels = pl.gca().get_legend_handles_labels()
pl.legend(reversed(ahandles), reversed(alabels))
return pl.gcf()
def make_grid(scores, dataset, model):
color_vals = {}
for (_,_,method,metric),(fcounts,score) in filter(lambda x: x[0][0] == dataset and x[0][1] == model, scores):
if metric not in color_vals:
color_vals[metric] = {}
if metric in negated_metrics:
score = -score
elif metric in one_minus_metrics:
score = 1 - score
if fcounts is None:
color_vals[metric][method] = score
else:
auc = sklearn.metrics.auc(fcounts, score) / fcounts[-1]
color_vals[metric][method] = auc
col_keys = list(color_vals.keys())
row_keys = list(set([v for k in col_keys for v in color_vals[k].keys()]))
data = -28567 * np.ones((len(row_keys), len(col_keys)))
for i in range(len(row_keys)):
for j in range(len(col_keys)):
data[i,j] = color_vals[col_keys[j]][row_keys[i]]
assert np.sum(data == -28567) == 0, "There are missing data values!"
data = (data - data.min(0)) / (data.max(0) - data.min(0))
    # sort methods by mean performance, best first
inds = np.argsort(-data.mean(1))
row_keys = [row_keys[i] for i in inds]
data = data[inds,:]
return row_keys, col_keys, data
from matplotlib.colors import LinearSegmentedColormap
red_blue_solid = LinearSegmentedColormap('red_blue_solid', {
'red': ((0.0, 198./255, 198./255),
(1.0, 5./255, 5./255)),
'green': ((0.0, 34./255, 34./255),
(1.0, 198./255, 198./255)),
'blue': ((0.0, 5./255, 5./255),
(1.0, 24./255, 24./255)),
'alpha': ((0.0, 1, 1),
(1.0, 1, 1))
})
from IPython.core.display import HTML
def plot_grids(dataset, model_names):
scores = []
for model in model_names:
scores.extend(run_experiments(dataset=dataset, model=model))
prefix = ""
out = "" # background: rgb(30, 136, 229)
out += "<div style='font-weight: regular; font-size: 24px; text-align: center; background: #f8f8f8; color: #000; padding: 20px;'>SHAP Benchmark</div>"
out += "<div style='height: 1px; background: #ddd;'></div>"
#out += "<div style='height: 7px; background-image: linear-gradient(to right, rgb(30, 136, 229), rgb(255, 13, 87));'></div>"
out += "<table style='border-width: 1px; font-size: 14px; margin-left: 40px'>"
for ind,model in enumerate(model_names):
row_keys, col_keys, data = make_grid(scores, dataset, model)
# print(data)
# print(colors.red_blue_solid(0.))
# print(colors.red_blue_solid(1.))
# return
for metric in col_keys:
if metric not in ["local_accuracy", "runtime", "consistency_guarantees"]:
plot_curve(dataset, model, metric)
buf = io.BytesIO()
pl.savefig(buf, format = 'png')
pl.close()
buf.seek(0)
data_uri = base64.b64encode(buf.read()).decode('utf-8').replace('\n', '')
plot_id = "plot__"+dataset+"__"+model+"__"+metric
prefix += "<div onclick='document.getElementById(\"%s\").style.display = \"none\"' style='display: none; position: fixed; z-index: 10000; left: 0px; right: 0px; top: 0px; bottom: 0px; background: rgba(255,255,255,0.5);' id='%s'>" % (plot_id, plot_id)
prefix += "<img style='margin-left: auto; margin-right: auto; margin-top: 200px;' src='data:image/png;base64,%s'>" % data_uri
prefix += "</div>"
model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
if ind == 0:
out += "<tr><td style='background: #fff'></td></td>"
for j in range(data.shape[1]):
metric_title = labels[col_keys[j]]["title"]
out += "<td style='width: 40px; background: #fff'><div style='margin-bottom: -5px; white-space: nowrap; transform: rotate(-45deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>" + metric_title + "</div></td>"
out += "</tr>"
out += "<tr><td style='background: #fff'></td><td colspan='%d' style='background: #fff; font-weight: bold; text-align: center'>%s</td></tr>" % (data.shape[1], model_title)
for i in range(data.shape[0]):
out += "<tr>"
# if i == 0:
# out += "<td rowspan='%d' style='background: #fff; text-align: center; white-space: nowrap; vertical-align: middle; '><div style='font-weight: bold; transform: rotate(-90deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>%s</div></td>" % (data.shape[0], model_name)
method_title = getattr(methods, row_keys[i]).__doc__.split("\n")[0].strip()
out += "<td style='background: #ffffff' title='shap.LinearExplainer(model)'>" + method_title + "</td>"
for j in range(data.shape[1]):
plot_id = "plot__"+dataset+"__"+model+"__"+col_keys[j]
out += "<td onclick='document.getElementById(\"%s\").style.display = \"block\"' style='padding: 0px; padding-left: 0px; padding-right: 0px; border-left: 0px solid #999; width: 42px; height: 34px; background-color: #fff'>" % plot_id
#out += "<div style='opacity: "+str(2*(max(1-data[i,j], data[i,j])-0.5))+"; background-color: rgb" + str(tuple(v*255 for v in colors.red_blue_solid(0. if data[i,j] < 0.5 else 1.)[:-1])) + "; height: "+str((30*max(1-data[i,j], data[i,j])))+"px; margin-left: auto; margin-right: auto; width:"+str((30*max(1-data[i,j], data[i,j])))+"px'></div>"
out += "<div style='opacity: "+str(1)+"; background-color: rgb" + str(tuple(v*255 for v in colors.red_blue_solid(2*(data[i,j]-0.5))[:-1])) + "; height: "+str((30*data[i,j]))+"px; margin-left: auto; margin-right: auto; width:"+str((30*data[i,j]))+"px'></div>"
#out += "<div style='float: left; background-color: #eee; height: 10px; width: "+str((40*(1-data[i,j])))+"px'></div>"
out += "</td>"
out += "</tr>" #
out += "<tr><td colspan='%d' style='background: #fff'></td>" % (data.shape[1] + 1)
out += "</table>"
return HTML(prefix + out)
```
|
{
"source": "JemisaR/Current-Weather",
"score": 3
}
|
#### File: Current-Weather/weather/models.py
```python
from django.db import models
from django.contrib.auth.models import User
class City(models.Model):
name = models.CharField(max_length=85)
country = models.CharField(max_length=85, blank=True)
country_code = models.CharField(max_length=2, blank=True)
latitude = models.DecimalField(max_digits=6, decimal_places=4, default=0)
longitude = models.DecimalField(max_digits=7, decimal_places=4, default=0)
zip_code = models.PositiveIntegerField(default=0)
user = models.ManyToManyField(User, related_name="my_cities", blank=True)
    def __str__(self):  # show the actual city name on the dashboard
return self.name
def save(self, *args, **kwargs):
self.name = self.name.capitalize()
self.country = self.country.capitalize()
self.country_code = self.country_code.lower()
return super(City, self).save(*args, **kwargs)
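    # Normalizing case on save keeps the ("name", "country_code") uniqueness
    # constraint below effectively case-insensitive: e.g. "paris"/"FR" and
    # "Paris"/"fr" collapse to the same stored row.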
class Meta:
verbose_name_plural = "cities"
unique_together = ("name", "country_code")
```
#### File: Current-Weather/weather/views.py
```python
from django.db import IntegrityError
from rest_framework import status
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from .open_weather_map import OpenWeatherMap
from .serializers import CitySerializer, CityWeatherSerializer
from .models import City
class CityWeather(APIView):
permission_classes = (IsAuthenticated,)
queryset = City.objects.all()
def post(self, request):
api = OpenWeatherMap()
print("Request", request, request.data)
city = request.data["city"]
country = request.data["country"]
unit = request.data["unit"]
if country != "":
location = city + "," + country
else:
location = city
weather_data = api.current_weather(location, unit)
weather_data["city"] = city.capitalize()
results = CityWeatherSerializer(weather_data, many=False).data
return Response(results)
class CitiesWeather(APIView):
queryset = City.objects.all()
# serializer_class = CitySerializer
def get(self, request):
api = OpenWeatherMap()
weather_data = []
cities = City.objects.all().order_by("name")
for city in cities:
weather = api.current_weather(city.name, unit="c")
weather["city"] = city.name
# print("City weather", weather)
weather_data.append(weather)
results = CityWeatherSerializer(weather_data, many=True).data
return Response(results)
class CityRetrieveUpdateDestroyAPIView(RetrieveUpdateDestroyAPIView):
""" View cities. """
permission_classes = (IsAuthenticated, )
queryset = City.objects.all()
serializer_class = CitySerializer
class CityListAPIView(ListAPIView):
""" View cities. """
permission_classes = (IsAuthenticated, )
queryset = City.objects.all().order_by("name")
serializer_class = CitySerializer
class CityListCreate(ListCreateAPIView):
""" View and create cities. """
permission_classes = (IsAuthenticated, )
queryset = City.objects.all()
serializer_class = CitySerializer
def create(self, request, *args, **kwargs):
try:
return super(ListCreateAPIView, self).create(request, *args, **kwargs)
except IntegrityError:
# This is due to case sensitivity in the City model.
return Response(
data={
"error": "This city is already saved."
},
status=status.HTTP_400_BAD_REQUEST
)
class MyCities(APIView):
""" Allow user to view his/her favorite cities. """
# permission_classes = [permissions.IsAuthenticated, ]
queryset = City.objects.all()
serializer_class = CitySerializer
def get_queryset(self):
"""
This view should return a list of all the purchases
for the currently authenticated user.
"""
user = self.request.user
return City.objects.filter(user=user)
```
|
{
"source": "jemisjoky/Continuous-uMPS",
"score": 2
}
|
#### File: Continuous-uMPS/continuous-umps/utils.py
```python
def null(*args, **kwargs):
pass
class FakeLogger:
__init__ = null
log_metric = null
log_metrics = null
end = null
```
|
{
"source": "jemisjoky/TorchMPS",
"score": 2
}
|
#### File: TorchMPS/torchmps/embeddings.py
```python
from math import sqrt, pi
from functools import partial
from typing import Union, Optional, Callable
import torch
from .utils2 import einsum
class DataDomain:
r"""
Defines a domain for input data to a probabilistic model
DataDomain supports both continuous and discrete domains, with the
latter always associated with indices of the form `0, 1, ..., max_val-1`.
For continuous domains, real intervals of the form `[min_val, max_val]`
can be defined.
Args:
continuous (bool): Whether data domain is continuous or discrete
max_val (int or float): For discrete domains, this is the number of
indices to use, with the maximum index being max_val - 1. For
continuous domains, this is the endpoint of the real interval.
min_val (float): Only used for continuous domains, this is the
startpoint of the real interval.
"""
def __init__(
self,
continuous: bool,
max_val: Union[int, float],
min_val: Optional[float] = None,
):
# Check defining input for correctness
if continuous:
assert max_val > min_val
self.min_val = min_val
else:
assert max_val >= 0
self.max_val = max_val
self.continuous = continuous
class FixedEmbedding:
r"""
Framework for fixed embedding function converting data to vectors
Args:
emb_fun (function): Function taking arbitrary tensors of values and
returning tensor of embedded vectors, which has one additional
axis in the last position. These values must be either integers,
for discrete data domains, or reals, for continuous data domains.
data_domain (DataDomain): Object which specifies the domain on which
the data fed to the embedding function is defined.
"""
def __init__(self, emb_fun: Callable, data_domain: DataDomain):
assert hasattr(emb_fun, "__call__")
# Save defining data, compute lambda matrix
self.domain = data_domain
self.emb_fun = emb_fun
self.make_lambda()
# Initialize parameters to be set later
self.num_points = None
self.special_case = None
def make_lambda(self, num_points: int = 1000):
"""
Compute the lambda matrix used for normalization
"""
# Compute the raw lambda matrix, computing number of points if needed
if self.domain.continuous:
points = torch.linspace(
self.domain.min_val, self.domain.max_val, steps=num_points
)
self.num_points = num_points
emb_vecs = self.emb_fun(points)
assert emb_vecs.ndim == 2
self.emb_dim = emb_vecs.shape[1]
assert emb_vecs.shape[0] == num_points
# Get rank-1 matrices for each point, then numerically integrate
emb_mats = einsum("bi,bj->bij", emb_vecs, emb_vecs.conj())
lamb_mat = torch.trapz(emb_mats, points, dim=0)
else:
points = torch.arange(self.domain.max_val).long()
emb_vecs = self.emb_fun(points)
assert emb_vecs.ndim == 2
self.emb_dim = emb_vecs.shape[1]
assert emb_vecs.shape[0] == self.domain.max_val
# Get rank-1 matrices for each point, then sum together
emb_mats = einsum("bi,bj->bij", emb_vecs, emb_vecs.conj())
lamb_mat = torch.sum(emb_mats, dim=0)
assert lamb_mat.ndim == 2
assert lamb_mat.shape[0] == lamb_mat.shape[1]
self.lamb_mat = lamb_mat
# Check if the computed matrix is diagonal or multiple of the identity
if torch.allclose(lamb_mat.diag().diag(), lamb_mat):
lamb_mat = lamb_mat.diag()
if torch.allclose(lamb_mat.mean(), lamb_mat):
self.lamb_mat = lamb_mat.mean()
else:
self.lamb_mat = lamb_mat
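    # Note (interpretation, not part of the original code): lamb_mat is the
    # Gram matrix sum/integral of phi(x) phi(x)^dagger over the data domain;
    # downstream probabilistic models can contract it into Born-rule
    # probabilities so the embedded distribution normalizes, and the
    # diagonal / scalar special cases above enable cheaper contractions.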
def embed(self, input_data):
"""
Embed input data via the user-specified embedding function
"""
return self.emb_fun(input_data)
unit_interval = DataDomain(continuous=True, max_val=1, min_val=0)
def onehot_embed(tensor, emb_dim):
"""
Function giving trivial one-hot embedding of categorical data
"""
shape = tensor.shape + (emb_dim,)
output = torch.zeros(*shape)
output.scatter_(-1, tensor[..., None], 1)
return output
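# Example: onehot_embed(torch.tensor([0, 2]), emb_dim=3) returns
# [[1., 0., 0.], [0., 0., 1.]] -- each index becomes a one-hot row.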
def trig_embed(data, emb_dim=2):
r"""
Function giving embedding from powers of sine and cosine
    Based on Equation B4 of E. M. Stoudenmire and D. J. Schwab, "Supervised
Learning With Quantum-Inspired Tensor Networks", NIPS 2016, which maps an
input x in the unit interval to a d-dim vector whose j'th component
(where j = 0, 1, ..., d-1) is:
.. math::
\phi(x)_j = \sqrt{d-1 \choose j} \cos(\frac{pi}{2}x)^{d-j-1}
\sin(\frac{pi}{2}x)^{j}
Written by <NAME>on
"""
from scipy.special import binom
emb_data = []
for s in range(emb_dim):
comp = (
torch.cos(data * pi / 2) ** (emb_dim - s - 1)
* torch.sin(data * pi / 2) ** s
)
comp *= sqrt(binom(emb_dim - 1, s))
emb_data.append(comp)
emb_data = torch.stack(emb_data, dim=-1)
assert emb_data.shape == data.shape + (emb_dim,)
return emb_data
sincos_embed = partial(trig_embed, emb_dim=2)
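# Example (emb_dim=2): sincos_embed maps x in [0,1] to the unit vector
# [cos(pi*x/2), sin(pi*x/2)], interpolating between the two basis states
# at x=0 and x=1 (binom(1, s) = 1, so no extra weighting appears).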
```
#### File: TorchMPS/torchmps/utils.py
```python
import numpy as np
import torch
def svd_flex(tensor, svd_string, max_D=None, cutoff=1e-10, sv_right=True, sv_vec=None):
"""
Split an input tensor into two pieces using a SVD across some partition
Args:
tensor (Tensor): Pytorch tensor with at least two indices
svd_string (str): String of the form 'init_str->left_str,right_str',
where init_str describes the indices of tensor, and
left_str/right_str describe those of the left and
right output tensors. The characters of left_str
and right_str form a partition of the characters in
init_str, but each contain one additional character
representing the new bond which comes from the SVD
Reversing the terms in svd_string to the left and
right of '->' gives an ein_string which can be used
to multiply both output tensors to give a (low rank
approximation) of the input tensor
cutoff (float): A truncation threshold which eliminates any
singular values which are strictly less than cutoff
max_D (int): A maximum allowed value for the new bond. If max_D
is specified, the returned tensors
sv_right (bool): The SVD gives two orthogonal matrices and a matrix
of singular values. sv_right=True merges the SV
matrix with the right output, while sv_right=False
merges it with the left output
sv_vec (Tensor): Pytorch vector with length max_D, which is modified
in place to return the vector of singular values
Returns:
left_tensor (Tensor),
right_tensor (Tensor): Tensors whose indices are described by the
left_str and right_str parts of svd_string
bond_dim: The dimension of the new bond appearing from
the cutoff in our SVD. Note that this generally
won't match the dimension of left_/right_tensor
at this mode, which is padded with zeros
whenever max_D is specified
"""
def prod(int_list):
output = 1
for num in int_list:
output *= num
return output
with torch.no_grad():
# Parse svd_string into init_str, left_str, and right_str
svd_string = svd_string.replace(" ", "")
init_str, post_str = svd_string.split("->")
left_str, right_str = post_str.split(",")
# Check formatting of init_str, left_str, and right_str
assert all([c.islower() for c in init_str + left_str + right_str])
assert len(set(init_str + left_str + right_str)) == len(init_str) + 1
assert len(set(init_str)) + len(set(left_str)) + len(set(right_str)) == len(
init_str
) + len(left_str) + len(right_str)
# Get the special character representing our SVD-truncated bond
bond_char = set(left_str).intersection(set(right_str)).pop()
left_part = left_str.replace(bond_char, "")
right_part = right_str.replace(bond_char, "")
# Permute our tensor into something that can be viewed as a matrix
ein_str = f"{init_str}->{left_part+right_part}"
tensor = torch.einsum(ein_str, [tensor]).contiguous()
left_shape = list(tensor.shape[: len(left_part)])
right_shape = list(tensor.shape[len(left_part) :])
left_dim, right_dim = prod(left_shape), prod(right_shape)
tensor = tensor.view([left_dim, right_dim])
# Get SVD and format so that left_mat * diag(svs) * right_mat = tensor
left_mat, svs, right_mat = torch.svd(tensor)
svs, _ = torch.sort(svs, descending=True)
right_mat = torch.t(right_mat)
# Decrease or increase our tensor sizes in the presence of max_D
if max_D and len(svs) > max_D:
svs = svs[:max_D]
left_mat = left_mat[:, :max_D]
right_mat = right_mat[:max_D]
elif max_D and len(svs) < max_D:
copy_svs = torch.zeros([max_D])
copy_svs[: len(svs)] = svs
copy_left = torch.zeros([left_mat.size(0), max_D])
copy_left[:, : left_mat.size(1)] = left_mat
copy_right = torch.zeros([max_D, right_mat.size(1)])
copy_right[: right_mat.size(0)] = right_mat
svs, left_mat, right_mat = copy_svs, copy_left, copy_right
# If given as input, copy singular values into sv_vec
if sv_vec is not None and svs.shape == sv_vec.shape:
sv_vec[:] = svs
elif sv_vec is not None and svs.shape != sv_vec.shape:
raise TypeError(
f"sv_vec.shape must be {list(svs.shape)}, but is "
f"currently {list(sv_vec.shape)}"
)
# Find the truncation point relative to our singular value cutoff
truncation = 0
for s in svs:
if s < cutoff:
break
truncation += 1
if truncation == 0:
raise RuntimeError(
"SVD cutoff too large, attempted to truncate "
"tensor to bond dimension 0"
)
# Perform the actual truncation
if max_D:
svs[truncation:] = 0
left_mat[:, truncation:] = 0
right_mat[truncation:] = 0
else:
# If max_D wasn't given, set it to the truncation index
max_D = truncation
svs = svs[:truncation]
left_mat = left_mat[:, :truncation]
right_mat = right_mat[:truncation]
# Merge the singular values into the appropriate matrix
if sv_right:
right_mat = torch.einsum("l,lr->lr", [svs, right_mat])
else:
left_mat = torch.einsum("lr,r->lr", [left_mat, svs])
# Reshape the matrices to make them proper tensors
left_tensor = left_mat.view(left_shape + [max_D])
right_tensor = right_mat.view([max_D] + right_shape)
# Finally, permute the indices into the desired order
if left_str != left_part + bond_char:
left_tensor = torch.einsum(
f"{left_part+bond_char}->{left_str}", [left_tensor]
)
if right_str != bond_char + right_part:
right_tensor = torch.einsum(
f"{bond_char+right_part}->{right_str}", [right_tensor]
)
return left_tensor, right_tensor, truncation
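# Minimal usage sketch (hypothetical shapes, for illustration only):
#   t = torch.randn(4, 5, 6)
#   left, right, bond_dim = svd_flex(t, "abc->ax,xbc", max_D=8)
#   # left: (4, 8), right: (8, 5, 6), and
#   # torch.einsum("ax,xbc->abc", left, right) recovers a rank-<=bond_dim
#   # approximation of t, zero-padded up to max_D along the new bond "x".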
def init_tensor(shape, bond_str, init_method):
"""
Initialize a tensor with a given shape
Args:
shape: The shape of our output parameter tensor.
bond_str: The bond string describing our output parameter tensor,
which is used in 'random_eye' initialization method.
The characters 'l' and 'r' are used to refer to the
left or right virtual indices of our tensor, and are
both required to be present for the random_eye and
min_random_eye initialization methods.
init_method: The method used to initialize the entries of our tensor.
This can be either a string, or else a tuple whose first
entry is an initialization method and whose remaining
entries are specific to that method. In each case, std
will always refer to a standard deviation for a random
normal random component of each entry of the tensor.
Allowed options are:
* ('random_eye', std): Initialize each tensor input
slice close to the identity
* ('random_zero', std): Initialize each tensor input
slice close to the zero matrix
* ('min_random_eye', std, init_dim): Initialize each
tensor input slice close to a truncated identity
matrix, whose truncation leaves init_dim unit
entries on the diagonal. If init_dim is larger
than either of the bond dimensions, then init_dim
is capped at the smaller bond dimension.
"""
# Unpack init_method if it is a tuple
if not isinstance(init_method, str):
init_str = init_method[0]
std = init_method[1]
if init_str == "min_random_eye":
init_dim = init_method[2]
init_method = init_str
else:
std = 1e-9
# Check that bond_str is properly sized and doesn't have repeat indices
assert len(shape) == len(bond_str)
assert len(set(bond_str)) == len(bond_str)
if init_method not in ["random_eye", "min_random_eye", "random_zero"]:
raise ValueError(f"Unknown initialization method: {init_method}")
if init_method in ["random_eye", "min_random_eye"]:
bond_chars = ["l", "r"]
assert all([c in bond_str for c in bond_chars])
# Initialize our tensor slices as identity matrices which each fill
# some or all of the initially allocated bond space
if init_method == "min_random_eye":
# The dimensions for our initial identity matrix. These will each
# be init_dim, unless init_dim exceeds one of the bond dimensions
bond_dims = [shape[bond_str.index(c)] for c in bond_chars]
if all([init_dim <= full_dim for full_dim in bond_dims]):
bond_dims = [init_dim, init_dim]
else:
init_dim = min(bond_dims)
eye_shape = [init_dim if c in bond_chars else 1 for c in bond_str]
expand_shape = [
init_dim if c in bond_chars else shape[i]
for i, c in enumerate(bond_str)
]
elif init_method == "random_eye":
eye_shape = [
shape[i] if c in bond_chars else 1 for i, c in enumerate(bond_str)
]
expand_shape = shape
bond_dims = [shape[bond_str.index(c)] for c in bond_chars]
eye_tensor = torch.eye(bond_dims[0], bond_dims[1]).view(eye_shape)
eye_tensor = eye_tensor.expand(expand_shape)
tensor = torch.zeros(shape)
        tensor[tuple(slice(dim) for dim in expand_shape)] = eye_tensor  # tuple indexing; list-of-slices indexing is deprecated
# Add on a bit of random noise
tensor += std * torch.randn(shape)
elif init_method == "random_zero":
tensor = std * torch.randn(shape)
return tensor
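# Example: init_tensor([5, 2, 5], "lir", ("random_eye", 1e-9)) builds a
# (5, 2, 5) MPS core whose every input slice is a 5x5 identity plus noise.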
### OLDER MISCELLANEOUS FUNCTIONS ### # noqa: E266
def onehot(labels, max_value):
"""
Convert a batch of labels from the set {0, 1,..., num_value-1} into their
onehot encoded counterparts
"""
label_vecs = torch.zeros([len(labels), max_value])
for i, label in enumerate(labels):
label_vecs[i, label] = 1.0
return label_vecs
def joint_shuffle(input_data, input_labels):
"""
Shuffle input data and labels in a joint manner, so each label points to
its corresponding datum. Works for both regular and CUDA tensors
"""
assert input_data.is_cuda == input_labels.is_cuda
use_gpu = input_data.is_cuda
if use_gpu:
input_data, input_labels = input_data.cpu(), input_labels.cpu()
data, labels = input_data.numpy(), input_labels.numpy()
# Shuffle relative to the same seed
np.random.seed(0)
np.random.shuffle(data)
np.random.seed(0)
np.random.shuffle(labels)
data, labels = torch.from_numpy(data), torch.from_numpy(labels)
if use_gpu:
data, labels = data.cuda(), labels.cuda()
return data, labels
def load_HV_data(length):
"""
Output a toy "horizontal/vertical" data set of black and white
images with size length x length. Each image contains a single
horizontal or vertical stripe, set against a background
of the opposite color. The labels associated with these images
are either 0 (horizontal stripe) or 1 (vertical stripe).
In its current version, this returns two data sets, a training
set with 75% of the images and a test set with 25% of the
images.
"""
num_images = 4 * (2 ** (length - 1) - 1)
num_patterns = num_images // 2
split = num_images // 4
if length > 14:
print(
"load_HV_data will generate {} images, "
"this could take a while...".format(num_images)
)
images = np.empty([num_images, length, length], dtype=np.float32)
    labels = np.empty(num_images, dtype=int)  # np.int was removed in NumPy >= 1.24
# Used to generate the stripe pattern from integer i below
template = "{:0" + str(length) + "b}"
for i in range(1, num_patterns + 1):
pattern = template.format(i)
pattern = [int(s) for s in pattern]
for j, val in enumerate(pattern):
# Horizontal stripe pattern
images[2 * i - 2, j, :] = val
# Vertical stripe pattern
images[2 * i - 1, :, j] = val
labels[2 * i - 2] = 0
labels[2 * i - 1] = 1
# Shuffle and partition into training and test sets
np.random.seed(0)
np.random.shuffle(images)
np.random.seed(0)
np.random.shuffle(labels)
train_images, train_labels = images[split:], labels[split:]
test_images, test_labels = images[:split], labels[:split]
return (
torch.from_numpy(train_images),
torch.from_numpy(train_labels),
torch.from_numpy(test_images),
torch.from_numpy(test_labels),
)
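# Hypothetical usage sketch (illustration only): for length=4 there are
# 4 * (2**3 - 1) = 28 images, split 21 train / 7 test.
# >>> train_x, train_y, test_x, test_y = load_HV_data(4)
# >>> train_x.shape, test_x.shape
# (torch.Size([21, 4, 4]), torch.Size([7, 4, 4]))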
```
|
{
"source": "jemisjoky/UnsupGenModbyMPS",
"score": 3
}
|
#### File: UnsupGenModbyMPS/BStest/BS_main.py
```python
import sys
sys.path.append("../")
from MPScumulant import MPS_c
import numpy as np
from numpy.random import randint, rand
import os
def state2ind(state):
"""binary configuration -> int"""
return int(state.dot(2 ** np.arange(len(state))))
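# Little-endian binary encoding; a hypothetical example:
# >>> state2ind(np.array([1, 0, 1]))  # 1*1 + 0*2 + 1*4
# 5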
def remember(mps, steps, nsam):
"""Inference with Metropolis approach
nsam: number of walkers = number of samples
steps: number of steps they walk
Their final states are returned
"""
nsize = mps.mps_len
print("n_sample=%d" % nsam)
current = randint(2, size=(nsam, nsize))
for n in range(1, steps + 1):
if n % (steps // 10) == 0:
print(".", end="")
flipper = randint(2, size=(nsam, nsize))
new = current ^ flipper
for x in range(nsam):
prob = mps.Give_probab(current[x])
prob_ = mps.Give_probab(new[x])
if prob_ > prob or rand() < prob_ / prob:
current[x] = new[x]
return current
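# The acceptance test above is the standard Metropolis rule: a proposal is
# accepted with probability min(1, prob_/prob). The `prob_ > prob` branch
# short-circuits certain acceptances; the division assumes prob > 0 whenever
# the proposal is not strictly more likely.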
def remember_zipper(mps, nsam):
"""Zipper sampling
nsam: number of samples
"""
mps.left_cano()
print("n_sample=%d" % nsam)
sam = np.asarray([mps.generate_sample() for _ in range(nsam)])
return sam
def statistic(current):
"""Categorize and count the samples
Return a numpy record array whose dtype=[('x', int), ('f', int)]"""
samprob = {}
for x in current:
xind = state2ind(x)
if xind in samprob:
samprob[xind] += 1
else:
samprob[xind] = 1
memory = [(x, samprob[x]) for x in samprob]
memory = np.asarray(memory, dtype=[("x", int), ("f", int)])
return np.sort(memory, order="f")
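# Hypothetical example: samples [1,0], [1,0], [0,1] give records
# (x=2, f=1) and (x=1, f=2), sorted by increasing frequency f.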
if __name__ == "__main__":
dataset = np.load("BSdata.npy").reshape(-1, 16)
"""The binary number form of BS is stored in BSind.npy, with the identical order with BSdata.npy"""
m = MPS_c(16)
m.left_cano()
m.designate_data(dataset)
m.cutoff = 5e-5
m.descent_step_length = 0.05
m.nbatch = 10
m.train(2)
m.saveMPS("BS-", True)
sam_zip = remember_zipper(m, 1000)
# os.chdir('BS-MPS')
np.save("sam_zip.npy", sam_zip)
np.save("memo_zip.npy", statistic(sam_zip))
sam_met = remember(m, 5000, 1000)
np.save("sam_met.npy", sam_met)
np.save("memo_met.npy", statistic(sam_met))
```
#### File: jemisjoky/UnsupGenModbyMPS/datasets.py
```python
import os
from math import floor
import numpy as np
import torch
@torch.no_grad()
def bin_data(input, num_bins=None):
"""
Discretize greyscale values into a finite number of bins
"""
if num_bins is None:
return input
assert num_bins > 0
# Set each of the corresponding bin indices
out_data = torch.full_like(input, -1)
for i in range(num_bins):
bin_inds = (i / num_bins <= input) * (input <= (i + 1) / num_bins)
out_data[bin_inds] = i
assert out_data.max() >= 0
return out_data.long()
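# Hypothetical example: with two bins the unit interval splits at 0.5, and a
# boundary value lands in the higher bin because later bins overwrite:
# >>> bin_data(torch.tensor([0.1, 0.5, 0.95]), num_bins=2)
# tensor([0, 1, 1])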
def load_genz(genz_num: int, slice_len=None):
"""
Load a dataset of time series with dynamics set by various Genz functions
Separate train, validation, and test datasets are returned, containing data
from 8000, 1000, and 1000 time series. The length of each time series
depends on `slice_len`, and is 100 by default (`slice_len=None`). For
positive integer values of `slice_len`, these time series are split into
contiguous chunks of length equal to `slice_len`.
Args:
genz_num: Integer between 1 and 6 setting choice of Genz function
slice_len: Optional positive integer length the series are sliced into
Returns:
train, val, test: Three arrays with respective shape (8000 * s, slice_len),
(1000 * s, slice_len), and (1000 * s, slice_len), where s is the number of
slices cut from each series (s = 1 and slice_len = 100 when slice_len is None).
"""
# Length between startpoints of output sliced series
stride = 2
assert 1 <= genz_num <= 6
assert slice_len is None or 1 < slice_len <= 100
if slice_len is None:
slice_suffix = ""
slice_len = 100
else:
assert isinstance(slice_len, int)
slice_suffix = f"_l{slice_len}_s{stride}"
# Number of slices per time series
s_per_ts = (100 - slice_len) // stride + 1
# Return saved dataset if we have already generated this previously
save_file = f"datasets/genz/genz{genz_num}{slice_suffix}.npz"
if os.path.isfile(save_file):
out = np.load(save_file)
train, val, test = out["train"], out["val"], out["test"]
assert val.shape == test.shape == (1000 * s_per_ts, slice_len)
assert train.shape == (8000 * s_per_ts, slice_len)
return train, val, test
# Definitions of each of the Genz functions which drive the time series
gfun = genz_funs[genz_num]
# Initialize random starting values and update using Genz update function
rng = np.random.default_rng(genz_num)
x = rng.permutation(np.linspace(0.0, 1.0, num=10000))
long_series = np.empty((10000, 100))
for i in range(100):
x = gfun(x)
long_series[:, i] = x
# Normalize the time series values to lie in range [0, 1]
min_val, max_val = long_series.min(), long_series.max()
long_series = (long_series - min_val) / (max_val - min_val)
# Split into train, validation, and test sets
base_series = (long_series[:8000], long_series[8000:9000], long_series[9000:])
# Cut up the full time series into shorter sliced time series
all_series = []
for split in base_series:
num_series = split.shape[0]
s_split = np.empty((num_series * s_per_ts, slice_len))
for i in range(s_per_ts):
j = i * stride
s_split[i * num_series : (i + 1) * num_series] = split[
:, j : (j + slice_len)
]
all_series.append(s_split)
# Shuffle individual time series, save everything to disk
train, val, test = [rng.permutation(ts) for ts in all_series]
np.savez_compressed(save_file, train=train, val=val, test=test)
return train, val, test
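# Hypothetical usage sketch, consistent with the asserts above (assumes the
# datasets/genz/ directory exists for the save step):
# >>> train, val, test = load_genz(3)
# >>> train.shape, val.shape, test.shape
# ((8000, 100), (1000, 100), (1000, 100))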
def bars_and_stripes(width=10, max_size=12000, seed=0):
"""
Generate images from bars and stripes dataset
Note that *all* images are generated before a subset are selected, so
choosing height/width too large will lead to a long runtime
Args:
width (int): Width (and height) of square B&S images
max_size (int): Maximum number of images in all returned splits
seed (int): Random seed for reproducibility
Returns:
train, val, test: Flattened float-valued bars and stripes arrays,
split roughly 10:1:1, each with shape (split_size, width**2)
"""
width = int(width)
num_total = 2 ** (width + 1) - 2
num_output = min(num_total, max_size)
# Create bit masks which will be used to define bar/stripe patterns
patterns = np.arange(2 ** width)
filters = np.arange(width)
bit_masks = (((patterns[:, np.newaxis] & (1 << filters))) > 0).astype(int)
# Generate all 2**(width + 1) - 2 images using above bit masks
bs_data = np.zeros((num_total, width, width))
bs_data[: num_total // 2] = bit_masks[:-1, :, np.newaxis] # Bars
bs_data[num_total // 2 :] = bit_masks[1:, np.newaxis, :] # Stripes
# Shuffle dataset and determine size to output
bs_data = np.random.RandomState(seed).permutation(bs_data)
# Split dataset into train, val, and test
bs_data = bs_data[:num_output].reshape((num_output, -1)).astype("float32")
lrg, sml = floor(num_output * 10 / 12), floor(num_output * 1 / 12)
train, val, test = bs_data[:lrg], bs_data[lrg : lrg + sml], bs_data[lrg + sml : lrg + 2 * sml]
return train, val, test
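# Hypothetical example: width=4 gives 2**5 - 2 = 30 images in total, and the
# 10:1:1 split yields 25 train / 2 val / 2 test (one image is left over):
# >>> train, val, test = bars_and_stripes(width=4)
# >>> train.shape, val.shape, test.shape
# ((25, 16), (2, 16), (2, 16))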
w = 0.5
c = 1.0 # I'm using the fact that c=1.0 to set c**2 = c**-2 = c
genz_funs = [
None, # Placeholder to give 1-based indexing
lambda x: np.cos(2 * np.pi * w + c * x),
lambda x: (c + (x + w)) ** -1,
lambda x: (1 + c * x) ** -2,
lambda x: np.exp(-c * np.pi * (x - w) ** 2),
lambda x: np.exp(-c * np.pi * np.abs(x - w)),
lambda x: np.where(x > w, 0, np.exp(c * x)),
]
```
|
{
"source": "jemisonf/bombman-cs-362",
"score": 2
}
|
#### File: jemisonf/bombman-cs-362/bombman.py
```python
import sys
import pygame
import os
import math
import copy
import random
import re
import time
from collections import defaultdict
from playerClass import *
DEBUG_PROFILING = False
DEBUG_FPS = False
DEBUG_VERBOSE = False
#------------------------------------------------------------------------------
def debug_log(message):
if DEBUG_VERBOSE:
print(message)
#==============================================================================
class Profiler(object):
SHOW_LAST = 10
#----------------------------------------------------------------------------
def __init__(self):
self.sections = {}
#----------------------------------------------------------------------------
def measure_start(self, section_name):
if not DEBUG_PROFILING:
return
if not (section_name in self.sections):
self.sections[section_name] = [0.0 for i in xrange(Profiler.SHOW_LAST)]
section_values = self.sections[section_name]
section_values[0] -= pygame.time.get_ticks()
#----------------------------------------------------------------------------
def measure_stop(self, section_name):
if not DEBUG_PROFILING:
return
if not section_name in self.sections:
return
section_values = self.sections[section_name]
section_values[0] += pygame.time.get_ticks()
#----------------------------------------------------------------------------
def end_of_frame(self):
for section_name in self.sections:
section_values = self.sections[section_name]
section_values.pop()
section_values.insert(0,0)
#----------------------------------------------------------------------------
def get_profile_string(self):
result = "PROFILING INFO:"
section_names = list(self.sections.keys())
section_names.sort()
for section_name in section_names:
result += "\n" + section_name.ljust(25) + ": "
section_values = self.sections[section_name]
for i in xrange(len(section_values)):
result += str(section_values[i]).ljust(5)
result += " AVG: " + str(sum(section_values) / float(len(section_values)))
return result
#==============================================================================
## Something that has a float position on the map.
class Positionable(object):
#----------------------------------------------------------------------------
def __init__(self):
self.position = (0.0,0.0)
#----------------------------------------------------------------------------
def set_position(self,position):
self.position = position
#----------------------------------------------------------------------------
def get_position(self):
return self.position
#----------------------------------------------------------------------------
def get_neighbour_tile_coordinates(self):
tile_coordinates = self.get_tile_position()
top = (tile_coordinates[0],tile_coordinates[1] - 1)
right = (tile_coordinates[0] + 1,tile_coordinates[1])
down = (tile_coordinates[0],tile_coordinates[1] + 1)
left = (tile_coordinates[0] - 1,tile_coordinates[1])
return (top,right,down,left)
#----------------------------------------------------------------------------
def get_tile_position(self):
return Positionable.position_to_tile(self.position)
#----------------------------------------------------------------------------
## Moves the object to center of tile (if not specified, objects current tile is used).
def move_to_tile_center(self, tile_coordinates=None):
if tile_coordinates != None:
self.position = tile_coordinates
self.position = (math.floor(self.position[0]) + 0.5,math.floor(self.position[1]) + 0.5)
#----------------------------------------------------------------------------
## Converts float position to integer tile position.
@staticmethod
def position_to_tile(position):
return (int(math.floor(position[0])),int(math.floor(position[1])))
#----------------------------------------------------------------------------
def is_near_tile_center(self):
position_within_tile = (self.position[0] % 1,self.position[1] % 1)
limit = 0.2
limit2 = 1.0 - limit
return (limit < position_within_tile[0] < limit2) and (limit < position_within_tile[1] < limit2)
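# Illustrative (hypothetical) values: position (3.5, 4.5) sits exactly at a
# tile center -> True; position (3.1, 4.5) lies within the 0.2 margin of the
# tile's left edge -> False.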
#==============================================================================
## Info about a bomb's flight (when boxed or thrown).
class BombFlightInfo(object):
#----------------------------------------------------------------------------
def __init__(self):
self.total_distance_to_travel = 0 ##< in tiles
self.distance_travelled = 0 ##< in tiles
self.direction = (0,0) ##< in which direction the bomb is flying, 0, 1 or -1
#==============================================================================
class Bomb(Positionable):
ROLLING_SPEED = 4
FLYING_SPEED = 5
BOMB_ROLLING_UP = 0
BOMB_ROLLING_RIGHT = 1
BOMB_ROLLING_DOWN = 2
BOMB_ROLLING_LEFT = 3
BOMB_FLYING = 4
BOMB_NO_MOVEMENT = 5
DETONATOR_EXPIRATION_TIME = 20000
BOMB_EXPLODES_IN = 3000
EXPLODES_IN_QUICK = 800 ##< for when the player has quick explosion disease
#----------------------------------------------------------------------------
def __init__(self, player):
super(Bomb,self).__init__()
self.time_of_existence = 0 ##< for how long (in ms) the bomb has existed
self.flame_length = player.get_flame_length() ##< how far the flame will go
self.player = player ##< to which player the bomb belongs
self.explodes_in = Bomb.BOMB_EXPLODES_IN ##< time in ms in which the bomb explodes from the time it was created (detonator_time must expire before this starts counting down)
self.detonator_time = 0 ##< if > 0, the bomb has a detonator on it, after expiring it becomes a regular bomb
self.set_position(player.get_position())
self.move_to_tile_center()
self.has_spring = player.bombs_have_spring()
self.movement = Bomb.BOMB_NO_MOVEMENT
self.has_exploded = False
self.flight_info = BombFlightInfo()
#----------------------------------------------------------------------------
## Sends the bomb flying from its current position to given tile (can be outside the map boundaries, will fly over the border from the other side).
def send_flying(self, destination_tile_coords):
self.movement = Bomb.BOMB_FLYING
current_tile = self.get_tile_position()
self.flight_info.distance_travelled = 0
axis = 1 if current_tile[0] == destination_tile_coords[0] else 0
self.flight_info.total_distance_to_travel = abs(current_tile[axis] - destination_tile_coords[axis])
self.flight_info.direction = [0,0]
self.flight_info.direction[axis] = -1 if current_tile[axis] > destination_tile_coords[axis] else 1
self.flight_info.direction = tuple(self.flight_info.direction)
destination_tile_coords = (destination_tile_coords[0] % GameMap.MAP_WIDTH,destination_tile_coords[1] % GameMap.MAP_HEIGHT)
self.move_to_tile_center(destination_tile_coords)
#----------------------------------------------------------------------------
def has_detonator(self):
return self.detonator_time > 0 and self.time_of_existence < Bomb.DETONATOR_EXPIRATION_TIME
#----------------------------------------------------------------------------
## Returns a time until the bomb explodes by itself.
def time_until_explosion(self):
return self.explodes_in + self.detonator_time - self.time_of_existence
#----------------------------------------------------------------------------
def explodes(self):
if not self.has_exploded:
self.player.bomb_exploded()
self.has_exploded = True
#==============================================================================
## Represents a flame coming off of an exploding bomb.
class Flame(object):
#----------------------------------------------------------------------------
def __init__(self):
self.player = None ##< reference to player to which the exploding bomb belonged
self.time_to_burnout = 1000 ##< time in ms till the flame disappears
self.direction = "all" ##< string representation of the flame direction
#==============================================================================
class MapTile(object):
TILE_FLOOR = 0 ##< walkable map tile
TILE_BLOCK = 1 ##< non-walkable but destroyable map tile
TILE_WALL = 2 ##< non-walkable and non-destroyable map tile
SPECIAL_OBJECT_TRAMPOLINE = 0
SPECIAL_OBJECT_TELEPORT_A = 1
SPECIAL_OBJECT_TELEPORT_B = 2
SPECIAL_OBJECT_ARROW_UP = 3
SPECIAL_OBJECT_ARROW_RIGHT = 4
SPECIAL_OBJECT_ARROW_DOWN = 5
SPECIAL_OBJECT_ARROW_LEFT = 6
SPECIAL_OBJECT_LAVA = 7
#----------------------------------------------------------------------------
def __init__(self, coordinates):
self.kind = MapTile.TILE_FLOOR
self.flames = []
self.coordinates = coordinates
self.to_be_destroyed = False ##< Flag that marks the tile to be destroyed after the flames go out.
self.item = None ##< Item that's present on the tile
self.special_object = None ##< special object present on the tile, like trampoline or teleport
self.destination_teleport = None ##< in case of special_object equal to SPECIAL_OBJECT_TELEPORT_A or SPECIAL_OBJECT_TELEPORT_B holds the destination teleport tile coordinates
#----------------------------------------------------------------------------
def shouldnt_walk(self):
return self.kind in [MapTile.TILE_WALL,MapTile.TILE_BLOCK] or len(self.flames) >= 1 or self.special_object == MapTile.SPECIAL_OBJECT_LAVA
#==============================================================================
## Holds and manipulates the map data including the players, bombs etc.
class GameMap(object):
MAP_WIDTH = 15
MAP_HEIGHT = 11
WALL_MARGIN_HORIZONTAL = 0.2
WALL_MARGIN_VERTICAL = 0.4
COLLISION_BORDER_UP = 0 ##< position is inside upper border with non-walkable tile
COLLISION_BORDER_RIGHT = 1 ##< position is inside right border with non-walkable tile
COLLISION_BORDER_DOWN = 2 ##< position is inside bottom border with non-walkable tile
COLLISION_BORDER_LEFT = 3 ##< position is inside left border with non-walkable tile
COLLISION_TOTAL = 4 ##< position is inside non-walkable tile
COLLISION_NONE = 5 ##< no collision
ITEM_BOMB = 0
ITEM_FLAME = 1
ITEM_SUPERFLAME = 2
ITEM_SPEEDUP = 3
ITEM_DISEASE = 4
ITEM_RANDOM = 5
ITEM_SPRING = 6
ITEM_SHOE = 7
ITEM_MULTIBOMB = 8
ITEM_BOXING_GLOVE = 9
ITEM_DETONATOR = 10
ITEM_THROWING_GLOVE = 11
SAFE_DANGER_VALUE = 5000 ##< time in ms, used in danger map to indicate safe tile
GIVE_AWAY_DELAY = 3000 ##< after how many ms the items of dead players will be given away
START_GAME_AFTER = 2500 ##< delay in ms before the game begins
STATE_WAITING_TO_PLAY = 0 ##< players can't do anything yet
STATE_PLAYING = 1 ##< game is being played
STATE_FINISHING = 2 ##< game is over but the map is still being updated for a while after
STATE_GAME_OVER = 3 ##< the game is definitely over and should no longer be updated
EARTHQUAKE_DURATION = 10000
#----------------------------------------------------------------------------
## Initialises a new map from map_data (string) and a PlaySetup object.
def __init__(self, map_data, play_setup, game_number, max_games, all_items_cheat=False):
# make the tiles array:
self.danger_map_is_up_to_date = False # to regenerate danger map only when needed
self.tiles = []
self.starting_positions = [(0.0,0.0) for i in xrange(10)] # starting position for each player
map_data = map_data.replace(" ","").replace("\n","") # get rid of white characters
string_split = map_data.split(";")
self.environment_name = string_split[0]
self.end_game_at = -1 ##< time at which the map should go to STATE_GAME_OVER state
self.start_game_at = GameMap.START_GAME_AFTER
self.win_announced = False
self.announce_win_at = -1
self.state = GameMap.STATE_WAITING_TO_PLAY
self.winner_team = -1 ##< if map state is GameMap.STATE_GAME_OVER, this holds the winning team (-1 = draw)
self.game_number = game_number
self.max_games = max_games
self.earthquake_time_left = 0
self.time_from_start = 0 ##< time in ms from the start of the map, the time increases with each update (so time spent in game menu is excluded)
block_tiles = []
# function call to translate map data on tiles into MapTile objects
self.tile_translator(string_split[3], block_tiles)
# place items under the block tiles:
for i in xrange(len(string_split[2])):
random_tile = random.choice(block_tiles)
random_tile.item = self.letter_to_item(string_split[2][i])
block_tiles.remove(random_tile)
# init danger map:
self.danger_map = [[GameMap.SAFE_DANGER_VALUE for i in xrange(GameMap.MAP_WIDTH)] for j in xrange(GameMap.MAP_HEIGHT)] ##< 2D array of times in ms for each square that
# initialise players:
self.players = [] ##< list of players in the game
self.players_by_numbers = {} ##< mapping of numbers to players
self.players_by_numbers[-1] = None
player_slots = play_setup.get_slots()
for i in xrange(len(player_slots)):
if player_slots[i] != None:
new_player = Player()
new_player.set_number(i)
new_player.set_team_number(player_slots[i][1])
new_player.move_to_tile_center(self.starting_positions[i])
self.players.append(new_player)
self.players_by_numbers[i] = new_player
else:
self.players_by_numbers[i] = None
# give players starting items:
start_items_string = string_split[1] if not all_items_cheat else "bbbbbFkxtsssssmp"
self.player_starting_items = []
for i in xrange(len(start_items_string)):
for player in self.players:
item_to_give = self.letter_to_item(start_items_string[i])
player.give_item(item_to_give)
self.player_starting_items.append(item_to_give)
self.bombs = [] ##< bombs on the map
self.sound_events = [] ##< list of currently happening sound event (see SoundPlayer class)
self.animation_events = [] ##< list of animation events, tuples in format (animation_event, coordinates)
self.items_to_give_away = [] ##< list of tuples in format (time_of_giveaway, list_of_items)
self.create_disease_cloud_at = 0 ##< at what time (in ms) the disease clouds should be released
#----------------------------------------------------------------------------
def tile_translator(self, tileData, block_tiles):
teleport_a_tile = None # helper variables used to pair teleports
teleport_b_tile = None
self.number_of_blocks = 0 ##< says how many block tiles there are currently on the map
column = 0
line = -1
for i in xrange(len(tileData)):
tile_character = tileData[i]
if i % GameMap.MAP_WIDTH == 0: # add new row
line += 1
column = 0
self.tiles.append([])
tile = MapTile((column,line))
if tile_character == "x":
tile.kind = MapTile.TILE_BLOCK
block_tiles.append(tile)
elif tile_character == "#":
tile.kind = MapTile.TILE_WALL
elif tile_character in ("u","r","d","l","U","R","D","L"):
if tile_character.islower():
tile.kind = MapTile.TILE_FLOOR
else:
tile.kind = MapTile.TILE_BLOCK
tile_character = tile_character.lower()
if tile_character == "u":
tile.special_object = MapTile.SPECIAL_OBJECT_ARROW_UP
elif tile_character == "r":
tile.special_object = MapTile.SPECIAL_OBJECT_ARROW_RIGHT
elif tile_character == "d":
tile.special_object = MapTile.SPECIAL_OBJECT_ARROW_DOWN
else:
tile.special_object = MapTile.SPECIAL_OBJECT_ARROW_LEFT
else:
tile.kind = MapTile.TILE_FLOOR
if tile_character == "A":
tile.special_object = MapTile.SPECIAL_OBJECT_TELEPORT_A
if teleport_a_tile == None:
teleport_a_tile = tile
else:
tile.destination_teleport = teleport_a_tile.coordinates
teleport_a_tile.destination_teleport = tile.coordinates
elif tile_character == "B":
tile.special_object = MapTile.SPECIAL_OBJECT_TELEPORT_B
if teleport_b_tile == None:
teleport_b_tile = tile
else:
tile.destination_teleport = teleport_b_tile.coordinates
teleport_b_tile.destination_teleport = tile.coordinates
elif tile_character == "T":
tile.special_object = MapTile.SPECIAL_OBJECT_TRAMPOLINE
elif tile_character == "V":
tile.special_object = MapTile.SPECIAL_OBJECT_LAVA
if tile.kind == MapTile.TILE_BLOCK:
self.number_of_blocks += 1
self.tiles[-1].append(tile)
if tile_character.isdigit():
self.starting_positions[int(tile_character)] = (float(column),float(line))
column += 1
#----------------------------------------------------------------------------
def get_starting_items(self):
return self.player_starting_items
#----------------------------------------------------------------------------
def get_starting_positions(self):
return self.starting_positions
#----------------------------------------------------------------------------
## Returns a tuple (game number, max games).
def get_game_number_info(self):
return (self.game_number,self.max_games)
#----------------------------------------------------------------------------
def start_earthquake(self):
self.earthquake_time_left = GameMap.EARTHQUAKE_DURATION
#----------------------------------------------------------------------------
def earthquake_is_active(self):
return self.earthquake_time_left > 0
#----------------------------------------------------------------------------
def get_number_of_block_tiles(self):
return self.number_of_blocks
#----------------------------------------------------------------------------
## Efficiently (lazily) gets a danger value of given tile. Danger value says
# how much time in ms will pass until there is a fire at the tile.
def get_danger_value(self, tile_coordinates):
if not self.danger_map_is_up_to_date:
self.update_danger_map()
self.danger_map_is_up_to_date = True
if not self.tile_is_withing_map(tile_coordinates):
return 0 # never walk outside map
return self.danger_map[tile_coordinates[1]][tile_coordinates[0]]
#----------------------------------------------------------------------------
def tile_has_lava(self, tile_coordinates):
if not self.tile_is_withing_map(tile_coordinates):
return False
return self.tiles[tile_coordinates[1]][tile_coordinates[0]].special_object == MapTile.SPECIAL_OBJECT_LAVA
#----------------------------------------------------------------------------
## Gives away a set of given items (typically after a player dies). The items
# are spread randomly on the map floor tiles after a while.
def give_away_items(self, items):
self.items_to_give_away.append((pygame.time.get_ticks() + GameMap.GIVE_AWAY_DELAY,items))
#----------------------------------------------------------------------------
def update_danger_map(self):
# reset the map:
self.danger_map = [map(lambda tile: 0 if tile.shouldnt_walk() else GameMap.SAFE_DANGER_VALUE, tile_row) for tile_row in self.tiles]
for bomb in self.bombs:
bomb_tile = bomb.get_tile_position()
time_until_explosion = bomb.time_until_explosion()
if bomb.has_detonator(): # detonator = bad
time_until_explosion = 100
self.danger_map[bomb_tile[1]][bomb_tile[0]] = min(self.danger_map[bomb_tile[1]][bomb_tile[0]],time_until_explosion)
# up right down left
position = [[bomb_tile[0],bomb_tile[1] - 1], [bomb_tile[0] + 1,bomb_tile[1]], [bomb_tile[0],bomb_tile[1] + 1], [bomb_tile[0] - 1,bomb_tile[1]]]
flame_stop = [False, False, False, False]
tile_increment = [(0,-1), (1,0), (0,1), (-1,0)]
for i in xrange(bomb.flame_length):
for direction in (0,1,2,3):
if flame_stop[direction]:
continue
if not self.tile_is_walkable(position[direction]) or not self.tile_is_withing_map(position[direction]):
flame_stop[direction] = True
continue
current_tile = position[direction]
self.danger_map[current_tile[1]][current_tile[0]] = min(self.danger_map[current_tile[1]][current_tile[0]],time_until_explosion)
position[direction][0] += tile_increment[direction][0]
position[direction][1] += tile_increment[direction][1]
#----------------------------------------------------------------------------
def add_sound_event(self, sound_event):
self.sound_events.append(sound_event)
#----------------------------------------------------------------------------
def add_animation_event(self, animation_event, coordinates):
self.animation_events.append((animation_event,coordinates))
#----------------------------------------------------------------------------
def get_tile_at(self, tile_coordinates):
if self.tile_is_withing_map(tile_coordinates):
return self.tiles[tile_coordinates[1]][tile_coordinates[0]]
return None
#----------------------------------------------------------------------------
def get_and_clear_sound_events(self):
result = self.sound_events[:] # copy of the list
self.sound_events = []
return result
#----------------------------------------------------------------------------
def get_and_clear_animation_events(self):
result = self.animation_events[:] # copy of the list
self.animation_events = []
return result
#----------------------------------------------------------------------------
## Converts given letter (as in map encoding string) to item code (see class constants).
def letter_to_item(self, letter):
mapping = {
"f": GameMap.ITEM_FLAME,
"F": GameMap.ITEM_SUPERFLAME,
"b": GameMap.ITEM_BOMB,
"k": GameMap.ITEM_SHOE,
"s": GameMap.ITEM_SPEEDUP,
"p": GameMap.ITEM_SPRING,
"m": GameMap.ITEM_MULTIBOMB,
"d": GameMap.ITEM_DISEASE,
"r": GameMap.ITEM_RANDOM,
"x": GameMap.ITEM_BOXING_GLOVE,
"e": GameMap.ITEM_DETONATOR,
"t": GameMap.ITEM_THROWING_GLOVE
}
return mapping[letter] if letter in mapping else -1
#----------------------------------------------------------------------------
def tile_has_flame(self, tile_coordinates):
if not self.tile_is_withing_map(tile_coordinates):
return False # coordinates outside the map
return len(self.tiles[tile_coordinates[1]][tile_coordinates[0]].flames) >= 1
#----------------------------------------------------------------------------
def tile_has_teleport(self, tile_coordinates):
tile_coordinates = Positionable.position_to_tile(tile_coordinates)
if not self.tile_is_withing_map(tile_coordinates):
return False # coordinates outside the map
return self.tiles[tile_coordinates[1]][tile_coordinates[0]].special_object in (MapTile.SPECIAL_OBJECT_TELEPORT_A,MapTile.SPECIAL_OBJECT_TELEPORT_B)
#----------------------------------------------------------------------------
def bomb_on_tile(self, tile_coordinates):
bombs = self.bombs_on_tile(tile_coordinates)
if len(bombs) > 0:
return bombs[0]
return None
#----------------------------------------------------------------------------
## Checks if there is a bomb at given tile (coordinates may be float or int).
def tile_has_bomb(self, tile_coordinates):
return self.bomb_on_tile(tile_coordinates) != None
#----------------------------------------------------------------------------
def get_players_at_tile(self, tile_coordinates):
result = []
for player in self.players:
player_tile_position = player.get_tile_position()
if not player.is_dead() and not player.is_in_air() and player_tile_position[0] == tile_coordinates[0] and player_tile_position[1] == tile_coordinates[1]:
result.append(player)
return result
#----------------------------------------------------------------------------
def tile_has_player(self, tile_coordinates):
return len(self.get_players_at_tile(tile_coordinates))
#----------------------------------------------------------------------------
## Checks if given tile coordinates are within the map boundaries.
def tile_is_withing_map(self, tile_coordinates):
return tile_coordinates[0] >= 0 and tile_coordinates[1] >= 0 and tile_coordinates[0] <= GameMap.MAP_WIDTH - 1 and tile_coordinates[1] <= GameMap.MAP_HEIGHT - 1
#----------------------------------------------------------------------------
def tile_is_walkable(self, tile_coordinates):
if not self.tile_is_withing_map(tile_coordinates):
return False
tile = self.tiles[tile_coordinates[1]][tile_coordinates[0]]
return (tile.kind == MapTile.TILE_FLOOR or tile.to_be_destroyed) and not self.tile_has_bomb(tile_coordinates)
#----------------------------------------------------------------------------
## Gets a collision type (see class constants) for given float position.
def get_position_collision_type(self, position):
tile_coordinates = Positionable.position_to_tile(position)
if not self.tile_is_walkable(tile_coordinates):
return GameMap.COLLISION_TOTAL
position_within_tile = (position[0] % 1,position[1] % 1)
if position_within_tile[1] < GameMap.WALL_MARGIN_HORIZONTAL:
if not self.tile_is_walkable((tile_coordinates[0],tile_coordinates[1] - 1)):
return GameMap.COLLISION_BORDER_UP
elif position_within_tile[1] > 1.0 - GameMap.WALL_MARGIN_HORIZONTAL:
if not self.tile_is_walkable((tile_coordinates[0],tile_coordinates[1] + 1)):
return GameMap.COLLISION_BORDER_DOWN
if position_within_tile[0] < GameMap.WALL_MARGIN_VERTICAL:
if not self.tile_is_walkable((tile_coordinates[0] - 1,tile_coordinates[1])):
return GameMap.COLLISION_BORDER_LEFT
elif position_within_tile[0] > 1.0 - GameMap.WALL_MARGIN_VERTICAL:
if not self.tile_is_walkable((tile_coordinates[0] + 1,tile_coordinates[1])):
return GameMap.COLLISION_BORDER_RIGHT
return GameMap.COLLISION_NONE
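# Illustrative (hypothetical) case: position (2.1, 3.5) has within-tile
# offset (0.1, 0.5); since 0.1 < WALL_MARGIN_VERTICAL, the result is
# COLLISION_BORDER_LEFT if tile (1, 3) is not walkable, COLLISION_NONE
# otherwise.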
#----------------------------------------------------------------------------
def bombs_on_tile(self, tile_coordinates):
result = []
tile_coordinates = Positionable.position_to_tile(tile_coordinates)
for bomb in self.bombs:
bomb_tile_position = bomb.get_tile_position()
if bomb.movement != Bomb.BOMB_FLYING and bomb_tile_position[0] == tile_coordinates[0] and bomb_tile_position[1] == tile_coordinates[1]:
result.append(bomb)
return result
#----------------------------------------------------------------------------
## Gets time in ms spent in actual game from the start of the map.
def get_map_time(self):
return self.time_from_start
#----------------------------------------------------------------------------
## Tells the map that given bomb is exploding, the map then creates
# flames from the bomb, the bomb is destroyed and players are informed.
def bomb_explodes(self, bomb):
self.add_sound_event(SoundPlayer.SOUND_EVENT_EXPLOSION)
bomb_position = bomb.get_tile_position()
new_flame = Flame()
new_flame.player = bomb.player
new_flame.direction = "all"
self.tiles[bomb_position[1]][bomb_position[0]].flames.append(new_flame)
# information relevant to flame spreading in each direction:
# up right down left
axis_position = [bomb_position[1] - 1,bomb_position[0] + 1,bomb_position[1] + 1,bomb_position[0] - 1]
flame_stop = [False, False, False, False]
map_limit = [0, GameMap.MAP_WIDTH - 1, GameMap.MAP_HEIGHT - 1, 0]
increment = [-1, 1, 1, -1]
goes_horizontally = [False, True, False, True]
previous_flame = [None, None, None, None]
# spread the flame in all 4 directions:
for i in xrange(bomb.flame_length + 1):
if i >= bomb.flame_length:
flame_stop = [True, True, True, True]
for direction in (0,1,2,3): # for each direction
if flame_stop[direction]:
if previous_flame[direction] != None: # flame stopped in previous iteration
previous_flame[direction].direction = {0: "up", 1: "right", 2: "down", 3: "left"}[direction]
previous_flame[direction] = None
else:
if ((increment[direction] == -1 and axis_position[direction] >= map_limit[direction]) or
(increment[direction] == 1 and axis_position[direction] <= map_limit[direction])):
# flame is inside the map here
if goes_horizontally[direction]:
tile_for_flame = self.tiles[bomb_position[1]][axis_position[direction]]
else:
tile_for_flame = self.tiles[axis_position[direction]][bomb_position[0]]
if tile_for_flame.kind == MapTile.TILE_WALL:
flame_stop[direction] = True
else:
new_flame2 = copy.copy(new_flame)
new_flame2.direction = "horizontal" if goes_horizontaly[direction] else "vertical"
tile_for_flame.flames.append(new_flame2)
previous_flame[direction] = new_flame2
if tile_for_flame.kind == MapTile.TILE_BLOCK:
flame_stop[direction] = True
else:
flame_stop[direction] = True
axis_position[direction] += increment[direction]
bomb.explodes()
if bomb in self.bombs:
self.bombs.remove(bomb)
#----------------------------------------------------------------------------
def spread_items(self, items):
possible_tiles = []
for y in xrange(GameMap.MAP_HEIGHT):
for x in xrange(GameMap.MAP_WIDTH):
tile = self.tiles[y][x]
if tile.kind == MapTile.TILE_FLOOR and tile.special_object == None and tile.item == None and not self.tile_has_player((x,y)):
possible_tiles.append(tile)
for item in items:
if len(possible_tiles) == 0:
break # no more tiles to place items on => end
tile = random.choice(possible_tiles)
tile.item = item
possible_tiles.remove(tile)
#----------------------------------------------------------------------------
def __update_bombs(self, dt):
i = 0
while i < len(self.bombs): # update all bombs
bomb = self.bombs[i]
if bomb.has_exploded: # just in case
self.bombs.remove(bomb)
continue
bomb.time_of_existence += dt
bomb_position = bomb.get_position()
bomb_tile = bomb.get_tile_position()
if bomb.movement != Bomb.BOMB_FLYING and bomb.time_of_existence > bomb.explodes_in + bomb.detonator_time: # bomb explodes
self.bomb_explodes(bomb)
continue
elif bomb.movement != Bomb.BOMB_FLYING and self.tiles[bomb_tile[1]][bomb_tile[0]].special_object == MapTile.SPECIAL_OBJECT_LAVA and bomb.is_near_tile_center():
self.bomb_explodes(bomb)
continue
else:
i += 1
if bomb.movement != Bomb.BOMB_NO_MOVEMENT:
if bomb.movement == Bomb.BOMB_FLYING:
distance_to_travel = dt / 1000.0 * Bomb.FLYING_SPEED
bomb.flight_info.distance_travelled += distance_to_travel
if bomb.flight_info.distance_travelled >= bomb.flight_info.total_distance_to_travel:
bomb_tile = bomb.get_tile_position()
self.add_sound_event(SoundPlayer.SOUND_EVENT_BOMB_PUT)
if not self.tile_is_walkable(bomb_tile) or self.tile_has_player(bomb_tile) or self.tile_has_teleport(bomb_tile):
destination_tile = (bomb_tile[0] + bomb.flight_info.direction[0],bomb_tile[1] + bomb.flight_info.direction[1])
bomb.send_flying(destination_tile)
else: # bomb lands
bomb.movement = Bomb.BOMB_NO_MOVEMENT
self.get_tile_at(bomb_tile).item = None
else: # bomb rolling
if bomb.is_near_tile_center():
object_at_tile = self.tiles[bomb_tile[1]][bomb_tile[0]].special_object
redirected = False
if object_at_tile == MapTile.SPECIAL_OBJECT_ARROW_UP and bomb.movement != Bomb.BOMB_ROLLING_UP:
bomb.movement = Bomb.BOMB_ROLLING_UP
bomb.set_position((bomb_tile[0] + 0.5,bomb_tile[1])) # align with x axis
redirected = True
elif object_at_tile == MapTile.SPECIAL_OBJECT_ARROW_RIGHT and bomb.movement != Bomb.BOMB_ROLLING_RIGHT:
bomb.movement = Bomb.BOMB_ROLLING_RIGHT
bomb.set_position((bomb_position[0],bomb_tile[1] + 0.5))
redirected = True
elif object_at_tile == MapTile.SPECIAL_OBJECT_ARROW_DOWN and bomb.movement != Bomb.BOMB_ROLLING_DOWN:
bomb.movement = Bomb.BOMB_ROLLING_DOWN
bomb.set_position((bomb_tile[0] + 0.5,bomb_position[1]))
redirected = True
elif object_at_tile == MapTile.SPECIAL_OBJECT_ARROW_LEFT and bomb.movement != Bomb.BOMB_ROLLING_LEFT:
bomb.movement = Bomb.BOMB_ROLLING_LEFT
bomb.set_position((bomb_position[0],bomb_tile[1] + 0.5))
redirected = True
if redirected:
bomb_position = bomb.get_position()
if self.tiles[bomb_tile[1]][bomb_tile[0]].item != None: # rolling bomb destroys items
self.tiles[bomb_tile[1]][bomb_tile[0]].item = None
bomb_position_within_tile = (bomb_position[0] % 1,bomb_position[1] % 1)
check_collision = False
forward_tile = None
distance_to_travel = dt / 1000.0 * Bomb.ROLLING_SPEED
helper_boundaries = (0.5,0.9)
helper_boundaries2 = (1 - helper_boundaries[1],1 - helper_boundaries[0])
opposite_direction = Bomb.BOMB_NO_MOVEMENT
if bomb.movement == Bomb.BOMB_ROLLING_UP:
bomb.set_position((bomb_position[0],bomb_position[1] - distance_to_travel))
opposite_direction = Bomb.BOMB_ROLLING_DOWN
if helper_boundaries2[0] < bomb_position_within_tile[1] < helper_boundaries2[1]:
check_collision = True
forward_tile = (bomb_tile[0],bomb_tile[1] - 1)
elif bomb.movement == Bomb.BOMB_ROLLING_RIGHT:
bomb.set_position((bomb_position[0] + distance_to_travel,bomb_position[1]))
opposite_direction = Bomb.BOMB_ROLLING_LEFT
if helper_boundaries[0] < bomb_position_within_tile[0] < helper_boundaries[1]:
check_collision = True
forward_tile = (bomb_tile[0] + 1,bomb_tile[1])
elif bomb.movement == Bomb.BOMB_ROLLING_DOWN:
bomb.set_position((bomb_position[0],bomb_position[1] + distance_to_travel))
opposite_direction = Bomb.BOMB_ROLLING_UP
if helper_boundaries[0] < bomb_position_within_tile[1] < helper_boundaries[1]:
check_collision = True
forward_tile = (bomb_tile[0],bomb_tile[1] + 1)
elif bomb.movement == Bomb.BOMB_ROLLING_LEFT:
bomb.set_position((bomb_position[0] - distance_to_travel,bomb_position[1]))
opposite_direction = Bomb.BOMB_ROLLING_RIGHT
if helper_boundaries2[0] < bomb_position_within_tile[0] < helper_boundaries2[1]:
check_collision = True
forward_tile = (bomb_tile[0] - 1,bomb_tile[1])
if check_collision and (not self.tile_is_walkable(forward_tile) or self.tile_has_player(forward_tile) or self.tile_has_teleport(forward_tile)):
bomb.move_to_tile_center()
if bomb.has_spring:
bomb.movement = opposite_direction
self.add_sound_event(SoundPlayer.SOUND_EVENT_SPRING)
else:
bomb.movement = Bomb.BOMB_NO_MOVEMENT
self.add_sound_event(SoundPlayer.SOUND_EVENT_KICK)
#----------------------------------------------------------------------------
def __update_players(self, dt, immortal_player_numbers):
time_now = pygame.time.get_ticks()
release_disease_cloud = False
if time_now > self.create_disease_cloud_at:
self.create_disease_cloud_at = time_now + 200 # release the cloud every 200 ms
release_disease_cloud = True
for player in self.players:
if player.is_dead():
continue
if release_disease_cloud and player.get_disease() != Player.DISEASE_NONE:
self.add_animation_event(Renderer.ANIMATION_EVENT_DISEASE_CLOUD,Renderer.map_position_to_pixel_position(player.get_position(),(0,0)))
if self.winning_color == -1:
self.winning_color = player.get_team_number()
elif self.winning_color != player.get_team_number():
self.game_is_over = False
player_tile_position = player.get_tile_position()
player_tile = self.tiles[player_tile_position[1]][player_tile_position[0]]
if player.get_state() != Player.STATE_IN_AIR and player.get_state() != Player.STATE_TELEPORTING and (self.tile_has_flame(player_tile.coordinates) or self.tile_has_lava(player_tile.coordinates)):
# if player immortality cheat isn't activated
if not (player.get_number() in immortal_player_numbers):
flames = self.get_tile_at(player_tile.coordinates).flames
# assign kill counts
for flame in flames:
increase_kills_by = 1 if flame.player != player else -1 # self kill decreases the kill count
flame.player.set_kills(flame.player.get_kills() + increase_kills_by)
player.kill(self)
continue
if player_tile.item != None:
player.give_item(player_tile.item,self)
player_tile.item = None
if player.is_in_air():
if player.get_state_time() > Player.JUMP_DURATION / 2: # jump to destination tile in the middle of the flight
player.move_to_tile_center(player.get_jump_destination())
elif player.is_teleporting():
if player.get_state_time() > Player.TELEPORT_DURATION / 2:
player.move_to_tile_center(player.get_teleport_destination())
elif player_tile.special_object == MapTile.SPECIAL_OBJECT_TRAMPOLINE and player.is_near_tile_center():
player.send_to_air(self)
elif (player_tile.special_object == MapTile.SPECIAL_OBJECT_TELEPORT_A or player_tile.special_object == MapTile.SPECIAL_OBJECT_TELEPORT_B) and player.is_near_tile_center():
player.teleport(self)
elif player.get_disease() != Player.DISEASE_NONE:
players_at_tile = self.get_players_at_tile(player_tile_position)
transmitted = False
for player_at_tile in players_at_tile:
if player_at_tile.get_disease() == Player.DISEASE_NONE:
transmitted = True
player_at_tile.set_disease(player.get_disease(),player.get_disease_time()) # transmit disease
#----------------------------------------------------------------------------
## Updates some things on the map that change with time.
def update(self, dt, immortal_player_numbers=[]):
self.time_from_start += dt
self.danger_map_is_up_to_date = False # reset this each frame
i = 0
self.earthquake_time_left = max(0,self.earthquake_time_left - dt)
while i < len(self.items_to_give_away): # giving away items of dead players
item = self.items_to_give_away[i]
if self.time_from_start >= item[0]:
self.spread_items(item[1])
self.items_to_give_away.remove(item)
debug_log("giving away items")
i += 1
self.__update_bombs(dt)
for line in self.tiles:
for tile in line:
if tile.to_be_destroyed and tile.kind == MapTile.TILE_BLOCK and not self.tile_has_flame(tile.coordinates):
tile.kind = MapTile.TILE_FLOOR
self.number_of_blocks -= 1
tile.to_be_destroyed = False
i = 0
while True:
if i >= len(tile.flames):
break
if tile.kind == MapTile.TILE_BLOCK: # flame on a block tile -> destroy the block
tile.to_be_destroyed = True
elif tile.kind == MapTile.TILE_FLOOR and tile.item != None:
tile.item = None # flame destroys the item
bombs_inside_flame = self.bombs_on_tile(tile.coordinates)
for bomb in bombs_inside_flame: # bomb inside flame -> detonate it
self.bomb_explodes(bomb)
flame = tile.flames[i]
flame.time_to_burnout -= dt
if flame.time_to_burnout < 0:
tile.flames.remove(flame)
i += 1
self.game_is_over = True
self.winning_color = -1
self.__update_players(dt,immortal_player_numbers)
if self.state == GameMap.STATE_WAITING_TO_PLAY:
if self.time_from_start >= self.start_game_at:
self.state = GameMap.STATE_PLAYING
self.add_sound_event(SoundPlayer.SOUND_EVENT_GO)
if self.state == GameMap.STATE_FINISHING:
if self.time_from_start >= self.end_game_at:
self.state = GameMap.STATE_GAME_OVER
elif not self.win_announced:
if self.time_from_start >= self.announce_win_at:
self.add_sound_event(SoundPlayer.SOUND_EVENT_WIN_0 + self.winner_team)
self.win_announced = True
elif self.state != GameMap.STATE_GAME_OVER and self.game_is_over:
self.end_game_at = self.time_from_start + 5000
self.state = GameMap.STATE_FINISHING
self.winner_team = self.winning_color
self.announce_win_at = self.time_from_start + 2000
#----------------------------------------------------------------------------
def get_winner_team(self):
return self.winner_team
#----------------------------------------------------------------------------
def get_state(self):
return self.state
#----------------------------------------------------------------------------
def add_bomb(self, bomb):
self.bombs.append(bomb)
#----------------------------------------------------------------------------
def get_bombs(self):
return self.bombs
#----------------------------------------------------------------------------
def get_environment_name(self):
return self.environment_name
#----------------------------------------------------------------------------
def get_players(self):
return self.players
#----------------------------------------------------------------------------
## Gets a dict that maps numbers to players (with Nones if player with given number doesn't exist).
def get_players_by_numbers(self):
return self.players_by_numbers
#----------------------------------------------------------------------------
def get_tiles(self):
return self.tiles
#----------------------------------------------------------------------------
def __str__(self):
result = ""
for line in self.tiles:
for tile in line:
if tile.kind == MapTile.TILE_FLOOR:
result += " "
elif tile.kind == MapTile.TILE_BLOCK:
result += "x"
else:
result += "#"
result += "\n"
return result
#==============================================================================
## Defines how a game is set up, i.e. how many players
# there are, what are the teams etc. Setup does not include
# the selected map.
class PlaySetup(object):
MAX_GAMES = 20
#----------------------------------------------------------------------------
def __init__(self):
self.player_slots = [None for i in xrange(10)] ##< player slots: (player_number, team_color),
# negative player_number = AI, slot index ~ player color index
self.number_of_games = 10
# default setup, player 0 vs 3 AI players:
self.player_slots[0] = (0,0)
self.player_slots[1] = (-1,1)
self.player_slots[2] = (-1,2)
self.player_slots[3] = (-1,3)
#----------------------------------------------------------------------------
def get_slots(self):
return self.player_slots
#----------------------------------------------------------------------------
def get_number_of_games(self):
return self.number_of_games
#----------------------------------------------------------------------------
def set_number_of_games(self, number_of_games):
self.number_of_games = number_of_games
#----------------------------------------------------------------------------
def increase_number_of_games(self):
self.number_of_games = self.number_of_games % PlaySetup.MAX_GAMES + 1
#----------------------------------------------------------------------------
def decrease_number_of_games(self):
self.number_of_games = (self.number_of_games - 2) % PlaySetup.MAX_GAMES + 1
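# Wraparound sketch (hypothetical values): with MAX_GAMES = 20, increasing
# from 20 gives 20 % 20 + 1 = 1 and decreasing from 1 gives
# (1 - 2) % 20 + 1 = 20, so the counter cycles through 1..20.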
#==============================================================================
## Something that can be saved/loaded to/from string.
class StringSerializable(object):
#----------------------------------------------------------------------------
def save_to_string(self):
return ""
#----------------------------------------------------------------------------
def load_from_string(self, input_string):
return
#----------------------------------------------------------------------------
def save_to_file(self, filename):
text_file = open(filename,"w")
text_file.write(self.save_to_string())
text_file.close()
#----------------------------------------------------------------------------
def load_from_file(self, filename):
with open(filename,"r") as text_file:
self.load_from_string(text_file.read())
#==============================================================================
## Handles conversion of keyboard events to actions of players, plus general
# actions (such as menu, ...). Also manages some more complex input processing.
class PlayerKeyMaps(StringSerializable):
ACTION_UP = 0
ACTION_RIGHT = 1
ACTION_DOWN = 2
ACTION_LEFT = 3
ACTION_BOMB = 4
ACTION_SPECIAL = 5
ACTION_MENU = 6 ##< brings up the main menu
ACTION_BOMB_DOUBLE = 7
MOUSE_CONTROL_UP = -1
MOUSE_CONTROL_RIGHT = -2
MOUSE_CONTROL_DOWN = -3
MOUSE_CONTROL_LEFT = -4
MOUSE_CONTROL_BUTTON_L = -5
MOUSE_CONTROL_BUTTON_M = -6
MOUSE_CONTROL_BUTTON_R = -7
MOUSE_CONTROL_BIAS = 2 ##< mouse movement bias in pixels
TYPED_STRING_BUFFER_LENGTH = 15
ACTION_NAMES = {
ACTION_UP : "up",
ACTION_RIGHT : "right",
ACTION_DOWN : "down",
ACTION_LEFT : "left",
ACTION_BOMB : "bomb",
ACTION_SPECIAL : "special",
ACTION_MENU : "menu",
ACTION_BOMB_DOUBLE : "bomb double"
}
MOUSE_ACTION_NAMES = {
MOUSE_CONTROL_UP : "m up",
MOUSE_CONTROL_RIGHT : "m right",
MOUSE_CONTROL_DOWN : "m down",
MOUSE_CONTROL_LEFT : "m left",
MOUSE_CONTROL_BUTTON_L : "m L",
MOUSE_CONTROL_BUTTON_M : "m M",
MOUSE_CONTROL_BUTTON_R : "m R"
}
MOUSE_CONTROL_SMOOTH_OUT_TIME = 50
#----------------------------------------------------------------------------
def __init__(self):
self.key_maps = {} ##< maps keys to tuples of a format: (player_number, action), for general actions player_number will be -1
self.bomb_key_last_pressed_time = [0 for i in xrange(10)] ##< for bomb double press detection
self.bomb_key_previous_state = [False for i in xrange(10)] ##< for bomb double press detection
self.allow_mouse_control = False ##< if true, player movement by mouse is allowed, otherwise not
mouse_control_constants = [
PlayerKeyMaps.MOUSE_CONTROL_UP,
PlayerKeyMaps.MOUSE_CONTROL_RIGHT,
PlayerKeyMaps.MOUSE_CONTROL_DOWN,
PlayerKeyMaps.MOUSE_CONTROL_LEFT,
PlayerKeyMaps.MOUSE_CONTROL_BUTTON_L,
PlayerKeyMaps.MOUSE_CONTROL_BUTTON_M,
PlayerKeyMaps.MOUSE_CONTROL_BUTTON_R]
self.mouse_control_states = {}
self.mouse_control_keep_until = {} ##< time in which specified control was activated,
# helps keep them active for a certain amount of time to smooth them out
for item in mouse_control_constants:
self.mouse_control_states[item] = False
self.mouse_control_keep_until[item] = 0
self.mouse_button_states = [False,False,False,False,False] ##< (left, right, middle, wheel up, wheel down)
self.previous_mouse_button_states = [False,False,False,False,False]
self.last_mouse_update_frame = -1
self.name_code_mapping = {} # holds a mapping of key names to pygame key codes, since pygame itself offers no such functionality
keys_pressed = pygame.key.get_pressed()
for key_code in xrange(len(keys_pressed)):
self.name_code_mapping[pygame.key.name(key_code)] = key_code
self.typed_string_buffer = [" " for i in xrange(PlayerKeyMaps.TYPED_STRING_BUFFER_LENGTH)]
self.reset()
#----------------------------------------------------------------------------
def pygame_name_to_key_code(self, pygame_name):
try:
return self.name_code_mapping[pygame_name]
except KeyError:
return -1
#----------------------------------------------------------------------------
## Returns a state of mouse buttons including mouse wheel (unlike pygame.mouse.get_pressed) as
# a tuple (left, right, middle, wheel up, wheel down).
def get_mouse_button_states(self):
return self.mouse_button_states
#----------------------------------------------------------------------------
## Returns a tuple corresponding to mouse buttons (same as get_mouse_button_states) where each
# item says if the button has been pressed since the last frame.
def get_mouse_button_events(self):
result = []
for i in xrange(5):
result.append(self.mouse_button_states[i] and not self.previous_mouse_button_states[i])
return result
#----------------------------------------------------------------------------
## This informs the object about pygame events so it can keep track of some input states.
def process_pygame_events(self, pygame_events, frame_number):
if frame_number != self.last_mouse_update_frame:
# first time calling this function this frame => reset states
for i in xrange(5): # for each of 5 buttons
self.previous_mouse_button_states[i] = self.mouse_button_states[i]
button_states = pygame.mouse.get_pressed()
self.mouse_button_states[0] = button_states[0]
self.mouse_button_states[1] = button_states[2]
self.mouse_button_states[2] = button_states[1]
self.mouse_button_states[3] = False
self.mouse_button_states[4] = False
self.last_mouse_update_frame = frame_number
for pygame_event in pygame_events:
if pygame_event.type == pygame.MOUSEBUTTONDOWN:
if pygame_event.button == 4:
self.mouse_button_states[3] = True
elif pygame_event.button == 5:
self.mouse_button_states[4] = True
elif pygame_event.type == pygame.KEYDOWN:
try:
self.typed_string_buffer = self.typed_string_buffer[1:]
self.typed_string_buffer.append(chr(pygame_event.key))
except Exception:
debug_log("couldn't append typed character to the buffer")
#----------------------------------------------------------------------------
def clear_typing_buffer(self):
self.typed_string_buffer = [" " for i in xrange(PlayerKeyMaps.TYPED_STRING_BUFFER_LENGTH)]
#----------------------------------------------------------------------------
def string_was_typed(self, string):
return str.find("".join(self.typed_string_buffer),string) >= 0
#----------------------------------------------------------------------------
def reset(self):
self.allow_control_by_mouse(False)
self.set_player_key_map(0,pygame.K_w,pygame.K_d,pygame.K_s,pygame.K_a,pygame.K_c,pygame.K_v)
self.set_player_key_map(1,pygame.K_UP,pygame.K_RIGHT,pygame.K_DOWN,pygame.K_LEFT,pygame.K_RETURN,pygame.K_RSHIFT)
self.set_player_key_map(2,pygame.K_u,pygame.K_k,pygame.K_j,pygame.K_h,pygame.K_o,pygame.K_p)
self.set_player_key_map(3,PlayerKeyMaps.MOUSE_CONTROL_UP,PlayerKeyMaps.MOUSE_CONTROL_RIGHT,PlayerKeyMaps.MOUSE_CONTROL_DOWN,PlayerKeyMaps.MOUSE_CONTROL_LEFT,PlayerKeyMaps.MOUSE_CONTROL_BUTTON_L,PlayerKeyMaps.MOUSE_CONTROL_BUTTON_R)
self.set_special_key_map(pygame.K_ESCAPE)
#----------------------------------------------------------------------------
##< Gets a direction of given action (0 - up, 1 - right, 2 - down, 3 - left).
@staticmethod
def get_action_direction_number(action):
if action == PlayerKeyMaps.ACTION_UP:
return 0
elif action == PlayerKeyMaps.ACTION_RIGHT:
return 1
elif action == PlayerKeyMaps.ACTION_DOWN:
return 2
elif action == PlayerKeyMaps.ACTION_LEFT:
return 3
return 0
#----------------------------------------------------------------------------
@staticmethod
def get_opposite_action(action):
if action == PlayerKeyMaps.ACTION_UP:
return PlayerKeyMaps.ACTION_DOWN
elif action == PlayerKeyMaps.ACTION_RIGHT:
return PlayerKeyMaps.ACTION_LEFT
elif action == PlayerKeyMaps.ACTION_DOWN:
return PlayerKeyMaps.ACTION_UP
elif action == PlayerKeyMaps.ACTION_LEFT:
return PlayerKeyMaps.ACTION_RIGHT
return action
#----------------------------------------------------------------------------
@staticmethod
def key_to_string(key):
if key == None:
return "none"
if key in PlayerKeyMaps.MOUSE_ACTION_NAMES:
result = PlayerKeyMaps.MOUSE_ACTION_NAMES[key]
else:
result = pygame.key.name(key)
if result == "unknown key":
result = str(key)
return result
#----------------------------------------------------------------------------
def set_one_key_map(self, key, player_number, action):
if key != None:
self.key_maps[key] = (player_number,action)
to_be_deleted = []
for item in self.key_maps: # get rid of possible collisions
if item != key and self.key_maps[item] == (player_number,action):
to_be_deleted.append(item)
for item in to_be_deleted:
del self.key_maps[item]
#----------------------------------------------------------------------------
## Sets a key mapping for a player of specified (non-negative) number.
def set_player_key_map(self, player_number, key_up, key_right, key_down, key_left, key_bomb, key_special):
self.set_one_key_map(key_up,player_number,PlayerKeyMaps.ACTION_UP)
self.set_one_key_map(key_right,player_number,PlayerKeyMaps.ACTION_RIGHT)
self.set_one_key_map(key_down,player_number,PlayerKeyMaps.ACTION_DOWN)
self.set_one_key_map(key_left,player_number,PlayerKeyMaps.ACTION_LEFT)
self.set_one_key_map(key_bomb,player_number,PlayerKeyMaps.ACTION_BOMB)
self.set_one_key_map(key_special,player_number,PlayerKeyMaps.ACTION_SPECIAL)
#----------------------------------------------------------------------------
## Gets a dict that says how keys are mapped for a specific player. Format: {action_code : key_code, ...}, the
# dict will contain all actions and possibly None values for unmapped actions.
def get_players_key_mapping(self, player_number):
result = {action : None for action in (
PlayerKeyMaps.ACTION_UP,
PlayerKeyMaps.ACTION_RIGHT,
PlayerKeyMaps.ACTION_DOWN,
PlayerKeyMaps.ACTION_LEFT,
PlayerKeyMaps.ACTION_BOMB,
PlayerKeyMaps.ACTION_SPECIAL)}
for key in self.key_maps:
if self.key_maps[key][0] == player_number:
result[self.key_maps[key][1]] = key
return result
#----------------------------------------------------------------------------
def allow_control_by_mouse(self, allow=True):
self.allow_mouse_control = allow
#----------------------------------------------------------------------------
def set_special_key_map(self, key_menu):
self.set_one_key_map(key_menu,-1,PlayerKeyMaps.ACTION_MENU)
#----------------------------------------------------------------------------
## Makes a human-readable string that represents the current key-mapping.
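# The format is one "player action: key code" pair per line, with action names
# taken from PlayerKeyMaps.ACTION_NAMES and numeric pygame key constants, e.g.
# "1 up: 119" (assuming ACTION_NAMES maps ACTION_UP to "up"; 119 is pygame.K_w).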
def save_to_string(self):
result = ""
for i in xrange(Game.NUMBER_OF_CONTROLLED_PLAYERS): # 4 players
mapping = self.get_players_key_mapping(i)
for action in mapping:
result += str(i + 1) + " " + PlayerKeyMaps.ACTION_NAMES[action] + ": " + str(mapping[action]) + "\n"
result += PlayerKeyMaps.ACTION_NAMES[PlayerKeyMaps.ACTION_MENU] + ": " + str(self.get_menu_key_map())
return result
#----------------------------------------------------------------------------
## Loads the mapping from string produced by save_to_string(...).
def load_from_string(self, input_string):
self.key_maps = {}
lines = input_string.split("\n")
for line in lines:
line = line.strip()
try:
key = int(line[line.find(":") + 1:])
except Exception as e:
key = None
if line.find(PlayerKeyMaps.ACTION_NAMES[PlayerKeyMaps.ACTION_MENU]) == 0:
self.set_one_key_map(key,-1,PlayerKeyMaps.ACTION_MENU)
else:
player_number = int(line[0]) - 1
action_name = line[2:line.find(":")]
action = None
for helper_action in PlayerKeyMaps.ACTION_NAMES:
if PlayerKeyMaps.ACTION_NAMES[helper_action] == action_name:
action = helper_action
break
self.set_one_key_map(key,player_number,action)
#----------------------------------------------------------------------------
def get_menu_key_map(self):
for key in self.key_maps:
if self.key_maps[key][0] == -1:
return key
return None
#----------------------------------------------------------------------------
## Returns a list of mouse control actions currently being performed (if mouse
# control is not allowed, the list will always be empty).
def get_current_mouse_control_states(self):
result = []
if not self.allow_mouse_control:
return result
for mouse_action in self.mouse_control_states:
if self.mouse_control_states[mouse_action]:
result.append(mouse_action)
return result
#----------------------------------------------------------------------------
## From currently pressed keys makes a list of actions being currently performed and
# returns it, format: (player_number, action).
def get_current_actions(self):
keys_pressed = pygame.key.get_pressed()
result = []
reset_bomb_key_previous_state = [True for i in xrange(10)]
# check mouse control:
if self.allow_mouse_control:
screen_center = (Renderer.get_screen_size()[0] / 2,Renderer.get_screen_size()[1] / 2)
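# the cursor is warped back to the screen center every frame (see below), so its offset from the center gives the control direction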
mouse_position = pygame.mouse.get_pos()
pressed = pygame.mouse.get_pressed()
current_time = pygame.time.get_ticks()
for item in self.mouse_control_states: # reset
if current_time > self.mouse_control_keep_until[item]:
self.mouse_control_states[item] = False
dx = abs(mouse_position[0] - screen_center[0])
dy = abs(mouse_position[1] - screen_center[1])
if dx > dy: # choose the prevalent axis
d_value = dx
axis = 0
axis_forward = PlayerKeyMaps.MOUSE_CONTROL_RIGHT
axis_back = PlayerKeyMaps.MOUSE_CONTROL_LEFT
else:
axis = 1
axis_forward = PlayerKeyMaps.MOUSE_CONTROL_DOWN
axis_back = PlayerKeyMaps.MOUSE_CONTROL_UP
d_value = dy
if d_value > PlayerKeyMaps.MOUSE_CONTROL_BIAS:
forward = mouse_position[axis] > screen_center[axis]
self.mouse_control_states[axis_forward] = forward
self.mouse_control_states[axis_back] = not forward
self.mouse_control_keep_until[axis_forward if forward else axis_back] = current_time + PlayerKeyMaps.MOUSE_CONTROL_SMOOTH_OUT_TIME
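# keep the direction active for a short while (MOUSE_CONTROL_SMOOTH_OUT_TIME) to smooth out jittery mouse movement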
helper_buttons = (PlayerKeyMaps.MOUSE_CONTROL_BUTTON_L, PlayerKeyMaps.MOUSE_CONTROL_BUTTON_M, PlayerKeyMaps.MOUSE_CONTROL_BUTTON_R)
for i in xrange(3):
if pressed[i]:
self.mouse_control_states[helper_buttons[i]] = True
self.mouse_control_keep_until[helper_buttons[i]] = current_time
pygame.mouse.set_pos(screen_center) # warp the cursor back to the center
for key_code in self.key_maps:
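# negative key codes are reserved for the mouse control pseudo-keys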
try:
key_is_active = self.mouse_control_states[key_code] if key_code < 0 else keys_pressed[key_code]
except IndexError as e:
key_is_active = False
if key_is_active:
action_tuple = self.key_maps[key_code]
result.append(action_tuple)
if action_tuple[1] == PlayerKeyMaps.ACTION_BOMB:
player_number = action_tuple[0]
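# a bomb key pressed again within 200 ms of the previous press counts as a double press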
if self.bomb_key_previous_state[player_number] == False and pygame.time.get_ticks() - self.bomb_key_last_pressed_time[player_number] < 200:
result.append((player_number,PlayerKeyMaps.ACTION_BOMB_DOUBLE))
self.bomb_key_last_pressed_time[player_number] = pygame.time.get_ticks()
self.bomb_key_previous_state[player_number] = True
reset_bomb_key_previous_state[player_number] = False
for i in xrange(10):
if reset_bomb_key_previous_state[i]:
self.bomb_key_previous_state[i] = False
return result
#==============================================================================
class SoundPlayer(object):
# sound events used by other classes to tell soundplayer what to play
SOUND_EVENT_EXPLOSION = 0
SOUND_EVENT_BOMB_PUT = 1
SOUND_EVENT_WALK = 2
SOUND_EVENT_KICK = 3
SOUND_EVENT_DIARRHEA = 4
SOUND_EVENT_SPRING = 5
SOUND_EVENT_SLOW = 6
SOUND_EVENT_DISEASE = 7
SOUND_EVENT_CLICK = 8
SOUND_EVENT_THROW = 9
SOUND_EVENT_TRAMPOLINE = 10
SOUND_EVENT_TELEPORT = 11
SOUND_EVENT_DEATH = 12
SOUND_EVENT_WIN_0 = 13
SOUND_EVENT_WIN_1 = 14
SOUND_EVENT_WIN_2 = 15
SOUND_EVENT_WIN_3 = 16
SOUND_EVENT_WIN_4 = 17
SOUND_EVENT_WIN_5 = 18
SOUND_EVENT_WIN_6 = 19
SOUND_EVENT_WIN_7 = 20
SOUND_EVENT_WIN_8 = 21
SOUND_EVENT_WIN_9 = 22
SOUND_EVENT_GO_AWAY = 23
SOUND_EVENT_GO = 24
SOUND_EVENT_EARTHQUAKE = 25
SOUND_EVENT_CONFIRM = 26
#----------------------------------------------------------------------------
def __init__(self):
self.sound_volume = 0.5
self.music_volume = 0.5
self.sounds = {}
self.sounds[SoundPlayer.SOUND_EVENT_EXPLOSION] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"explosion.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_BOMB_PUT] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"bomb.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_WALK] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"footsteps.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_KICK] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"kick.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_SPRING] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"spring.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_DIARRHEA] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"fart.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_SLOW] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"slow.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_DISEASE] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"disease.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_CLICK] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"click.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_THROW] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"throw.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_TRAMPOLINE] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"trampoline.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_TELEPORT] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"teleport.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_DEATH] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"death.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_GO] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"go.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_EARTHQUAKE] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"earthquake.wav"))
self.sounds[SoundPlayer.SOUND_EVENT_CONFIRM] = pygame.mixer.Sound(os.path.join(Game.RESOURCE_PATH,"confirm.wav"))
self.music_filenames = [
"music_loyalty_freak_slow_pogo.wav",
"music_anonymous420_start_to_play.wav",
"music_anonymous420_first_step_for_your_tech.wav",
"music_anonymous420_echo_blues_effect.wav",
"music_loyalty_freak_music_enby.wav"
]
self.current_music_index = -1
self.playing_walk = False
self.kick_last_played_time = 0
#----------------------------------------------------------------------------
def play_once(self, filename):
sound = pygame.mixer.Sound(filename)
sound.set_volume(self.sound_volume)
sound.play()
#----------------------------------------------------------------------------
def set_music_volume(self, new_volume):
self.music_volume = new_volume if new_volume > Settings.SOUND_VOLUME_THRESHOLD else 0
debug_log("changing music volume to " + str(self.music_volume))
if new_volume > Settings.SOUND_VOLUME_THRESHOLD:
if not pygame.mixer.music.get_busy():
pygame.mixer.music.play()
pygame.mixer.music.set_volume(new_volume)
else:
pygame.mixer.music.stop()
#----------------------------------------------------------------------------
def set_sound_volume(self, new_volume):
self.sound_volume = new_volume if new_volume > Settings.SOUND_VOLUME_THRESHOLD else 0
debug_log("changing sound volume to " + str(self.sound_volume))
for sound in self.sounds:
self.sounds[sound].set_volume(self.sound_volume)
#----------------------------------------------------------------------------
def change_music(self):
while True:
new_music_index = random.randint(0,len(self.music_filenames) - 1)
if new_music_index == self.current_music_index:
continue
break
self.current_music_index = new_music_index
music_name = self.music_filenames[self.current_music_index]
debug_log("changing music to \"" + music_name + "\"")
pygame.mixer.music.stop()
pygame.mixer.music.load(os.path.join(Game.RESOURCE_PATH,music_name))
pygame.mixer.music.set_volume(self.music_volume)
pygame.mixer.music.play(-1)
#----------------------------------------------------------------------------
def play_sound_event(self,sound_event):
self.process_events([sound_event])
#----------------------------------------------------------------------------
## Processes a list of sound events (see class constants) by playing
# appropriate sounds.
def process_events(self, sound_event_list):
stop_playing_walk = True
for sound_event in sound_event_list:
if sound_event in ( # simple sound play
SoundPlayer.SOUND_EVENT_EXPLOSION,
SoundPlayer.SOUND_EVENT_CLICK,
SoundPlayer.SOUND_EVENT_BOMB_PUT,
SoundPlayer.SOUND_EVENT_SPRING,
SoundPlayer.SOUND_EVENT_DIARRHEA,
SoundPlayer.SOUND_EVENT_SLOW,
SoundPlayer.SOUND_EVENT_DISEASE,
SoundPlayer.SOUND_EVENT_THROW,
SoundPlayer.SOUND_EVENT_TRAMPOLINE,
SoundPlayer.SOUND_EVENT_TELEPORT,
SoundPlayer.SOUND_EVENT_DEATH,
SoundPlayer.SOUND_EVENT_GO,
SoundPlayer.SOUND_EVENT_EARTHQUAKE,
SoundPlayer.SOUND_EVENT_CONFIRM
):
self.sounds[sound_event].play()
elif sound_event == SoundPlayer.SOUND_EVENT_WALK:
if not self.playing_walk:
self.sounds[SoundPlayer.SOUND_EVENT_WALK].play(loops=-1)
self.playing_walk = True
stop_playing_walk = False
elif sound_event == SoundPlayer.SOUND_EVENT_KICK:
time_now = pygame.time.get_ticks()
if time_now > self.kick_last_played_time + 200: # wait 200 ms before playing kick sound again
self.sounds[SoundPlayer.SOUND_EVENT_KICK].play()
self.kick_last_played_time = time_now
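# the win events map directly to the win0.wav - win9.wav files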
elif SoundPlayer.SOUND_EVENT_WIN_0 <= sound_event <= SoundPlayer.SOUND_EVENT_WIN_9:
self.play_once(os.path.join(Game.RESOURCE_PATH,"win" + str(sound_event - SoundPlayer.SOUND_EVENT_WIN_0) + ".wav"))
if self.playing_walk and stop_playing_walk:
self.sounds[SoundPlayer.SOUND_EVENT_WALK].stop()
self.playing_walk = False
#==============================================================================
class Animation(object):
#----------------------------------------------------------------------------
def __init__(self, filename_prefix, start_number, end_number, filename_postfix, framerate = 10):
self.framerate = framerate
self.frame_time = 1000 / self.framerate # duration of one frame in milliseconds
self.frame_images = []
for i in xrange(start_number,end_number + 1):
self.frame_images.append(pygame.image.load(filename_prefix + str(i) + filename_postfix))
self.playing_instances = [] ##< A list of currently playing animation instances, as tuples in
# the format: (top left pixel coordinates, time in ms the animation started playing).
#----------------------------------------------------------------------------
def play(self, coordinates):
# convert center coordinates to top left coordinates:
top_left = (coordinates[0] - self.frame_images[0].get_size()[0] / 2,coordinates[1] - self.frame_images[0].get_size()[1] / 2)
self.playing_instances.append((top_left,pygame.time.get_ticks()))
#----------------------------------------------------------------------------
def draw(self, surface):
i = 0
time_now = pygame.time.get_ticks()
while True:
if i >= len(self.playing_instances):
break
playing_instance = self.playing_instances[i]
frame = int((time_now - playing_instance[1]) / self.frame_time) # current frame index from the elapsed time
if frame >= len(self.frame_images):
self.playing_instances.remove(playing_instance)
continue
surface.blit(self.frame_images[frame],playing_instance[0])
i += 1
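# Usage sketch (assuming a standard pygame frame loop): create the Animation once,
# call play(center_pixel_coordinates) whenever the effect should start, and call
# draw(screen_surface) every frame; finished instances remove themselves inside draw().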
#==============================================================================
## Abstract class representing a game menu. Menu item strings can contain formatting characters:
#
# ^htmlcolorcode - sets the text color (HTML #rrggbb format, e.g. ^#2E44BF) from here to the end of the line or to another formatting character
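#
# For example, the item string "^#FF0000danger" renders the word "danger" in red
# (the marker and the six hex digits are stripped before rendering, see Renderer.render_text).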
class Menu(object):
MENU_STATE_SELECTING = 0 ##< still selecting an item
MENU_STATE_CONFIRM = 1 ##< menu has been confirmed
MENU_STATE_CANCEL = 2 ##< menu has been cancelled
MENU_STATE_CONFIRM_PROMPT = 3 ##< prompting an action
MENU_MAX_ITEMS_VISIBLE = 11
#----------------------------------------------------------------------------
def __init__(self,sound_player):
self.text = ""
self.selected_item = (0,0) ##< row, column
self.items = [] ##< list of columns, each a list of item strings (rows)
self.menu_left = False
self.confirm_prompt_result = None ##< True, False or None
self.scroll_position = 0 ##< index of the first visible row
self.sound_player = sound_player
self.action_keys_previous_state = {
PlayerKeyMaps.ACTION_UP : True,
PlayerKeyMaps.ACTION_RIGHT : True,
PlayerKeyMaps.ACTION_DOWN : True,
PlayerKeyMaps.ACTION_LEFT : True,
PlayerKeyMaps.ACTION_BOMB : True,
PlayerKeyMaps.ACTION_SPECIAL : True,
PlayerKeyMaps.ACTION_BOMB_DOUBLE: True,
PlayerKeyMaps.ACTION_MENU : True} ##< to detect single key presses, the values have to be True in order not to react immediately upon entering the menu
self.state = Menu.MENU_STATE_SELECTING
#----------------------------------------------------------------------------
def get_scroll_position(self):
return self.scroll_position
#----------------------------------------------------------------------------
def get_state(self):
return self.state
#----------------------------------------------------------------------------
def prompt_action_confirm(self):
self.confirm_prompt_result = None
self.state = Menu.MENU_STATE_CONFIRM_PROMPT
#----------------------------------------------------------------------------
def get_text(self):
return self.text
#----------------------------------------------------------------------------
## Returns menu items in format: (((column 1 row 1 text), (column 1 row 2 text), ...), ((column 2 row 1 text), ...)).
def get_items(self):
return self.items
#----------------------------------------------------------------------------
## Returns a selected menu item in format (row, column).
def get_selected_item(self):
return self.selected_item
#----------------------------------------------------------------------------
def process_inputs(self, input_list):
if self.menu_left:
self.menu_left = False
self.state = Menu.MENU_STATE_SELECTING
for action_code in self.action_keys_previous_state:
self.action_keys_previous_state[action_code] = True
return
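# react only to newly pressed actions - an action is processed only if its key was not held down during the previous call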
actions_processed = []
actions_pressed = []
for action in input_list:
action_code = action[1]
if not self.action_keys_previous_state[action_code]:
# the following condition disallows ACTION_BOMB and ACTION_BOMB_DOUBLE to be in the list at the same time, which would cause trouble
if (not (action_code in actions_pressed) and not(
(action_code == PlayerKeyMaps.ACTION_BOMB and PlayerKeyMaps.ACTION_BOMB_DOUBLE in actions_pressed) or
(action_code == PlayerKeyMaps.ACTION_BOMB_DOUBLE and PlayerKeyMaps.ACTION_BOMB in actions_pressed) )):
actions_pressed.append(action_code)
actions_processed.append(action_code)
for action_code in self.action_keys_previous_state:
self.action_keys_previous_state[action_code] = False
for action_code in actions_processed:
self.action_keys_previous_state[action_code] = True
for action in actions_pressed:
self.action_pressed(action)
#----------------------------------------------------------------------------
def mouse_went_over_item(self, item_coordinates):
self.selected_item = item_coordinates
#----------------------------------------------------------------------------
## Handles mouse button events in the menu.
def mouse_button_pressed(self, button_number):
if button_number == 0: # left
self.action_pressed(PlayerKeyMaps.ACTION_BOMB)
elif button_number == 1: # right
self.action_pressed(PlayerKeyMaps.ACTION_SPECIAL)
elif button_number == 3: # up
self.scroll(True)
elif button_number == 4: # down
self.scroll(False)
#----------------------------------------------------------------------------
def scroll(self, up):
if up:
if self.scroll_position > 0:
self.scroll_position -= 1
self.action_pressed(PlayerKeyMaps.ACTION_UP)
else: # down
rows = len(self.items[self.selected_item[1]])
maximum_row = rows - Menu.MENU_MAX_ITEMS_VISIBLE
if self.scroll_position < maximum_row:
self.scroll_position += 1
self.action_pressed(PlayerKeyMaps.ACTION_DOWN)
#----------------------------------------------------------------------------
## Should be called when the menu is being left.
def leaving(self):
self.menu_left = True
self.confirm_prompt_result = None
self.sound_player.play_sound_event(SoundPlayer.SOUND_EVENT_CONFIRM)
#----------------------------------------------------------------------------
## Prompts confirmation of given menu item if it has been selected.
def prompt_if_needed(self, menu_item_coordinates):
if self.state == Menu.MENU_STATE_CONFIRM and (self.confirm_prompt_result == None or self.confirm_prompt_result == False) and self.selected_item == menu_item_coordinates:
self.prompt_action_confirm()
#----------------------------------------------------------------------------
## Is called once for every action key press (not each frame, which is
# not good for menus). This can be overridden.
def action_pressed(self, action):
old_selected_item = self.selected_item
if self.state == Menu.MENU_STATE_CONFIRM_PROMPT:
if action == PlayerKeyMaps.ACTION_BOMB or action == PlayerKeyMaps.ACTION_BOMB_DOUBLE:
self.confirm_prompt_result = True
self.state = Menu.MENU_STATE_CONFIRM
else:
self.confirm_prompt_result = False
self.state = Menu.MENU_STATE_SELECTING
else:
if action == PlayerKeyMaps.ACTION_UP:
self.selected_item = (max(0,self.selected_item[0] - 1),self.selected_item[1])
elif action == PlayerKeyMaps.ACTION_DOWN:
self.selected_item = (min(len(self.items[self.selected_item[1]]) - 1,self.selected_item[0] + 1),self.selected_item[1])
elif action == PlayerKeyMaps.ACTION_LEFT:
new_column = max(0,self.selected_item[1] - 1)
self.selected_item = (min(len(self.items[new_column]) - 1,self.selected_item[0]),new_column)
elif action == PlayerKeyMaps.ACTION_RIGHT:
new_column = min(len(self.items) - 1,self.selected_item[1] + 1)
self.selected_item = (min(len(self.items[new_column]) - 1,self.selected_item[0]),new_column)
elif action == PlayerKeyMaps.ACTION_BOMB or action == PlayerKeyMaps.ACTION_BOMB_DOUBLE:
self.state = Menu.MENU_STATE_CONFIRM
elif action == PlayerKeyMaps.ACTION_SPECIAL:
self.state = Menu.MENU_STATE_CANCEL
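# adjust the scroll position so that the selected item stays visible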
if self.selected_item[0] >= self.scroll_position + Menu.MENU_MAX_ITEMS_VISIBLE:
self.scroll_position += 1
elif self.selected_item[0] < self.scroll_position:
self.scroll_position -= 1
if self.selected_item != old_selected_item:
self.sound_player.play_sound_event(SoundPlayer.SOUND_EVENT_CLICK)
#==============================================================================
class MainMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self, sound_player):
super(MainMenu,self).__init__(sound_player)
self.items = [(
"let's play!",
"tweak some stuff",
"what's this about",
"run away!")]
#----------------------------------------------------------------------------
def action_pressed(self, action):
super(MainMenu,self).action_pressed(action)
self.prompt_if_needed((3,0))
#==============================================================================
class ResultMenu(Menu):
SEPARATOR = "__________________________________________________"
#----------------------------------------------------------------------------
def __init__(self, sound_player):
super(ResultMenu,self).__init__(sound_player)
self.items = [["I get it"]]
#----------------------------------------------------------------------------
def _format_player_cell(self, player):
return (
Renderer.colored_color_name(player.get_number()) + " (" +
Renderer.colored_text(player.get_team_number(),str(player.get_team_number() + 1)) + "): " +
str(player.get_kills()) + "/" + str(player.get_wins())
)
#----------------------------------------------------------------------------
def _format_winning_teams(self, players):
announcement_text =""
win_maximum = 0
winner_team_numbers = []
teams = defaultdict(int) # { team_number : number_of_wins }
for player in players:
teams[player.get_team_number()] += player.get_wins()
for team_number, team_wins in teams.items():
if team_wins == win_maximum:
winner_team_numbers.append(team_number)
elif team_wins > win_maximum:
win_maximum = team_wins
winner_team_numbers = [team_number]
if len(winner_team_numbers) == 1:
announcement_text = "Winner team is " + Renderer.colored_color_name(winner_team_numbers[0]) + "!"
else:
announcement_text = "Winners teams are: "
announcement_text += ", ".join(map(lambda team_no: Renderer.colored_color_name(team_no),
winner_team_numbers))
announcement_text += "!"
return announcement_text
#----------------------------------------------------------------------------
def _format_player_stats(self, players):
announcement_text = ""
row = 0
column = 0
# decide how many columns for different numbers of players will the table have
columns_by_player_count = (1,2,3,2,3,3,4,4,3,5)
table_columns = columns_by_player_count[len(players) - 1]
for player in players:
announcement_text += self._format_player_cell(player)
column += 1
if column >= table_columns:
column = 0
row += 1
announcement_text += "\n"
else:
announcement_text += " "
return announcement_text
#----------------------------------------------------------------------------
def set_results(self, players):
self.text = self._format_winning_teams(players)
self.text += "\n" + ResultMenu.SEPARATOR + "\n"
self.text += self._format_player_stats(players)
self.text += "\n" + ResultMenu.SEPARATOR
#==============================================================================
class PlayMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self,sound_player):
super(PlayMenu,self).__init__(sound_player)
self.items = [("resume","to main menu")]
#----------------------------------------------------------------------------
def action_pressed(self, action):
super(PlayMenu,self).action_pressed(action)
self.prompt_if_needed((1,0))
#==============================================================================
class SettingsMenu(Menu):
COLOR_ON = "^#1DF53A"
COLOR_OFF = "^#F51111"
#----------------------------------------------------------------------------
def __init__(self, sound_player, settings, game):
super(SettingsMenu,self).__init__(sound_player)
self.settings = settings
self.game = game
self.update_items()
#----------------------------------------------------------------------------
def bool_to_str(self, bool_value):
return SettingsMenu.COLOR_ON + "on" if bool_value else SettingsMenu.COLOR_OFF + "off"
#----------------------------------------------------------------------------
def update_items(self):
self.items = [(
"sound volume: " + (SettingsMenu.COLOR_ON if self.settings.sound_is_on() else SettingsMenu.COLOR_OFF) + str(int(self.settings.sound_volume * 10) * 10) + " %",
"music volume: " + (SettingsMenu.COLOR_ON if self.settings.music_is_on() > 0.0 else SettingsMenu.COLOR_OFF) + str(int(self.settings.music_volume * 10) * 10) + " %",
"screen resolution: " + str(self.settings.screen_resolution[0]) + " x " + str(self.settings.screen_resolution[1]),
"fullscreen: " + self.bool_to_str(self.settings.fullscreen),
"allow control by mouse: " + self.bool_to_str(self.settings.control_by_mouse),
"configure controls",
"complete reset",
"back"
)]
#----------------------------------------------------------------------------
def action_pressed(self, action):
super(SettingsMenu,self).action_pressed(action)
self.prompt_if_needed((6,0))
mouse_control_selected = False
fullscreen_selected = False
if self.state == Menu.MENU_STATE_SELECTING:
if action == PlayerKeyMaps.ACTION_RIGHT:
if self.selected_item == (0,0):
self.settings.sound_volume = min(1.0,self.settings.sound_volume + 0.1)
self.game.apply_sound_settings()
self.game.save_settings()
elif self.selected_item == (1,0):
self.settings.music_volume = min(1.0,self.settings.music_volume + 0.1)
self.game.apply_sound_settings()
self.game.save_settings()
elif self.selected_item == (2,0):
self.settings.screen_resolution = Settings.POSSIBLE_SCREEN_RESOLUTIONS[(self.settings.current_resolution_index() + 1) % len(Settings.POSSIBLE_SCREEN_RESOLUTIONS)]
self.game.apply_screen_settings()
self.game.save_settings()
elif action == PlayerKeyMaps.ACTION_LEFT:
if self.selected_item == (0,0):
self.settings.sound_volume = max(0.0,self.settings.sound_volume - 0.1)
self.game.apply_sound_settings()
self.game.save_settings()
elif self.selected_item == (1,0):
self.settings.music_volume = max(0.0,self.settings.music_volume - 0.1)
self.game.apply_sound_settings()
self.game.save_settings()
elif self.selected_item == (2,0):
self.settings.screen_resolution = Settings.POSSIBLE_SCREEN_RESOLUTIONS[(self.settings.current_resolution_index() - 1) % len(Settings.POSSIBLE_SCREEN_RESOLUTIONS)]
self.game.apply_screen_settings()
self.game.save_settings()
elif self.state == Menu.MENU_STATE_CONFIRM:
if self.selected_item == (6,0):
debug_log("resetting settings")
self.settings.reset()
self.game.save_settings()
self.game.apply_sound_settings()
self.game.apply_screen_settings()
self.game.apply_other_settings()
self.confirm_prompt_result = None
self.state = Menu.MENU_STATE_SELECTING
elif self.selected_item == (3,0):
fullscreen_selected = True
self.state = Menu.MENU_STATE_SELECTING
elif self.selected_item == (4,0):
mouse_control_selected = True
self.state = Menu.MENU_STATE_SELECTING
elif self.selected_item != (7,0) and self.selected_item != (5,0):
self.state = Menu.MENU_STATE_SELECTING
if mouse_control_selected:
self.settings.control_by_mouse = not self.settings.control_by_mouse
self.game.apply_other_settings()
self.game.save_settings()
self.state = Menu.MENU_STATE_SELECTING
if fullscreen_selected:
self.settings.fullscreen = not self.settings.fullscreen
self.game.apply_screen_settings()
self.game.save_settings()
self.state = Menu.MENU_STATE_SELECTING
self.update_items()
#==============================================================================
class ControlsMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self, sound_player, player_key_maps, game):
super(ControlsMenu,self).__init__(sound_player)
self.player_key_maps = player_key_maps
self.game = game
self.waiting_for_key = None # if not None, a tuple (player number, action) of the action currently being remapped
self.wait_for_release = False # used to wait for key release before a new key map is captured
self.update_items()
#----------------------------------------------------------------------------
def color_key_string(self, key_string):
return "^#1DF53A" + key_string if key_string != "none" else "^#E83535" + key_string # green for a mapped key (exact color value assumed, reusing SettingsMenu.COLOR_ON), red for "none"
#----------------------------------------------------------------------------
def update_items(self):
self.items = [["go back"]]
prompt_string = "press some key"
for i in xrange(Game.NUMBER_OF_CONTROLLED_PLAYERS):
player_string = "p " + str(i + 1)
player_maps = self.player_key_maps.get_players_key_mapping(i)
for action in player_maps:
item_string = player_string + " " + PlayerKeyMaps.ACTION_NAMES[action] + ": "
if self.waiting_for_key == (i,action):
item_string += prompt_string
else:
item_string += self.color_key_string(PlayerKeyMaps.key_to_string(player_maps[action]))
self.items[0] += [item_string]
# add the "open menu" item
item_string = "open menu: "
if self.waiting_for_key != None and self.waiting_for_key[1] == PlayerKeyMaps.ACTION_MENU:
item_string += prompt_string
else:
item_string += self.color_key_string(PlayerKeyMaps.key_to_string(self.player_key_maps.get_menu_key_map()))
self.items[0] += [item_string]
#----------------------------------------------------------------------------
## This should be called periodically when the menu is active. It will
# take care of catching pressed keys if waiting for key remap.
def update(self, player_key_maps):
if self.waiting_for_key != None:
keys_pressed = list(pygame.key.get_pressed())
key_pressed = None
mouse_actions = player_key_maps.get_current_mouse_control_states()
if len(mouse_actions) > 0:
key_pressed = mouse_actions[0]
for i in xrange(len(keys_pressed)): # find pressed key
if not (i in (pygame.K_NUMLOCK,pygame.K_CAPSLOCK,pygame.K_SCROLLOCK,322)) and keys_pressed[i]:
key_pressed = i
break
if self.wait_for_release:
if key_pressed == None:
self.wait_for_release = False
else:
if key_pressed != None:
debug_log("new key mapping")
self.player_key_maps.set_one_key_map(key_pressed,self.waiting_for_key[0],self.waiting_for_key[1])
self.waiting_for_key = None
self.state = Menu.MENU_STATE_SELECTING
self.game.save_settings()
for item in self.action_keys_previous_state:
self.action_keys_previous_state[item] = True
self.update_items()
#----------------------------------------------------------------------------
def action_pressed(self, action):
super(ControlsMenu,self).action_pressed(action)
if self.waiting_for_key != None:
self.waiting_for_key = None
self.state = Menu.MENU_STATE_SELECTING
elif action == PlayerKeyMaps.ACTION_BOMB and self.selected_item[0] > 0:
# new key map will be captured
helper_index = self.selected_item[0] - 1
if helper_index == Game.NUMBER_OF_CONTROLLED_PLAYERS * 6: # 6 controls for each player, then menu item follows
self.waiting_for_key = (-1,PlayerKeyMaps.ACTION_MENU)
else:
action_index = helper_index % 6
helper_array = (PlayerKeyMaps.ACTION_UP,PlayerKeyMaps.ACTION_RIGHT,PlayerKeyMaps.ACTION_DOWN,PlayerKeyMaps.ACTION_LEFT,PlayerKeyMaps.ACTION_BOMB,PlayerKeyMaps.ACTION_SPECIAL)
helper_action = helper_array[action_index]
self.waiting_for_key = (helper_index / 6,helper_action)
self.wait_for_release = True
self.state = Menu.MENU_STATE_SELECTING
self.update_items()
#==============================================================================
class AboutMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self,sound_player):
super(AboutMenu,self).__init__(sound_player)
self.text = ("^#2E44BFBombman^#FFFFFF - free Bomberman clone, ^#4EF259version " + Game.VERSION_STR + "\n"
"Miloslav \"tastyfish\" Ciz, 2016\n\n"
"This game is free software, published under CC0 1.0.\n")
self.items = [["ok, nice, back"]]
#==============================================================================
class MapSelectMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self,sound_player):
super(MapSelectMenu,self).__init__(sound_player)
self.text = "Now select a map."
self.map_filenames = []
self.update_items()
#----------------------------------------------------------------------------
def update_items(self):
self.map_filenames = sorted([filename for filename in os.listdir(Game.MAP_PATH) if os.path.isfile(os.path.join(Game.MAP_PATH,filename))])
special_color = (100,100,255)
self.items = [["^" + Renderer.rgb_to_html_notation(special_color) + "pick random","^" + Renderer.rgb_to_html_notation(special_color) + "each game random"]]
for filename in self.map_filenames:
self.items[0].append(filename)
#----------------------------------------------------------------------------
def random_was_selected(self):
return self.selected_item[0] == 1
#----------------------------------------------------------------------------
def show_map_preview(self):
return self.selected_item[0] != 0 and self.selected_item[0] != 1
#----------------------------------------------------------------------------
def get_random_map_name(self):
return random.choice(self.map_filenames)
#----------------------------------------------------------------------------
def get_selected_map_name(self):
if self.selected_item[0] == 0: # pick random
return random.choice(self.map_filenames)
try:
index = self.selected_item[0] - 2
if index < 0:
return ""
return self.map_filenames[index]
except IndexError:
return ""
#==============================================================================
class PlaySetupMenu(Menu):
#----------------------------------------------------------------------------
def __init__(self, sound_player, play_setup):
super(PlaySetupMenu,self).__init__(sound_player)
self.selected_item = (0,1)
self.play_setup = play_setup
self.update_items()
#----------------------------------------------------------------------------
def update_items(self):
self.items = [[],[],["games: " + str(self.play_setup.get_number_of_games())]]
dark_grey = (50,50,50)
self.items[0].append("back")
self.items[1].append("next")
for i in xrange(10):
slot_color = Renderer.COLOR_RGB_VALUES[i] if i != Game.COLOR_BLACK else dark_grey # black with black border not visible, use dark grey
self.items[0].append(Renderer.colored_text(i,str(i + 1)) + ": ")
slot = self.play_setup.get_slots()[i]
if slot == None:
self.items[0][-1] += "-"
self.items[1].append("-")
else:
team_color = Renderer.COLOR_RGB_VALUES[slot[1]] if slot[1] != Game.COLOR_BLACK else dark_grey
self.items[0][-1] += ("player " + str(slot[0] + 1)) if slot[0] >= 0 else "AI"
self.items[1].append(Renderer.colored_text(slot[1],str(slot[1] + 1))) # team number
#----------------------------------------------------------------------------
def action_pressed(self, action):
super(PlaySetupMenu,self).action_pressed(action)
if action == PlayerKeyMaps.ACTION_UP:
if self.selected_item == (0,2):
self.play_setup.increase_number_of_games()
self.state = Menu.MENU_STATE_SELECTING
elif action == PlayerKeyMaps.ACTION_DOWN:
if self.selected_item == (0,2):
self.play_setup.decrease_number_of_games()
self.state = Menu.MENU_STATE_SELECTING
elif self.state == Menu.MENU_STATE_CONFIRM:
if self.selected_item == (0,2):
self.play_setup.increase_number_of_games()
self.state = Menu.MENU_STATE_SELECTING
if self.selected_item[0] > 0: # override behaviour for confirm button
slots = self.play_setup.get_slots()
slot = slots[self.selected_item[0] - 1]
if self.selected_item[1] == 0:
# changing players: cycle empty slot -> AI -> player 1 to 4 -> empty slot
if slot == None:
new_value = -1
else:
new_value = slot[0] + 1
slots[self.selected_item[0] - 1] = (new_value,slot[1] if slot != None else self.selected_item[0] - 1) if new_value <= 3 else None
else:
# changing teams
if slot != None:
slots[self.selected_item[0] - 1] = (slot[0],(slot[1] + 1) % 10)
self.state = Menu.MENU_STATE_SELECTING
self.update_items()
#==============================================================================
class Renderer(object):
COLOR_RGB_VALUES = [
(210,210,210), # white
(10,10,10), # black
(255,0,0), # red
(0,0,255), # blue
(0,255,0), # green
(52,237,250), # cyan
(255,255,69), # yellow
(255,192,74), # orange
(168,127,56), # brown
(209,117,206) # purple
]
MAP_TILE_WIDTH = 50 ##< tile width in pixels
MAP_TILE_HEIGHT = 45 ##< tile height in pixels
MAP_TILE_HALF_WIDTH = MAP_TILE_WIDTH / 2
MAP_TILE_HALF_HEIGHT = MAP_TILE_HEIGHT / 2
PLAYER_SPRITE_CENTER = (30,80) ##< player's feet (not geometrical) center of the sprite in pixels
BOMB_SPRITE_CENTER = (22,33)
SHADOW_SPRITE_CENTER = (25,22)
MAP_BORDER_WIDTH = 37
ANIMATION_EVENT_EXPLOSION = 0
ANIMATION_EVENT_RIP = 1
ANIMATION_EVENT_SKELETION = 2
ANIMATION_EVENT_DISEASE_CLOUD = 3
ANIMATION_EVENT_DIE = 4
FONT_SMALL_SIZE = 12
FONT_NORMAL_SIZE = 25
MENU_LINE_SPACING = 10
MENU_FONT_COLOR = (255,255,255)
SCROLLBAR_RELATIVE_POSITION = (-200,-50)
SCROLLBAR_HEIGHT = 300
MENU_DESCRIPTION_Y_OFFSET = -80
#----------------------------------------------------------------------------
def __init__(self):
self.update_screen_info()
self.environment_images = {}
self.preview_map_name = ""
self.preview_map_image = None
self.font_small = pygame.font.Font(os.path.join(Game.RESOURCE_PATH,"LibertySans.ttf"),Renderer.FONT_SMALL_SIZE)
self.font_normal = pygame.font.Font(os.path.join(Game.RESOURCE_PATH,"LibertySans.ttf"),Renderer.FONT_NORMAL_SIZE)
self.previous_mouse_coordinates = (-1,-1)
pygame.mouse.set_visible(False) # hide mouse cursor
environment_names = ["env1","env2","env3","env4","env5","env6","env7"]
for environment_name in environment_names:
filename_floor = os.path.join(Game.RESOURCE_PATH,"tile_" + environment_name + "_floor.png")
filename_block = os.path.join(Game.RESOURCE_PATH,"tile_" + environment_name + "_block.png")
filename_wall = os.path.join(Game.RESOURCE_PATH,"tile_" + environment_name + "_wall.png")
self.environment_images[environment_name] = (pygame.image.load(filename_floor),pygame.image.load(filename_block),pygame.image.load(filename_wall))
self.prerendered_map = None # keeps a reference to a map for which some parts have been prerendered
self.prerendered_map_background = pygame.Surface((GameMap.MAP_WIDTH * Renderer.MAP_TILE_WIDTH + 2 * Renderer.MAP_BORDER_WIDTH,GameMap.MAP_HEIGHT * Renderer.MAP_TILE_HEIGHT + 2 * Renderer.MAP_BORDER_WIDTH))
self.player_images = [] ##< player images, indexed as [color index]["sprite name"]; walk animations are lists additionally indexed by frame
for i in xrange(10):
self.player_images.append({})
for helper_string in ["up","right","down","left"]:
self.player_images[-1][helper_string] = self.color_surface(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"player_" + helper_string + ".png")),i)
string_index = "walk " + helper_string
self.player_images[-1][string_index] = []
self.player_images[-1][string_index].append(self.color_surface(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"player_" + helper_string + "_walk1.png")),i))
if helper_string == "up" or helper_string == "down":
self.player_images[-1][string_index].append(self.color_surface(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"player_" + helper_string + "_walk2.png")),i))
else:
self.player_images[-1][string_index].append(self.player_images[-1][helper_string])
self.player_images[-1][string_index].append(self.color_surface(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"player_" + helper_string + "_walk3.png")),i))
self.player_images[-1][string_index].append(self.player_images[-1][string_index][0])
string_index = "box " + helper_string
self.player_images[-1][string_index] = self.color_surface(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"player_" + helper_string + "_box.png")),i)
self.bomb_images = []
self.bomb_images.append(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"bomb1.png")))
self.bomb_images.append(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"bomb2.png")))
self.bomb_images.append(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"bomb3.png")))
self.bomb_images.append(self.bomb_images[0])
# load flame images
self.flame_images = []
for i in [1,2]:
helper_string = "flame" + str(i)
self.flame_images.append({})
self.flame_images[-1]["all"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + ".png"))
self.flame_images[-1]["horizontal"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_horizontal.png"))
self.flame_images[-1]["vertical"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_vertical.png"))
self.flame_images[-1]["left"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_left.png"))
self.flame_images[-1]["right"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_right.png"))
self.flame_images[-1]["up"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_up.png"))
self.flame_images[-1]["down"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,helper_string + "_down.png"))
# load item images
self.item_images = {}
self.item_images[GameMap.ITEM_BOMB] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_bomb.png"))
self.item_images[GameMap.ITEM_FLAME] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_flame.png"))
self.item_images[GameMap.ITEM_SUPERFLAME] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_superflame.png"))
self.item_images[GameMap.ITEM_SPEEDUP] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_speedup.png"))
self.item_images[GameMap.ITEM_DISEASE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_disease.png"))
self.item_images[GameMap.ITEM_RANDOM] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_random.png"))
self.item_images[GameMap.ITEM_SPRING] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_spring.png"))
self.item_images[GameMap.ITEM_SHOE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_shoe.png"))
self.item_images[GameMap.ITEM_MULTIBOMB] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_multibomb.png"))
self.item_images[GameMap.ITEM_BOXING_GLOVE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_boxing_glove.png"))
self.item_images[GameMap.ITEM_DETONATOR] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_detonator.png"))
self.item_images[GameMap.ITEM_THROWING_GLOVE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"item_throwing_glove.png"))
# load/make gui images
self.gui_images = {}
self.gui_images["info board"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_info_board.png"))
self.gui_images["arrow up"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_arrow_up.png"))
self.gui_images["arrow down"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_arrow_down.png"))
self.gui_images["seeker"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_seeker.png"))
self.gui_images["cursor"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_cursor.png"))
self.gui_images["prompt"] = self.render_text(self.font_normal,"You sure?",(255,255,255))
self.gui_images["version"] = self.render_text(self.font_small,"v " + Game.VERSION_STR,(0,100,0))
self.player_info_board_images = [None for i in xrange(10)] # up to date infoboard image for each player
self.gui_images["out"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_out.png"))
self.gui_images["countdown"] = {}
self.gui_images["countdown"][1] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_countdown_1.png"))
self.gui_images["countdown"][2] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_countdown_2.png"))
self.gui_images["countdown"][3] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_countdown_3.png"))
self.menu_background_image = None ##< only loaded when in menu
self.menu_item_images = None ##< images of menu items, only loaded when in menu
# load other images
self.other_images = {}
self.other_images["shadow"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_shadow.png"))
self.other_images["spring"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_spring.png"))
self.other_images["antena"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_antena.png"))
self.other_images["disease"] = []
self.other_images["disease"].append(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_disease1.png")))
self.other_images["disease"].append(pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_disease2.png")))
# load icon images
self.icon_images = {}
self.icon_images[GameMap.ITEM_BOMB] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_bomb.png"))
self.icon_images[GameMap.ITEM_FLAME] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_flame.png"))
self.icon_images[GameMap.ITEM_SPEEDUP] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_speedup.png"))
self.icon_images[GameMap.ITEM_SHOE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_kicking_shoe.png"))
self.icon_images[GameMap.ITEM_BOXING_GLOVE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_boxing_glove.png"))
self.icon_images[GameMap.ITEM_THROWING_GLOVE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_throwing_glove.png"))
self.icon_images[GameMap.ITEM_SPRING] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_spring.png"))
self.icon_images[GameMap.ITEM_MULTIBOMB] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_multibomb.png"))
self.icon_images[GameMap.ITEM_DISEASE] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_disease.png"))
self.icon_images[GameMap.ITEM_DETONATOR] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_detonator.png"))
self.icon_images["etc"] = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"icon_etc.png"))
# load animations
self.animations = {}
self.animations[Renderer.ANIMATION_EVENT_EXPLOSION] = Animation(os.path.join(Game.RESOURCE_PATH,"animation_explosion"),1,10,".png",7)
self.animations[Renderer.ANIMATION_EVENT_RIP] = Animation(os.path.join(Game.RESOURCE_PATH,"animation_rip"),1,1,".png",0.3)
self.animations[Renderer.ANIMATION_EVENT_SKELETION] = Animation(os.path.join(Game.RESOURCE_PATH,"animation_skeleton"),1,10,".png",7)
self.animations[Renderer.ANIMATION_EVENT_DISEASE_CLOUD] = Animation(os.path.join(Game.RESOURCE_PATH,"animation_disease"),1,6,".png",5)
self.animations[Renderer.ANIMATION_EVENT_DIE] = Animation(os.path.join(Game.RESOURCE_PATH,"animation_die"),1,7,".png",7)
self.party_circles = [] ##< holds info about party cheat circles, list of tuples in format (coords,radius,color,phase,speed)
self.party_circles.append(((-180,110),40,(255,100,50),0.0,1.0))
self.party_circles.append(((160,70),32,(100,200,150),1.4,1.5))
self.party_circles.append(((40,-150),65,(150,100,170),2.0,0.7))
self.party_circles.append(((-170,-92),80,(200,200,32),3.2,1.3))
self.party_circles.append(((50,110),63,(10,180,230),0.1,1.8))
self.party_circles.append(((205,-130),72,(180,150,190),0.5,2.0))
self.party_players = [] ##< holds info about party cheat players, list of tuples in format (coords,color index,millisecond delay, rotate right)
self.party_players.append(((-230,80),0,0,True))
self.party_players.append(((180,10),2,220,False))
self.party_players.append(((90,-150),4,880,True))
self.party_players.append(((-190,-95),6,320,False))
self.party_players.append(((-40,110),8,50,True))
self.party_bombs = [] ##< holds info about party bombs, list of lists in format [x,y,increment x,increment y]
self.party_bombs.append([10,30,1,1])
self.party_bombs.append([700,200,1,-1])
self.party_bombs.append([512,512,-1,1])
self.party_bombs.append([1024,20,-1,-1])
self.party_bombs.append([900,300,1,1])
self.party_bombs.append([30,700,1,1])
self.party_bombs.append([405,530,1,-1])
self.party_bombs.append([250,130,-1,-1])
#----------------------------------------------------------------------------
def update_screen_info(self):
self.screen_resolution = Renderer.get_screen_size()
self.screen_center = (self.screen_resolution[0] / 2,self.screen_resolution[1] / 2)
self.map_render_location = Renderer.get_map_render_position()
#----------------------------------------------------------------------------
## Converts (r,g,b) tuple to html #rrggbb notation.
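# For example, (255,160,0) becomes "#ffa000".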
@staticmethod
def rgb_to_html_notation(rgb_color):
return "#" + hex(rgb_color[0])[2:].zfill(2) + hex(rgb_color[1])[2:].zfill(2) + hex(rgb_color[2])[2:].zfill(2)
#----------------------------------------------------------------------------
@staticmethod
def colored_text(color_index, text, end_with_white=True):
return "^" + Renderer.rgb_to_html_notation(Renderer.lighten_color(Renderer.COLOR_RGB_VALUES[color_index],75)) + text + "^#FFFFFF"
#----------------------------------------------------------------------------
@staticmethod
def colored_color_name(color_index, end_with_white=True):
return Renderer.colored_text(color_index,Game.COLOR_NAMES[color_index])
#----------------------------------------------------------------------------
## Returns a colored image made from another image (replaces red pixels with the color given by color_number, an index into Renderer.COLOR_RGB_VALUES). This method is slow.
def color_surface(self, surface, color_number):
result = surface.copy()
# change all red pixels to specified color
for j in xrange(result.get_size()[1]):
for i in xrange(result.get_size()[0]):
pixel_color = result.get_at((i,j))
if pixel_color.r == 255 and pixel_color.g == 0 and pixel_color.b == 0:
pixel_color.r = Renderer.COLOR_RGB_VALUES[color_number][0]
pixel_color.g = Renderer.COLOR_RGB_VALUES[color_number][1]
pixel_color.b = Renderer.COLOR_RGB_VALUES[color_number][2]
result.set_at((i,j),pixel_color)
return result
#----------------------------------------------------------------------------
def tile_position_to_pixel_position(self, tile_position,center=(0,0)):
return (int(float(tile_position[0]) * Renderer.MAP_TILE_WIDTH) - center[0],int(float(tile_position[1]) * Renderer.MAP_TILE_HEIGHT) - center[1])
#----------------------------------------------------------------------------
@staticmethod
def get_screen_size():
display = pygame.display.get_surface()
return display.get_size() if display != None else (0,0)
#----------------------------------------------------------------------------
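## Returns the top left pixel coordinates at which the map (including its border) should be drawn so that it ends up roughly centered on the screen, with a little extra room left below the map.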
@staticmethod
def get_map_render_position():
screen_size = Renderer.get_screen_size()
return ((screen_size[0] - Renderer.MAP_BORDER_WIDTH * 2 - Renderer.MAP_TILE_WIDTH * GameMap.MAP_WIDTH) / 2,(screen_size[1] - Renderer.MAP_BORDER_WIDTH * 2 - Renderer.MAP_TILE_HEIGHT * GameMap.MAP_HEIGHT - 50) / 2)
#----------------------------------------------------------------------------
@staticmethod
def map_position_to_pixel_position(map_position, offset = (0,0)):
map_render_location = Renderer.get_map_render_position()
return (map_render_location[0] + int(map_position[0] * Renderer.MAP_TILE_WIDTH) + Renderer.MAP_BORDER_WIDTH + offset[0],map_render_location[1] + int(map_position[1] * Renderer.MAP_TILE_HEIGHT) + Renderer.MAP_BORDER_WIDTH + offset[1])
#----------------------------------------------------------------------------
def set_resolution(self, new_resolution):
self.screen_resolution = new_resolution
#----------------------------------------------------------------------------
@staticmethod
def darken_color(color, by_how_much):
r = max(color[0] - by_how_much,0)
g = max(color[1] - by_how_much,0)
b = max(color[2] - by_how_much,0)
return (r,g,b)
#----------------------------------------------------------------------------
@staticmethod
def lighten_color(color, by_how_much):
r = min(color[0] + by_how_much,255)
g = min(color[1] + by_how_much,255)
b = min(color[2] + by_how_much,255)
return (r,g,b)
#----------------------------------------------------------------------------
def __render_info_board_item_row(self, x, y, limit, item_type, player, board_image):
item_count = 20 if item_type == GameMap.ITEM_FLAME and player.get_item_count(GameMap.ITEM_SUPERFLAME) >= 1 else player.get_item_count(item_type)
for i in xrange(item_count):
if i > limit:
break
image_to_draw = self.icon_images[item_type]
if i == limit and player.get_item_count(item_type) > limit + 1:
image_to_draw = self.icon_images["etc"]
board_image.blit(image_to_draw,(x,y))
x += self.icon_images[item_type].get_size()[0]
#----------------------------------------------------------------------------
## Updates info board images in self.player_info_board_images. This should be called each frame, as
# rerendering is done only when needed.
def update_info_boards(self, players):
for i in xrange(10): # for each player number
update_needed = False
if self.player_info_board_images[i] == None:
self.player_info_board_images[i] = self.gui_images["info board"].copy()
update_needed = True
player = None
for one_player in players:
if one_player.get_number() == i:
player = one_player
break
if player == None:
continue
if player.info_board_needs_update():
update_needed = True
if not update_needed or player == None:
continue
# rerendering needed here
debug_log("updating info board " + str(i))
board_image = self.player_info_board_images[i]
board_image.blit(self.gui_images["info board"],(0,0))
board_image.blit(self.font_small.render(str(player.get_kills()),True,(0,0,0)),(45,0))
board_image.blit(self.font_small.render(str(player.get_wins()),True,(0,0,0)),(65,0))
board_image.blit(self.font_small.render(Game.COLOR_NAMES[i],True,Renderer.darken_color(Renderer.COLOR_RGB_VALUES[i],100)),(4,2))
if player.is_dead():
board_image.blit(self.gui_images["out"],(15,34))
continue
# render items
x = 5
dy = 12
self.__render_info_board_item_row(x,20,5,GameMap.ITEM_BOMB,player,board_image)
self.__render_info_board_item_row(x,20 + dy,5,GameMap.ITEM_FLAME,player,board_image)
self.__render_info_board_item_row(x,20 + 2 * dy,9,GameMap.ITEM_SPEEDUP,player,board_image)
y = 20 + 3 * dy
items_to_check = [
GameMap.ITEM_SHOE,
GameMap.ITEM_BOXING_GLOVE,
GameMap.ITEM_THROWING_GLOVE,
GameMap.ITEM_SPRING,
GameMap.ITEM_MULTIBOMB,
GameMap.ITEM_DETONATOR,
GameMap.ITEM_DISEASE]
for item in items_to_check:
if player.get_item_count(item) or (item == GameMap.ITEM_DISEASE and player.get_disease() != Player.DISEASE_NONE):
board_image.blit(self.icon_images[item],(x,y))
x += self.icon_images[item].get_size()[0] + 1
#----------------------------------------------------------------------------
def process_animation_events(self, animation_event_list):
for animation_event in animation_event_list:
self.animations[animation_event[0]].play(animation_event[1])
#----------------------------------------------------------------------------
## Renders text with outline, line breaks, formatting, etc.
def render_text(self, font, text_to_render, color, outline_color = (0,0,0), center = False):
text_lines = text_to_render.split("\n")
rendered_lines = []
width = height = 0
first_line = True
for text_line in text_lines:
line = text_line.lstrip().rstrip()
if len(line) == 0:
continue
line_without_format = re.sub(r"\^.......","",line) # remove all the markup in format ^#dddddd
new_rendered_line = pygame.Surface(font.size(line_without_format),flags=pygame.SRCALPHA)
x = 0
first = True
starts_with_format = line[0] == "^"
for subline in line.split("^"):
if len(subline) == 0:
continue
has_format = starts_with_format if first else True
first = False
text_color = color
if has_format:
text_color = pygame.Color(subline[:7])
subline = subline[7:]
new_rendered_subline = font.render(subline,True,outline_color) # create text with outline
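# thicken the outline by blitting the text onto itself with small offsets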
new_rendered_subline.blit(new_rendered_subline,(0,2))
new_rendered_subline.blit(new_rendered_subline,(1,0))
new_rendered_subline.blit(new_rendered_subline,(-1,0))
new_rendered_subline.blit(font.render(subline,True,text_color),(0,1))
new_rendered_line.blit(new_rendered_subline,(x,0))
x += new_rendered_subline.get_size()[0]
rendered_lines.append(new_rendered_line)
if not first_line:
height += Renderer.MENU_LINE_SPACING
first_line = False
height += rendered_lines[-1].get_size()[1]
width = max(width,rendered_lines[-1].get_size()[0])
result = pygame.Surface((width,height),flags=pygame.SRCALPHA)
y_step = font.get_height() + Renderer.MENU_LINE_SPACING
for i in xrange(len(rendered_lines)):
result.blit(rendered_lines[i],(0 if not center else (width - rendered_lines[i].get_size()[0]) / 2,i * y_step))
return result
#----------------------------------------------------------------------------
## Updates images in self.menu_item_images (only if needed).
def update_menu_item_images(self, menu):
if self.menu_item_images == None:
self.menu_item_images = {} # format: (row, column) : (item text, image); the special key 0 holds the menu description text
items = menu.get_items()
item_coordinates = []
for j in xrange(len(items)):
for i in xrange(len(items[j])):
item_coordinates.append((j,i))
if len(menu.get_text()) != 0:
item_coordinates.append(0) # this is the menu description text
for menu_coordinates in item_coordinates:
update_needed = False
if not (menu_coordinates in self.menu_item_images):
update_needed = True
if menu_coordinates == 0:
item_text = menu.get_text()
center_text = True
else:
item_text = items[menu_coordinates[0]][menu_coordinates[1]]
center_text = False
if not update_needed and item_text != self.menu_item_images[menu_coordinates][0]:
update_needed = True
if update_needed:
debug_log("updating menu item " + str(menu_coordinates))
new_image = self.render_text(self.font_normal,item_text,Renderer.MENU_FONT_COLOR,center = center_text)
# blit the text onto itself, shifted one pixel down, to make it a bit bolder
new_image.blit(new_image,(0,1))
self.menu_item_images[menu_coordinates] = (item_text,new_image)
#----------------------------------------------------------------------------
def render_menu(self, menu_to_render, game):
result = pygame.Surface(self.screen_resolution)
if self.menu_background_image == None:
self.menu_background_image = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_menu_background.png"))
background_position = (self.screen_center[0] - self.menu_background_image.get_size()[0] / 2,self.screen_center[1] - self.menu_background_image.get_size()[1] / 2)
profiler.measure_start("menu rend. backg.")
result.blit(self.menu_background_image,background_position)
profiler.measure_stop("menu rend. backg.")
profiler.measure_start("menu rend. party")
if game.cheat_is_active(Game.CHEAT_PARTY):
for circle_info in self.party_circles: # draw circles
circle_coords = (self.screen_center[0] + circle_info[0][0],self.screen_center[1] + circle_info[0][1])
radius_coefficient = (math.sin(pygame.time.get_ticks() * circle_info[4] / 100.0 + circle_info[3]) + 1) / 2.0
circle_radius = int(circle_info[1] * radius_coefficient)
pygame.draw.circle(result,circle_info[2],circle_coords,circle_radius)
for player_info in self.party_players: # draw players
player_coords = (self.screen_center[0] + player_info[0][0],self.screen_center[1] + player_info[0][1])
player_direction = (int((pygame.time.get_ticks() + player_info[2]) / 150)) % 4
if not player_info[3]:
player_direction = 3 - player_direction
direction_string = ("up","right","down","left")[player_direction]
if int(pygame.time.get_ticks() / 500) % 2 == 0:
direction_string = "box " + direction_string
result.blit(self.player_images[player_info[1]][direction_string],player_coords)
for bomb_info in self.party_bombs:
result.blit(self.bomb_images[0],(bomb_info[0],bomb_info[1]))
bomb_info[0] += bomb_info[2]
bomb_info[1] += bomb_info[3]
if bomb_info[0] < 0: # border collision, change direction
bomb_info[2] = 1
elif bomb_info[0] > self.screen_resolution[0] - 50:
bomb_info[2] = -1
if bomb_info[1] < 0: # border collision, change direction
bomb_info[3] = 1
elif bomb_info[1] > self.screen_resolution[1] - 50:
bomb_info[3] = -1
profiler.measure_stop("menu rend. party")
version_position = (3,1)
result.blit(self.gui_images["version"],version_position)
profiler.measure_start("menu rend. item update")
self.update_menu_item_images(menu_to_render)
# render menu description text
y = self.screen_center[1] + Renderer.MENU_DESCRIPTION_Y_OFFSET
if len(menu_to_render.get_text()) != 0:
result.blit(self.menu_item_images[0][1],(self.screen_center[0] - self.menu_item_images[0][1].get_size()[0] / 2,y)) # menu description text image is at index 0
y += self.menu_item_images[0][1].get_size()[1] + Renderer.MENU_LINE_SPACING * 2
menu_items = menu_to_render.get_items()
columns = len(menu_items) # how many columns there are
column_x_space = 150
if columns % 2 == 0:
xs = [self.screen_center[0] + i * column_x_space - ((columns - 1) * column_x_space / 2) for i in xrange(columns)] # even number of columns
else:
xs = [self.screen_center[0] + (i - columns / 2) * column_x_space for i in xrange(columns)]
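    # e.g. with 2 columns the item columns are centered at screen_center x - 75
    # and + 75; with 3 columns at -150, 0 and +150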
selected_coordinates = menu_to_render.get_selected_item()
items_y = y
profiler.measure_stop("menu rend. item update")
# render scrollbar if needed
rows = 0
for column in menu_items:
rows = max(rows,len(column))
if rows > Menu.MENU_MAX_ITEMS_VISIBLE:
x = xs[0] + Renderer.SCROLLBAR_RELATIVE_POSITION[0]
result.blit(self.gui_images["arrow up"],(x,items_y))
result.blit(self.gui_images["arrow down"],(x,items_y + Renderer.SCROLLBAR_HEIGHT))
scrollbar_position = int(items_y + selected_coordinates[0] / float(rows) * Renderer.SCROLLBAR_HEIGHT)
result.blit(self.gui_images["seeker"],(x,scrollbar_position))
mouse_coordinates = pygame.mouse.get_pos()
# render items
profiler.measure_start("menu rend. items")
for j in xrange(len(menu_items)):
y = items_y
for i in xrange(min(Menu.MENU_MAX_ITEMS_VISIBLE,len(menu_items[j]) - menu_to_render.get_scroll_position())):
item_image = self.menu_item_images[(j,i + menu_to_render.get_scroll_position())][1]
x = xs[j] - item_image.get_size()[0] / 2
if (i + menu_to_render.get_scroll_position(),j) == selected_coordinates:
# item is selected
scale = (8 + math.sin(pygame.time.get_ticks() / 40.0)) / 7.0 # make the pulsating effect
item_image = pygame.transform.scale(item_image,(int(scale * item_image.get_size()[0]),int(scale * item_image.get_size()[1])))
x = xs[j] - item_image.get_size()[0] / 2
pygame.draw.rect(result,(255,0,0),pygame.Rect(x - 4,y - 2,item_image.get_size()[0] + 8,item_image.get_size()[1] + 4))
result.blit(item_image,(x,y))
# did mouse go over the item?
if (not game.get_settings().control_by_mouse) and (self.previous_mouse_coordinates != mouse_coordinates) and (x <= mouse_coordinates[0] <= x + item_image.get_size()[0]) and (y <= mouse_coordinates[1] <= y + item_image.get_size()[1]):
item_coordinates = (i + menu_to_render.get_scroll_position(),j)
menu_to_render.mouse_went_over_item(item_coordinates)
y += Renderer.FONT_NORMAL_SIZE + Renderer.MENU_LINE_SPACING
profiler.measure_stop("menu rend. items")
mouse_events = game.get_player_key_maps().get_mouse_button_events()
for i in xrange(len(mouse_events)):
if mouse_events[i]:
menu_to_render.mouse_button_pressed(i)
self.previous_mouse_coordinates = mouse_coordinates
# render confirm dialog if prompting
if menu_to_render.get_state() == Menu.MENU_STATE_CONFIRM_PROMPT:
width = 120
height = 80
x = self.screen_center[0] - width / 2
y = self.screen_center[1] - height / 2
pygame.draw.rect(result,(0,0,0),pygame.Rect(x,y,width,height))
pygame.draw.rect(result,(255,255,255),pygame.Rect(x,y,width,height),1)
text_image = pygame.transform.rotate(self.gui_images["prompt"],math.sin(pygame.time.get_ticks() / 100) * 5)
x = self.screen_center[0] - text_image.get_size()[0] / 2
y = self.screen_center[1] - text_image.get_size()[1] / 2
result.blit(text_image,(x,y))
# map preview
profiler.measure_start("menu rend. preview")
if isinstance(menu_to_render,MapSelectMenu): # also not too nice
if menu_to_render.show_map_preview():
self.update_map_preview_image(menu_to_render.get_selected_map_name())
result.blit(self.preview_map_image,(self.screen_center[0] + 180,items_y))
profiler.measure_stop("menu rend. preview")
# draw cursor only if control by mouse is not allowed - wouldn't make sense
if not game.get_settings().control_by_mouse:
result.blit(self.gui_images["cursor"],pygame.mouse.get_pos())
return result
#----------------------------------------------------------------------------
def update_map_preview_image(self, map_filename):
if map_filename == "":
self.preview_map_name = ""
self.preview_map_image = None
return
if self.preview_map_name != map_filename:
debug_log("updating map preview of " + map_filename)
self.preview_map_name = map_filename
tile_size = 7
tile_half_size = tile_size / 2
map_info_border_size = 5
self.preview_map_image = pygame.Surface((tile_size * GameMap.MAP_WIDTH,tile_size * GameMap.MAP_HEIGHT + map_info_border_size + Renderer.MAP_TILE_HEIGHT))
with open(os.path.join(Game.MAP_PATH,map_filename)) as map_file:
map_data = map_file.read()
temp_map = GameMap(map_data,PlaySetup(),0,0)
for y in xrange(GameMap.MAP_HEIGHT):
for x in xrange(GameMap.MAP_WIDTH):
tile = temp_map.get_tile_at((x,y))
tile_kind = tile.kind
pos_x = x * tile_size
pos_y = y * tile_size
tile_special_object = tile.special_object
if tile_special_object == None:
if tile_kind == MapTile.TILE_BLOCK:
tile_color = (120,120,120)
elif tile_kind == MapTile.TILE_WALL:
tile_color = (60,60,60)
else: # floor
tile_color = (230,230,230)
else:
if tile_special_object == MapTile.SPECIAL_OBJECT_LAVA:
tile_color = (200,0,0)
elif tile_special_object == MapTile.SPECIAL_OBJECT_TELEPORT_A or tile_special_object == MapTile.SPECIAL_OBJECT_TELEPORT_B:
tile_color = (0,0,200)
elif tile_special_object == MapTile.SPECIAL_OBJECT_TRAMPOLINE:
tile_color = (0,200,0)
elif tile_kind == MapTile.TILE_FLOOR: # arrow
tile_color = (200,200,0)
else:
tile_color = (230,230,230)
pygame.draw.rect(self.preview_map_image,tile_color,pygame.Rect(pos_x,pos_y,tile_size,tile_size))
starting_positions = temp_map.get_starting_positions()
for player_index in xrange(len(starting_positions)):
draw_position = (int(starting_positions[player_index][0]) * tile_size + tile_half_size,int(starting_positions[player_index][1]) * tile_size + tile_half_size)
pygame.draw.circle(self.preview_map_image,Renderer.COLOR_RGB_VALUES[player_index],draw_position,tile_half_size)
y = tile_size * GameMap.MAP_HEIGHT + map_info_border_size
column = 0
self.preview_map_image.blit(self.environment_images[temp_map.get_environment_name()][0],(0,y))
# draw starting item icons
starting_x = Renderer.MAP_TILE_WIDTH + 5
x = starting_x
pygame.draw.rect(self.preview_map_image,(255,255,255),pygame.Rect(x,y,Renderer.MAP_TILE_WIDTH,Renderer.MAP_TILE_HEIGHT))
starting_items = temp_map.get_starting_items()
for i in xrange(len(starting_items)):
item = starting_items[i]
if item in self.icon_images:
item_image = self.icon_images[item]
self.preview_map_image.blit(item_image,(x + 1,y + 1))
x += item_image.get_size()[0] + 1
column += 1
if column > 2:
column = 0
x = starting_x
y += 12
#----------------------------------------------------------------------------
def __prerender_map(self, map_to_render):
self.animation_events = [] # clear previous animation
debug_log("prerendering map...")
    # the following images are only needed here, so we don't store them in self
image_trampoline = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_trampoline.png"))
image_teleport = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_teleport.png"))
image_arrow_up = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_up.png"))
image_arrow_right = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_right.png"))
image_arrow_down = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_down.png"))
image_arrow_left = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_left.png"))
image_lava = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_lava.png"))
image_background = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_map_background.png"))
self.prerendered_map_background.blit(image_background,(0,0))
for j in xrange(GameMap.MAP_HEIGHT):
for i in xrange(GameMap.MAP_WIDTH):
        render_position = (i * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH,j * Renderer.MAP_TILE_HEIGHT + Renderer.MAP_BORDER_WIDTH)
self.prerendered_map_background.blit(self.environment_images[map_to_render.get_environment_name()][0],render_position)
tile = map_to_render.get_tile_at((i,j))
helper_mapping = {
MapTile.SPECIAL_OBJECT_TELEPORT_A: image_teleport,
MapTile.SPECIAL_OBJECT_TELEPORT_B: image_teleport,
MapTile.SPECIAL_OBJECT_TRAMPOLINE: image_trampoline,
MapTile.SPECIAL_OBJECT_ARROW_UP: image_arrow_up,
MapTile.SPECIAL_OBJECT_ARROW_RIGHT: image_arrow_right,
MapTile.SPECIAL_OBJECT_ARROW_DOWN: image_arrow_down,
MapTile.SPECIAL_OBJECT_ARROW_LEFT: image_arrow_left,
MapTile.SPECIAL_OBJECT_LAVA: image_lava
}
if tile.special_object in helper_mapping:
self.prerendered_map_background.blit(helper_mapping[tile.special_object],render_position)
game_info = map_to_render.get_game_number_info()
game_info_text = self.render_text(self.font_small,"game " + str(game_info[0]) + " of " + str(game_info[1]),(255,255,255))
self.prerendered_map_background.blit(game_info_text,((self.prerendered_map_background.get_size()[0] - game_info_text.get_size()[0]) / 2,self.prerendered_map_background.get_size()[1] - game_info_text.get_size()[1]))
self.prerendered_map = map_to_render
#----------------------------------------------------------------------------
  ##< Gets info about how the given player would be rendered, in format (image to render, sprite center, relative pixel offset, draw_shadow, overlay images).
def __get_player_render_info(self, player, game_map):
profiler.measure_start("map rend. player")
draw_shadow = True
relative_offset = [0,0]
overlay_images = []
if player.is_dead():
profiler.measure_stop("map rend. player")
return (None, (0,0), (0,0), False, [])
sprite_center = Renderer.PLAYER_SPRITE_CENTER
animation_frame = (player.get_state_time() / 100) % 4
color_index = player.get_number() if game_map.get_state() == GameMap.STATE_WAITING_TO_PLAY else player.get_team_number()
if player.is_in_air():
if player.get_state_time() < Player.JUMP_DURATION / 2:
quotient = abs(player.get_state_time() / float(Player.JUMP_DURATION / 2))
else:
quotient = 2.0 - abs(player.get_state_time() / float(Player.JUMP_DURATION / 2))
scale = (1 + 0.5 * quotient)
player_image = self.player_images[color_index]["down"]
image_to_render = pygame.transform.scale(player_image,(int(scale * player_image.get_size()[0]),int(scale * player_image.get_size()[1])))
draw_shadow = False
relative_offset[0] = -1 * (image_to_render.get_size()[0] / 2 - Renderer.PLAYER_SPRITE_CENTER[0]) # offset caused by scale
relative_offset[1] = -1 * int(math.sin(quotient * math.pi / 2.0) * Renderer.MAP_TILE_HEIGHT * GameMap.MAP_HEIGHT) # height offset
elif player.is_teleporting():
image_to_render = self.player_images[color_index][("up","right","down","left")[animation_frame]]
elif player.is_boxing() or player.is_throwing():
if not player.is_throwing() and animation_frame == 0:
helper_string = ""
else:
helper_string = "box "
helper_string += ("up","right","down","left")[player.get_direction_number()]
image_to_render = self.player_images[color_index][helper_string]
else:
helper_string = ("up","right","down","left")[player.get_direction_number()]
if player.is_walking():
image_to_render = self.player_images[color_index]["walk " + helper_string][animation_frame]
else:
image_to_render = self.player_images[color_index][helper_string]
if player.get_disease() != Player.DISEASE_NONE:
overlay_images.append(self.other_images["disease"][animation_frame % 2])
profiler.measure_stop("map rend. player")
return (image_to_render,sprite_center,relative_offset,draw_shadow,overlay_images)
#----------------------------------------------------------------------------
##< Same as __get_player_render_info, but for bombs.
def __get_bomb_render_info(self, bomb, game_map):
profiler.measure_start("map rend. bomb")
sprite_center = Renderer.BOMB_SPRITE_CENTER
animation_frame = (bomb.time_of_existence / 100) % 4
relative_offset = [0,0]
overlay_images = []
if bomb.has_detonator():
overlay_images.append(self.other_images["antena"])
if bomb.time_of_existence < Bomb.DETONATOR_EXPIRATION_TIME:
animation_frame = 0 # bomb won't pulse if within detonator expiration time
if bomb.movement == Bomb.BOMB_FLYING:
normalised_distance_travelled = bomb.flight_info.distance_travelled / float(bomb.flight_info.total_distance_to_travel)
helper_offset = -1 * bomb.flight_info.total_distance_to_travel + bomb.flight_info.distance_travelled
relative_offset = [
int(bomb.flight_info.direction[0] * helper_offset * Renderer.MAP_TILE_WIDTH),
int(bomb.flight_info.direction[1] * helper_offset * Renderer.MAP_TILE_HALF_HEIGHT)]
relative_offset[1] -= int(math.sin(normalised_distance_travelled * math.pi) * bomb.flight_info.total_distance_to_travel * Renderer.MAP_TILE_HEIGHT / 2) # height in air
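      # the sine term lifts the bomb along an arc that peaks halfway through
      # the flight, on top of the linear offset computed above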
image_to_render = self.bomb_images[animation_frame]
if bomb.has_spring:
overlay_images.append(self.other_images["spring"])
profiler.measure_stop("map rend. bomb")
return (image_to_render,sprite_center,relative_offset,True,overlay_images)
#----------------------------------------------------------------------------
def render_map(self, map_to_render):
result = pygame.Surface(self.screen_resolution)
    self.menu_background_image = None # unload unnecessary images
self.menu_item_images = None
self.preview_map_name = ""
self.preview_map_image = None
self.update_info_boards(map_to_render.get_players())
if map_to_render != self.prerendered_map: # first time rendering this map, prerender some stuff
self.__prerender_map(map_to_render)
profiler.measure_start("map rend. backg.")
result.blit(self.prerendered_map_background,self.map_render_location)
profiler.measure_stop("map rend. backg.")
# order the players and bombs by their y position so that they are drawn correctly
profiler.measure_start("map rend. sort")
ordered_objects_to_render = []
ordered_objects_to_render.extend(map_to_render.get_players())
ordered_objects_to_render.extend(map_to_render.get_bombs())
ordered_objects_to_render.sort(key = lambda what: 1000 if (isinstance(what,Bomb) and what.movement == Bomb.BOMB_FLYING) else what.get_position()[1]) # flying bombs are rendered above everything else
profiler.measure_stop("map rend. sort")
# render the map by lines:
tiles = map_to_render.get_tiles()
environment_images = self.environment_images[map_to_render.get_environment_name()]
y = Renderer.MAP_BORDER_WIDTH + self.map_render_location[1]
y_offset_block = Renderer.MAP_TILE_HEIGHT - environment_images[1].get_size()[1]
y_offset_wall = Renderer.MAP_TILE_HEIGHT - environment_images[2].get_size()[1]
line_number = 0
object_to_render_index = 0
flame_animation_frame = (pygame.time.get_ticks() / 100) % 2
for line in tiles:
x = (GameMap.MAP_WIDTH - 1) * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH + self.map_render_location[0]
while True: # render players and bombs in the current line
if object_to_render_index >= len(ordered_objects_to_render):
break
object_to_render = ordered_objects_to_render[object_to_render_index]
if object_to_render.get_position()[1] > line_number + 1:
break
if isinstance(object_to_render,Player):
image_to_render, sprite_center, relative_offset, draw_shadow, overlay_images = self.__get_player_render_info(object_to_render, map_to_render)
else: # bomb
image_to_render, sprite_center, relative_offset, draw_shadow, overlay_images = self.__get_bomb_render_info(object_to_render, map_to_render)
if image_to_render == None:
object_to_render_index += 1
continue
if draw_shadow:
render_position = self.tile_position_to_pixel_position(object_to_render.get_position(),Renderer.SHADOW_SPRITE_CENTER)
render_position = (
(render_position[0] + Renderer.MAP_BORDER_WIDTH + relative_offset[0]) % self.prerendered_map_background.get_size()[0] + self.map_render_location[0],
render_position[1] + Renderer.MAP_BORDER_WIDTH + self.map_render_location[1])
result.blit(self.other_images["shadow"],render_position)
render_position = self.tile_position_to_pixel_position(object_to_render.get_position(),sprite_center)
render_position = ((render_position[0] + Renderer.MAP_BORDER_WIDTH + relative_offset[0]) % self.prerendered_map_background.get_size()[0] + self.map_render_location[0],render_position[1] + Renderer.MAP_BORDER_WIDTH + relative_offset[1] + self.map_render_location[1])
result.blit(image_to_render,render_position)
for additional_image in overlay_images:
result.blit(additional_image,render_position)
object_to_render_index += 1
for tile in reversed(line): # render tiles in the current line
profiler.measure_start("map rend. tiles")
if not tile.to_be_destroyed: # don't render a tile that is being destroyed
if tile.kind == MapTile.TILE_BLOCK:
result.blit(environment_images[1],(x,y + y_offset_block))
elif tile.kind == MapTile.TILE_WALL:
result.blit(environment_images[2],(x,y + y_offset_wall))
elif tile.item != None:
result.blit(self.item_images[tile.item],(x,y))
if len(tile.flames) != 0: # if there is at least one flame, draw it
sprite_name = tile.flames[0].direction
result.blit(self.flame_images[flame_animation_frame][sprite_name],(x,y))
# for debug: uncomment this to see danger values on the map
# pygame.draw.rect(result,(int((1 - map_to_render.get_danger_value(tile.coordinates) / float(GameMap.SAFE_DANGER_VALUE)) * 255.0),0,0),pygame.Rect(x + 10,y + 10,30,30))
x -= Renderer.MAP_TILE_WIDTH
profiler.measure_stop("map rend. tiles")
x = (GameMap.MAP_WIDTH - 1) * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH + self.map_render_location[0]
y += Renderer.MAP_TILE_HEIGHT
line_number += 1
# update animations
profiler.measure_start("map rend. anim")
for animation_index in self.animations:
self.animations[animation_index].draw(result)
profiler.measure_stop("map rend. anim")
# draw info boards
profiler.measure_start("map rend. boards")
players_by_numbers = map_to_render.get_players_by_numbers()
x = self.map_render_location[0] + 12
y = self.map_render_location[1] + self.prerendered_map_background.get_size()[1] + 20
for i in players_by_numbers:
if players_by_numbers[i] == None or self.player_info_board_images[i] == None:
continue
if players_by_numbers[i].is_dead():
movement_offset = (0,0)
else:
movement_offset = (int(math.sin(pygame.time.get_ticks() / 64.0 + i) * 2),int(4 * math.sin(pygame.time.get_ticks() / 128.0 - i)))
result.blit(self.player_info_board_images[i],(x + movement_offset[0],y + movement_offset[1]))
x += self.gui_images["info board"].get_size()[0] - 2
profiler.measure_stop("map rend. boards")
profiler.measure_start("map rend. earthquake")
if map_to_render.earthquake_is_active(): # shaking effect
result = pygame.transform.rotate(result,random.uniform(-4,4))
profiler.measure_stop("map rend. earthquake")
if map_to_render.get_state() == GameMap.STATE_WAITING_TO_PLAY:
third = GameMap.START_GAME_AFTER / 3
countdown_image_index = max(3 - map_to_render.get_map_time() / third,1)
countdown_image = self.gui_images["countdown"][countdown_image_index]
countdown_position = (self.screen_center[0] - countdown_image.get_size()[0] / 2,self.screen_center[1] - countdown_image.get_size()[1] / 2)
result.blit(countdown_image,countdown_position)
return result
#==============================================================================
class AI(object):
REPEAT_ACTIONS = (100,300) ##< In order not to compute actions with every single call to
# play(), actions will be stored in self.outputs and repeated
# for next random(REPEAT_ACTIONS[0],REPEAT_ACTIONS[1]) ms - saves
# CPU time and prevents jerky AI movement.
#----------------------------------------------------------------------------
def __init__(self, player, game_map):
self.player = player
self.game_map = game_map
self.outputs = [] ##< holds currently active outputs
self.recompute_compute_actions_on = 0
self.do_nothing = False ##< this can turn AI off for debugging purposes
self.didnt_move_since = 0
#----------------------------------------------------------------------------
def tile_is_escapable(self, tile_coordinates):
if not self.game_map.tile_is_walkable(tile_coordinates) or self.game_map.tile_has_flame(tile_coordinates):
return False
tile = self.game_map.get_tile_at(tile_coordinates)
if tile.special_object == MapTile.SPECIAL_OBJECT_LAVA:
return False
return True
#----------------------------------------------------------------------------
  ## Returns a two-number tuple of x, y coordinates, where x and y are
  # either -1, 0 or 1, indicating a rough general direction in which to
  # move, in order to prevent the AI from walking in a nonsensical
  # direction (towards the outside of the map etc.).
def decide_general_direction(self):
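    # e.g. if the nearest living enemy is below and to the right of this
    # player, the method returns (1,1)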
players = self.game_map.get_players()
enemy_players = filter(lambda p: p.is_enemy(self.player) and not p.is_dead(), players)
enemy_player = enemy_players[0] if len(enemy_players) > 0 else self.player
my_tile_position = self.player.get_tile_position()
another_player_tile_position = enemy_player.get_tile_position()
dx = another_player_tile_position[0] - my_tile_position[0]
dy = another_player_tile_position[1] - my_tile_position[1]
dx = min(max(-1,dx),1)
dy = min(max(-1,dy),1)
return (dx,dy)
#----------------------------------------------------------------------------
  ## Rates all 4 directions from a specified tile (up, right, down, left) with a number
  # that says how many possible safe tiles are accessible in that direction in
  # case a bomb is present on the specified tile. A tuple of four integers is returned
  # with a number for each direction - the higher the number, the better it is to run to
  # safety in that direction. 0 means there is no escape and running in that direction
  # means death.
def rate_bomb_escape_directions(self, tile_coordinates):
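    # Illustrative example (hypothetical map): standing on (5,5) with an open
    # corridor upwards and walls in the other directions, the result might be
    # (3,0,0,0), i.e. only running up offers safe tiles.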
# up right down left
axis_directions = ((0,-1), (1,0), (0,1), (-1,0))
perpendicular_directions = ((1,0), (0,1), (1,0), (0,1))
result = [0,0,0,0]
for direction in (0,1,2,3):
for i in xrange(1,self.player.get_flame_length() + 2):
axis_tile = (tile_coordinates[0] + i * axis_directions[direction][0],tile_coordinates[1] + i * axis_directions[direction][1])
if not self.tile_is_escapable(axis_tile):
break
perpendicular_tile1 = (axis_tile[0] + perpendicular_directions[direction][0],axis_tile[1] + perpendicular_directions[direction][1])
perpendicular_tile2 = (axis_tile[0] - perpendicular_directions[direction][0],axis_tile[1] - perpendicular_directions[direction][1])
if i > self.player.get_flame_length() and self.game_map.get_danger_value(axis_tile) >= GameMap.SAFE_DANGER_VALUE:
result[direction] += 1
if self.tile_is_escapable(perpendicular_tile1) and self.game_map.get_danger_value(perpendicular_tile1) >= GameMap.SAFE_DANGER_VALUE:
result[direction] += 1
if self.tile_is_escapable(perpendicular_tile2) and self.game_map.get_danger_value(perpendicular_tile2) >= GameMap.SAFE_DANGER_VALUE:
result[direction] += 1
return tuple(result)
#----------------------------------------------------------------------------
  ## Returns an integer score in range 0 - 100 for the given tile (100 = good, 0 = bad).
def rate_tile(self, tile_coordinates):
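    # note: judging from the scoring below, a higher danger value means a
    # safer tile (more time before flames arrive), with 0 meaning immediate
    # danger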
danger = self.game_map.get_danger_value(tile_coordinates)
if danger == 0:
return 0
score = 0
if danger < 1000:
score = 20
elif danger < 2500:
score = 40
else:
score = 60
tile_item = self.game_map.get_tile_at(tile_coordinates).item
if tile_item != None:
if tile_item != GameMap.ITEM_DISEASE:
score += 20
else:
score -= 10
top = (tile_coordinates[0],tile_coordinates[1] - 1)
right = (tile_coordinates[0] + 1,tile_coordinates[1])
down = (tile_coordinates[0],tile_coordinates[1] + 1)
left = (tile_coordinates[0] - 1,tile_coordinates[1])
if self.game_map.tile_has_lava(top) or self.game_map.tile_has_lava(right) or self.game_map.tile_has_lava(down) or self.game_map.tile_has_lava(left):
score -= 5 # don't go near lava
if self.game_map.tile_has_bomb(tile_coordinates):
if not self.player.can_box():
score -= 5
return score
#----------------------------------------------------------------------------
def is_trapped(self):
neighbour_tiles = self.player.get_neighbour_tile_coordinates()
trapped = True
for tile_coordinates in neighbour_tiles:
if self.game_map.tile_is_walkable(tile_coordinates):
trapped = False
break
return trapped
#----------------------------------------------------------------------------
def number_of_blocks_next_to_tile(self, tile_coordinates):
count = 0
    for tile_offset in ((0,-1),(1,0),(0,1),(-1,0)): # for each neighbour tile
helper_tile = self.game_map.get_tile_at((tile_coordinates[0] + tile_offset[0],tile_coordinates[1] + tile_offset[1]))
if helper_tile != None and helper_tile.kind == MapTile.TILE_BLOCK:
count += 1
return count
#----------------------------------------------------------------------------
  ## Returns a tuple in format: (nearby_enemies, nearby_allies).
def players_nearby(self):
current_position = self.player.get_tile_position()
allies = 0
enemies = 0
for player in self.game_map.get_players():
if player.is_dead() or player == self.player:
continue
player_position = player.get_tile_position()
if abs(current_position[0] - player_position[0]) <= 1 and abs(current_position[1] - player_position[1]) <= 1:
if player.is_enemy(self.player):
enemies += 1
else:
allies += 1
return (enemies,allies)
#----------------------------------------------------------------------------
  ## Decides what moves to make and returns a list of events in the same
  # format as PlayerKeyMaps.get_current_actions().
def play(self):
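    # each returned action is a (player_number, action) tuple, e.g.
    # (self.player.get_number(), PlayerKeyMaps.ACTION_UP)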
if self.do_nothing or self.player.is_dead():
return []
current_time = self.game_map.get_map_time()
if current_time < self.recompute_compute_actions_on or self.player.get_state() == Player.STATE_IN_AIR or self.player.get_state() == Player.STATE_TELEPORTING:
return self.outputs # only repeat actions
# start decisions here:
    # movement decisions:
self.outputs = []
current_tile = self.player.get_tile_position()
trapped = self.is_trapped()
escape_direction_ratings = self.rate_bomb_escape_directions(current_tile)
# consider possible actions and find the one with biggest score:
if trapped:
      # when trapped, spin randomly and press the box action in the hope of freeing itself
chosen_movement_action = random.choice((PlayerKeyMaps.ACTION_UP,PlayerKeyMaps.ACTION_RIGHT,PlayerKeyMaps.ACTION_DOWN,PlayerKeyMaps.ACTION_LEFT))
elif self.game_map.tile_has_bomb(current_tile):
# standing on a bomb, find a way to escape
# find maximum
best_rating = escape_direction_ratings[0]
best_action = PlayerKeyMaps.ACTION_UP
if escape_direction_ratings[1] > best_rating:
best_rating = escape_direction_ratings[1]
best_action = PlayerKeyMaps.ACTION_RIGHT
if escape_direction_ratings[2] > best_rating:
best_rating = escape_direction_ratings[2]
best_action = PlayerKeyMaps.ACTION_DOWN
if escape_direction_ratings[3] > best_rating:
best_rating = escape_direction_ratings[3]
best_action = PlayerKeyMaps.ACTION_LEFT
chosen_movement_action = best_action
else: # not standing on a bomb
# should I not move?
maximum_score = self.rate_tile(current_tile)
best_direction_actions = [None]
general_direction = self.decide_general_direction()
# up # right # down # left
tile_increment = ((0,-1), (1,0), (0,1), (-1,0))
action = (PlayerKeyMaps.ACTION_UP, PlayerKeyMaps.ACTION_RIGHT, PlayerKeyMaps.ACTION_DOWN, PlayerKeyMaps.ACTION_LEFT)
# should I move up, right, down or left?
for direction in (0,1,2,3):
score = self.rate_tile((current_tile[0] + tile_increment[direction][0],current_tile[1] + tile_increment[direction][1]))
# count in the general direction
extra_score = 0
if tile_increment[direction][0] == general_direction[0]:
extra_score += 2
if tile_increment[direction][1] == general_direction[1]:
extra_score += 2
score += extra_score
if score > maximum_score:
maximum_score = score
best_direction_actions = [action[direction]]
elif score == maximum_score:
best_direction_actions.append(action[direction])
chosen_movement_action = random.choice(best_direction_actions)
if chosen_movement_action != None:
if self.player.get_disease() == Player.DISEASE_REVERSE_CONTROLS:
chosen_movement_action = PlayerKeyMaps.get_opposite_action(chosen_movement_action)
self.outputs.append((self.player.get_number(),chosen_movement_action))
self.didnt_move_since = self.game_map.get_map_time()
if self.game_map.get_map_time() - self.didnt_move_since > 10000: # didn't move for 10 seconds or more => force move
chosen_movement_action = random.choice((PlayerKeyMaps.ACTION_UP,PlayerKeyMaps.ACTION_RIGHT,PlayerKeyMaps.ACTION_DOWN,PlayerKeyMaps.ACTION_LEFT))
self.outputs.append((self.player.get_number(),chosen_movement_action))
# bomb decisions
bomb_laid = False
if self.game_map.tile_has_bomb(current_tile):
# should I throw?
if self.player.can_throw() and max(escape_direction_ratings) == 0:
self.outputs.append((self.player.get_number(),PlayerKeyMaps.ACTION_BOMB_DOUBLE))
    elif self.player.get_bombs_left() > 0 and (self.player.can_throw() or (self.game_map.get_danger_value(current_tile) > 2000 and max(escape_direction_ratings) > 0)):
# should I lay bomb?
chance_to_put_bomb = 100 # one in how many
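      # lower value => higher probability: the bomb is laid when
      # random.randint(0,chance_to_put_bomb) == 0 (see below)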
players_near = self.players_nearby()
if players_near[0] > 0 and players_near[1] == 0: # enemy nearby and no ally nearby
chance_to_put_bomb = 5
else:
block_tile_ratio = self.game_map.get_number_of_block_tiles() / float(GameMap.MAP_WIDTH * GameMap.MAP_HEIGHT)
        if block_tile_ratio < 0.2:    # the fewer block tiles left, the more often bombs are laid
          chance_to_put_bomb = 20
        elif block_tile_ratio < 0.4:
          chance_to_put_bomb = 80
number_of_block_neighbours = self.number_of_blocks_next_to_tile(current_tile)
if number_of_block_neighbours == 1:
chance_to_put_bomb = 3
elif number_of_block_neighbours == 2 or number_of_block_neighbours == 3:
chance_to_put_bomb = 2
do_lay_bomb = random.randint(0,chance_to_put_bomb) == 0
if do_lay_bomb:
bomb_laid = True
if random.randint(0,2) == 0 and self.should_lay_multibomb(chosen_movement_action): # lay a single bomb or multibomb?
self.outputs.append((self.player.get_number(),PlayerKeyMaps.ACTION_BOMB_DOUBLE))
else:
self.outputs.append((self.player.get_number(),PlayerKeyMaps.ACTION_BOMB))
# should I box?
if self.player.can_box() and not self.player.detonator_is_active():
if trapped or self.game_map.tile_has_bomb(self.player.get_forward_tile_position()):
self.outputs.append((self.player.get_number(),PlayerKeyMaps.ACTION_SPECIAL))
    if bomb_laid: # if a bomb was laid, the outputs must be recomputed quickly in order to prevent laying bombs on other tiles
self.recompute_compute_actions_on = current_time + 10
else:
self.recompute_compute_actions_on = current_time + random.randint(AI.REPEAT_ACTIONS[0],AI.REPEAT_ACTIONS[1])
# should I detonate the detonator?
if self.player.detonator_is_active():
if random.randint(0,2) == 0 and self.game_map.get_danger_value(current_tile) >= GameMap.SAFE_DANGER_VALUE:
self.outputs.append((self.player.get_number(),PlayerKeyMaps.ACTION_SPECIAL))
return self.outputs
#----------------------------------------------------------------------------
def should_lay_multibomb(self, movement_action):
if self.player.can_throw(): # multibomb not possible with throwing glove
return False
multibomb_count = self.player.get_multibomb_count()
if multibomb_count > 1: # multibomb possible
current_tile = self.player.get_tile_position()
player_direction = movement_action if movement_action != None else self.player.get_direction_number()
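      # note: this assumes the ACTION_UP/RIGHT/DOWN/LEFT constants double as
      # direction indices 0-3, matching rate_bomb_escape_directions' ordering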
# by laying multibomb one of the escape routes will be cut off, let's check
# if there would be any escape routes left
escape_direction_ratings = list(self.rate_bomb_escape_directions(current_tile))
escape_direction_ratings[player_direction] = 0
if max(escape_direction_ratings) == 0:
return False
direction_vector = self.player.get_direction_vector()
multibomb_safe = True
for i in xrange(multibomb_count):
if not self.game_map.tile_is_walkable(current_tile) or not self.game_map.tile_is_withing_map(current_tile):
break
if self.game_map.get_danger_value(current_tile) < 3000 or self.game_map.tile_has_lava(current_tile):
multibomb_safe = False
break
current_tile = (current_tile[0] + direction_vector[0],current_tile[1] + direction_vector[1])
if multibomb_safe:
return True
return False
#==============================================================================
class Settings(StringSerializable):
POSSIBLE_SCREEN_RESOLUTIONS = (
(960,720),
(1024,768),
(1280,720),
(1280,1024),
(1366,768),
(1680,1050),
(1920,1080)
)
SOUND_VOLUME_THRESHOLD = 0.01
CONTROL_MAPPING_DELIMITER = "CONTROL MAPPING"
#----------------------------------------------------------------------------
def __init__(self, player_key_maps):
self.player_key_maps = player_key_maps
self.reset()
#----------------------------------------------------------------------------
def reset(self):
self.sound_volume = 0.7
self.music_volume = 0.2
self.screen_resolution = Settings.POSSIBLE_SCREEN_RESOLUTIONS[0]
self.fullscreen = False
self.control_by_mouse = False
self.player_key_maps.reset()
#----------------------------------------------------------------------------
def save_to_string(self):
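    # produces a plain key/value text, e.g.:
    #   sound volume: 0.7
    #   music volume: 0.2
    #   screen resolution: 960x720
    #   fullscreen: False
    #   control by mouse: False
    #   CONTROL MAPPING
    #   ...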
result = ""
result += "sound volume: " + str(self.sound_volume) + "\n"
result += "music volume: " + str(self.music_volume) + "\n"
result += "screen resolution: " + str(self.screen_resolution[0]) + "x" + str(self.screen_resolution[1]) + "\n"
result += "fullscreen: " + str(self.fullscreen) + "\n"
result += "control by mouse: " + str(self.control_by_mouse) + "\n"
result += Settings.CONTROL_MAPPING_DELIMITER + "\n"
result += self.player_key_maps.save_to_string() + "\n"
result += Settings.CONTROL_MAPPING_DELIMITER + "\n"
return result
#----------------------------------------------------------------------------
def load_from_string(self, input_string):
self.reset()
helper_position = input_string.find(Settings.CONTROL_MAPPING_DELIMITER)
if helper_position >= 0:
helper_position1 = helper_position + len(Settings.CONTROL_MAPPING_DELIMITER)
helper_position2 = input_string.find(Settings.CONTROL_MAPPING_DELIMITER,helper_position1)
debug_log("loading control mapping")
settings_string = input_string[helper_position1:helper_position2].lstrip().rstrip()
self.player_key_maps.load_from_string(settings_string)
input_string = input_string[:helper_position] + input_string[helper_position2 + len(Settings.CONTROL_MAPPING_DELIMITER):]
lines = input_string.split("\n")
for line in lines:
helper_position = line.find(":")
if helper_position < 0:
continue
key_string = line[:helper_position]
value_string = line[helper_position + 1:].lstrip().rstrip()
if key_string == "sound volume":
self.sound_volume = float(value_string)
elif key_string == "music volume":
self.music_volume = float(value_string)
elif key_string == "screen resolution":
helper_tuple = value_string.split("x")
self.screen_resolution = (int(helper_tuple[0]),int(helper_tuple[1]))
elif key_string == "fullscreen":
self.fullscreen = True if value_string == "True" else False
elif key_string == "control by mouse":
self.control_by_mouse = True if value_string == "True" else False
#----------------------------------------------------------------------------
def sound_is_on(self):
return self.sound_volume > Settings.SOUND_VOLUME_THRESHOLD
#----------------------------------------------------------------------------
def music_is_on(self):
return self.music_volume > Settings.SOUND_VOLUME_THRESHOLD
#----------------------------------------------------------------------------
def current_resolution_index(self):
return next((i for i in xrange(len(Settings.POSSIBLE_SCREEN_RESOLUTIONS)) if self.screen_resolution == Settings.POSSIBLE_SCREEN_RESOLUTIONS[i]),0)
#==============================================================================
class Game(object):
# colors used for players and teams
COLOR_WHITE = 0
COLOR_BLACK = 1
COLOR_RED = 2
COLOR_BLUE = 3
COLOR_GREEN = 4
COLOR_CYAN = 5
COLOR_YELLOW = 6
COLOR_ORANGE = 7
COLOR_BROWN = 8
COLOR_PURPLE = 9
COLOR_NAMES = [
"white",
"black",
"red",
"blue",
"green",
"cyan",
"yellow",
"orange",
"brown",
"purple"
]
STATE_PLAYING = 0
STATE_EXIT = 1
STATE_MENU_MAIN = 2
STATE_MENU_SETTINGS = 3
STATE_MENU_ABOUT = 4
STATE_MENU_PLAY_SETUP = 5
STATE_MENU_MAP_SELECT = 6
STATE_MENU_CONTROL_SETTINGS = 7
STATE_MENU_PLAY = 8
STATE_MENU_RESULTS = 9
STATE_GAME_STARTED = 10
CHEAT_PARTY = 0
CHEAT_ALL_ITEMS = 1
CHEAT_PLAYER_IMMORTAL = 2
VERSION_STR = "0.95"
NUMBER_OF_CONTROLLED_PLAYERS = 4 ##< maximum number of non-AI players on one PC
RESOURCE_PATH = "resources"
MAP_PATH = "maps"
SETTINGS_FILE_PATH = "settings.txt"
#----------------------------------------------------------------------------
def __init__(self):
pygame.mixer.pre_init(22050,-16,2,512) # set smaller audio buffer size to prevent audio lag
pygame.init()
pygame.font.init()
pygame.mixer.init()
self.frame_number = 0
self.player_key_maps = PlayerKeyMaps()
self.settings = Settings(self.player_key_maps)
self.game_number = 0
if os.path.isfile(Game.SETTINGS_FILE_PATH):
debug_log("loading settings from file " + Game.SETTINGS_FILE_PATH)
self.settings.load_from_file(Game.SETTINGS_FILE_PATH)
self.settings.save_to_file(Game.SETTINGS_FILE_PATH) # save the reformatted settings file (or create a new one)
pygame.display.set_caption("Bombman")
self.renderer = Renderer()
self.apply_screen_settings()
self.sound_player = SoundPlayer()
self.sound_player.change_music()
self.apply_sound_settings()
self.apply_other_settings()
self.map_name = ""
self.random_map_selection = False
self.game_map = None
self.play_setup = PlaySetup()
self.menu_main = MainMenu(self.sound_player)
self.menu_settings = SettingsMenu(self.sound_player,self.settings,self)
self.menu_about = AboutMenu(self.sound_player)
self.menu_play_setup = PlaySetupMenu(self.sound_player,self.play_setup)
self.menu_map_select = MapSelectMenu(self.sound_player)
self.menu_play = PlayMenu(self.sound_player)
self.menu_controls = ControlsMenu(self.sound_player,self.player_key_maps,self)
self.menu_results = ResultMenu(self.sound_player)
self.ais = []
self.state = Game.STATE_MENU_MAIN
self.immortal_players_numbers = []
self.active_cheats = set()
#----------------------------------------------------------------------------
def deactivate_all_cheats(self):
self.active_cheats = set()
debug_log("all cheats deactivated")
#----------------------------------------------------------------------------
def activate_cheat(self, what_cheat):
self.active_cheats.add(what_cheat)
debug_log("cheat activated")
#----------------------------------------------------------------------------
def deactivate_cheat(self, what_cheat):
if what_cheat in self.active_cheats:
self.active_cheats.remove(what_cheat)
#----------------------------------------------------------------------------
def cheat_is_active(self, what_cheat):
return what_cheat in self.active_cheats
#----------------------------------------------------------------------------
def get_player_key_maps(self):
return self.player_key_maps
#----------------------------------------------------------------------------
def get_settings(self):
return self.settings
#----------------------------------------------------------------------------
def apply_screen_settings(self):
display_flags = 0
if self.settings.fullscreen:
display_flags += pygame.FULLSCREEN
self.screen = pygame.display.set_mode(self.settings.screen_resolution,display_flags)
screen_center = (Renderer.get_screen_size()[0] / 2,Renderer.get_screen_size()[1] / 2)
pygame.mouse.set_pos(screen_center)
self.renderer.update_screen_info()
#----------------------------------------------------------------------------
def apply_sound_settings(self):
self.sound_player.set_music_volume(self.settings.music_volume)
self.sound_player.set_sound_volume(self.settings.sound_volume)
#----------------------------------------------------------------------------
def apply_other_settings(self):
self.player_key_maps.allow_control_by_mouse(self.settings.control_by_mouse)
#----------------------------------------------------------------------------
def save_settings(self):
self.settings.save_to_file(Game.SETTINGS_FILE_PATH)
#----------------------------------------------------------------------------
def __check_cheat(self, cheat_string, cheat = None):
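    # e.g. typing "party" activates Game.CHEAT_PARTY; calling this with
    # cheat == None (the "revert" string below) deactivates all cheats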
if self.player_key_maps.string_was_typed(cheat_string):
if cheat != None:
self.activate_cheat(cheat)
else:
self.deactivate_all_cheats()
self.player_key_maps.clear_typing_buffer()
#----------------------------------------------------------------------------
## Manages the menu actions and sets self.active_menu.
def manage_menus(self):
new_state = self.state
prevent_input_processing = False
    # check if any cheat was typed:
self.__check_cheat("party",game.CHEAT_PARTY)
self.__check_cheat("herecomedatboi",game.CHEAT_ALL_ITEMS)
self.__check_cheat("leeeroy",game.CHEAT_PLAYER_IMMORTAL)
self.__check_cheat("revert")
self.player_key_maps.get_current_actions() # this has to be called in order for player_key_maps to update mouse controls properly
# ================ MAIN MENU =================
if self.state == Game.STATE_MENU_MAIN:
self.active_menu = self.menu_main
if self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
new_state = [
Game.STATE_MENU_PLAY_SETUP,
Game.STATE_MENU_SETTINGS,
Game.STATE_MENU_ABOUT,
Game.STATE_EXIT
] [self.active_menu.get_selected_item()[0]]
# ================ PLAY MENU =================
elif self.state == Game.STATE_MENU_PLAY:
self.active_menu = self.menu_play
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_PLAYING
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_PLAYING
for player in self.game_map.get_players():
player.wait_for_bomb_action_release()
elif self.active_menu.get_selected_item() == (1,0):
new_state = Game.STATE_MENU_MAIN
self.sound_player.change_music()
self.deactivate_all_cheats()
# ============== SETTINGS MENU ===============
elif self.state == Game.STATE_MENU_SETTINGS:
self.active_menu = self.menu_settings
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_MAIN
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (5,0):
new_state = Game.STATE_MENU_CONTROL_SETTINGS
elif self.active_menu.get_selected_item() == (7,0):
new_state = Game.STATE_MENU_MAIN
# ========== CONTROL SETTINGS MENU ===========
elif self.state == Game.STATE_MENU_CONTROL_SETTINGS:
self.active_menu = self.menu_controls
self.active_menu.update(self.player_key_maps) # needs to be called to scan for pressed keys
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_SETTINGS
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_MENU_SETTINGS
# ================ ABOUT MENU =================
elif self.state == Game.STATE_MENU_ABOUT:
self.active_menu = self.menu_about
if self.active_menu.get_state() in (Menu.MENU_STATE_CONFIRM,Menu.MENU_STATE_CANCEL):
new_state = Game.STATE_MENU_MAIN
# ============== PLAY SETUP MENU ==============
elif self.state == Game.STATE_MENU_PLAY_SETUP:
self.active_menu = self.menu_play_setup
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_MAIN
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
if self.active_menu.get_selected_item() == (0,1):
new_state = Game.STATE_MENU_MAP_SELECT
elif self.active_menu.get_selected_item() == (0,0):
new_state = Game.STATE_MENU_MAIN
# ============== MAP SELECT MENU ==============
elif self.state == Game.STATE_MENU_MAP_SELECT:
self.active_menu = self.menu_map_select
if self.active_menu.get_state() == Menu.MENU_STATE_CANCEL:
new_state = Game.STATE_MENU_PLAY_SETUP
elif self.active_menu.get_state() == Menu.MENU_STATE_CONFIRM:
self.map_name = self.active_menu.get_selected_map_name()
self.random_map_selection = self.active_menu.random_was_selected()
self.game_number = 1 # first game
new_state = Game.STATE_GAME_STARTED
self.deactivate_cheat(Game.CHEAT_PARTY)
# ================ RESULT MENU ================
elif self.state == Game.STATE_MENU_RESULTS:
self.active_menu = self.menu_results
if self.active_menu.get_state() in (Menu.MENU_STATE_CONFIRM,Menu.MENU_STATE_CANCEL):
new_state = Game.STATE_MENU_MAIN
if new_state != self.state: # going to new state
self.state = new_state
self.active_menu.leaving()
self.active_menu.process_inputs(self.player_key_maps.get_current_actions())
#----------------------------------------------------------------------------
def acknowledge_wins(self, winner_team_number, players):
for player in players:
if player.get_team_number() == winner_team_number:
player.set_wins(player.get_wins() + 1)
#----------------------------------------------------------------------------
def run(self):
time_before = pygame.time.get_ticks()
show_fps_in = 0
pygame_clock = pygame.time.Clock()
while True: # main loop
profiler.measure_start("main loop")
dt = min(pygame.time.get_ticks() - time_before,100)
time_before = pygame.time.get_ticks()
pygame_events = []
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.state = Game.STATE_EXIT
pygame_events.append(event)
self.player_key_maps.process_pygame_events(pygame_events,self.frame_number)
if self.state == Game.STATE_PLAYING:
self.renderer.process_animation_events(self.game_map.get_and_clear_animation_events()) # play animations
self.sound_player.process_events(self.game_map.get_and_clear_sound_events()) # play sounds
profiler.measure_start("map rend.")
self.screen.blit(self.renderer.render_map(self.game_map),(0,0))
profiler.measure_stop("map rend.")
profiler.measure_start("sim.")
self.simulation_step(dt)
profiler.measure_stop("sim.")
if self.game_map.get_state() == GameMap.STATE_GAME_OVER:
self.game_number += 1
if self.game_number > self.play_setup.get_number_of_games():
previous_winner = self.game_map.get_winner_team()
self.acknowledge_wins(previous_winner,self.game_map.get_players())
self.menu_results.set_results(self.game_map.get_players())
self.game_map = None
self.state = Game.STATE_MENU_RESULTS # show final results
self.deactivate_all_cheats()
else:
self.state = Game.STATE_GAME_STARTED # new game
elif self.state == Game.STATE_GAME_STARTED:
debug_log("starting game " + str(self.game_number))
previous_winner = -1
if self.game_number != 1:
previous_winner = self.game_map.get_winner_team()
kill_counts = [0 for i in xrange(10)]
win_counts = [0 for i in xrange(10)]
if self.game_map != None:
for player in self.game_map.get_players():
kill_counts[player.get_number()] = player.get_kills()
win_counts[player.get_number()] = player.get_wins()
map_name_to_load = self.map_name if not self.random_map_selection else self.menu_map_select.get_random_map_name()
with open(os.path.join(Game.MAP_PATH,map_name_to_load)) as map_file:
map_data = map_file.read()
self.game_map = GameMap(map_data,self.play_setup,self.game_number,self.play_setup.get_number_of_games(),self.cheat_is_active(Game.CHEAT_ALL_ITEMS))
player_slots = self.play_setup.get_slots()
if self.cheat_is_active(Game.CHEAT_PLAYER_IMMORTAL):
self.immortal_players_numbers = []
for i in xrange(len(player_slots)):
            if player_slots[i] != None and player_slots[i][0] >= 0: # the cheat only applies to human (non-AI) players
self.immortal_players_numbers.append(i) # make the player immortal
self.ais = []
for i in xrange(len(player_slots)):
if player_slots[i] != None and player_slots[i][0] < 0: # indicates AI
self.ais.append(AI(self.game_map.get_players_by_numbers()[i],self.game_map))
for player in self.game_map.get_players():
player.set_kills(kill_counts[player.get_number()])
player.set_wins(win_counts[player.get_number()])
self.acknowledge_wins(previous_winner,self.game_map.get_players()) # add win counts
self.sound_player.change_music()
self.state = Game.STATE_PLAYING
elif self.state == Game.STATE_EXIT:
break
else: # in menu
self.manage_menus()
profiler.measure_start("menu rend.")
self.screen.blit(self.renderer.render_menu(self.active_menu,self),(0,0))
profiler.measure_stop("menu rend.")
pygame.display.flip()
pygame_clock.tick()
if show_fps_in <= 0:
if DEBUG_FPS:
debug_log("fps: " + str(pygame_clock.get_fps()))
show_fps_in = 255
else:
show_fps_in -= 1
self.frame_number += 1
profiler.measure_stop("main loop")
if DEBUG_PROFILING:
debug_log(profiler.get_profile_string())
profiler.end_of_frame()
#----------------------------------------------------------------------------
  ## Filters a list of performed actions so that actions of human players
  # who are not participating in the game are removed.
def filter_out_disallowed_actions(self, actions):
player_slots = self.play_setup.get_slots()
    result = filter(lambda a: (player_slots[a[0]] != None and player_slots[a[0]][0] >= 0) or (a[1] == PlayerKeyMaps.ACTION_MENU), actions)
return result
#----------------------------------------------------------------------------
def simulation_step(self, dt):
actions_being_performed = self.filter_out_disallowed_actions(self.player_key_maps.get_current_actions())
for action in actions_being_performed:
if action[0] == -1: # menu key pressed
self.state = Game.STATE_MENU_PLAY
return
profiler.measure_start("sim. AIs")
for i in xrange(len(self.ais)):
actions_being_performed = actions_being_performed + self.ais[i].play()
profiler.measure_stop("sim. AIs")
players = self.game_map.get_players()
profiler.measure_start("sim. inputs")
for player in players:
player.react_to_inputs(actions_being_performed,dt,self.game_map)
profiler.measure_stop("sim. inputs")
profiler.measure_start("sim. map update")
self.game_map.update(dt,self.immortal_players_numbers)
profiler.measure_stop("sim. map update")
#----------------------------------------------------------------------------
## Sets up a test game for debugging, so that the menus can be avoided.
def setup_test_game(self, setup_number = 0):
if setup_number == 0:
self.map_name = "classic"
self.random_map_selection = False
self.game_number = 1
self.state = Game.STATE_GAME_STARTED
elif setup_number == 1:
self.play_setup.player_slots = [(-1,i) for i in xrange(10)]
self.random_map_selection = True
self.game_number = 1
self.state = Game.STATE_GAME_STARTED
else:
self.play_setup.player_slots = [((i,i) if i < 4 else None) for i in xrange(10)]
self.map_name = "classic"
self.game_number = 1
self.state = Game.STATE_GAME_STARTED
#==============================================================================
if __name__ == "__main__":
profiler = Profiler() # profiler object is global, for simple access
game = Game()
if len(sys.argv) > 1:
if "--test" in sys.argv: # allows to quickly init a game
game.setup_test_game(0)
elif "--test2" in sys.argv:
game.setup_test_game(1)
elif "--test3" in sys.argv:
game.setup_test_game(2)
game.run()
```
#### File: jemisonf/bombman-cs-362/menu_test.py
```python
from bombman import Menu
from stub_sound_player import StubSoundPlayer
import unittest
class MenuTestCase(unittest.TestCase):
def setUp(self):
self.sound_player = StubSoundPlayer()
self.menu = Menu(self.sound_player)
self.menu.items = [("one", "two")]
def testLeaving(self):
""" Test if the leaving function sets the menu_left flag """
menu = self.menu
menu.leaving()
assert menu.menu_left
def testPromptIfNeeded(self):
""" Test if the user is prompted when prompt if needed is called """
menu = self.menu
menu.state = Menu.MENU_STATE_CONFIRM
menu.confirm_prompt_result = None
self.assertEqual(menu.get_selected_item(), (0,0))
menu.prompt_if_needed((0,0))
self.assertEqual(menu.get_state(), Menu.MENU_STATE_CONFIRM_PROMPT)
def testPromptIfNeededWithConditionsNotMet(self):
""" Test if the user is not prompted when the right conditions are not met """
menu = self.menu
menu.state = Menu.MENU_STATE_CONFIRM
menu.confirm_prompt_result = True
self.assertEqual(menu.get_selected_item(), (0,0))
menu.prompt_if_needed((0,0))
self.assertNotEqual(menu.get_state(), Menu.MENU_STATE_CONFIRM_PROMPT)
# TODO this appears to be an actual bug
def testScrollDown(self):
""" Test scrolling down from initial state """
menu = self.menu
menu.scroll(False) # scroll down
scroll_pos_down = menu.get_scroll_position()
self.assertEqual(scroll_pos_down, 1)
def testScrollUp(self):
""" Test scrolling up from modified state """
menu = self.menu
menu.scroll_position = 1
menu.scroll(True)
self.assertEqual(menu.get_scroll_position(), 0)
if __name__ == "__main__":
unittest.main()
```
#### File: jemisonf/bombman-cs-362/player_test.py
```python
from playerClass import Player, Positionable
import unittest
print "Running Player class tests"
class PlayerTestCase(unittest.TestCase):
def setUp(self):
testPlayer = Player()
def testNumber(self):
""" tests to see if a player's number is correctly updated """
numTestP = Player()
numTestP.number = 0
assert(numTestP.number == 0), "player number was not correctly set!"
def testKillCount(self):
""" tests to see if set_kills and get_kills is correctly updated """
killTestP = Player()
killTestP.set_kills(3)
assert(killTestP.get_kills() == 3), "player kills not successfully updated"
def testWinCount(self):
""" tests to see if set_wins and get_wins is correctly updated """
winTestP = Player()
winTestP.set_wins(7)
assert(winTestP.get_wins() == 7), "Player wins not successfully updated"
def testWalk(self):
""" tests to see if walking is correctly updated """
walker = Player()
walker.state = 4 # walking upwards
assert(walker.is_walking() == True)
walker.state = 5 # walking right
assert(walker.is_walking() == True)
walker.state = 6 # walking downwards
assert(walker.is_walking() == True)
walker.state = 7 # walking left
assert(walker.is_walking() == True)
# what if we are idle
walker.state = 2 # idle down state
assert(walker.is_walking() == False)
def testEnemyDetection(self):
""" tests to see if the function is_enemy correctly checks this """
guineaPig = Player()
testCaseP = Player()
guineaPig.team_number = 1
testCaseP.team_number = 2
assert(guineaPig.is_enemy(testCaseP) == True)
guineaPig.team_number = 2
assert(guineaPig.is_enemy(testCaseP) != True)
def testDeath(self):
""" tests to see if death is properly reported by is_dead """
redshirt = Player()
        redshirt.state = 10 # the state for the player being dead
assert(redshirt.is_dead() == True)
```
#### File: jemisonf/bombman-cs-362/stub_sound_player.py
```python
class StubSoundPlayer:
def __init__(self):
pass
def play_once(self, filename):
pass
def set_music_volume(self, new_volume):
pass
def change_music(self):
pass
def play_sound_event(self, sound_event):
pass
def process_events(self, sound_event_list):
pass
```
|
{
"source": "jemisonf/ingredient-phrase-tagger",
"score": 2
}
|
#### File: jemisonf/ingredient-phrase-tagger/server.py
```python
import json
import os
from ingredient_phrase_tagger.training import parse_ingredients
from flask import Flask, request
model_path = os.getenv("MODEL_PATH", "")
app = Flask(__name__)
@app.route("/ingredients", methods=["POST"])
def ingredients():
if request.method == "POST":
body = request.get_json()
crf_output = parse_ingredients._exec_crf_test(body["data"], model_path)
results = json.loads(parse_ingredients._convert_crf_output_to_json(crf_output.rstrip().split("\n")))
return {"data": results}
```
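For reference, the endpoint can be exercised with Flask's built-in test client; a minimal sketch, assuming a trained CRF model is available at `MODEL_PATH` and with purely illustrative ingredient strings:
```python
from server import app

client = app.test_client()
response = client.post(
    "/ingredients",
    json={"data": ["1 cup flour", "2 tablespoons sugar"]},  # illustrative input
)
print(response.status_code)   # 200 on success
print(response.get_json())    # {"data": [...parsed ingredient phrases...]}
```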
|
{
"source": "JEMIX-LTD/label-studio",
"score": 2
}
|
#### File: tests/data_manager/test_api_actions.py
```python
import pytest
import json
from ..utils import make_task, make_annotation, make_prediction, project_id
from projects.models import Project
@pytest.mark.parametrize(
"tasks_count, annotations_count, predictions_count",
[
[10, 2, 2],
],
)
@pytest.mark.django_db
def test_action_delete_all_tasks(tasks_count, annotations_count, predictions_count, business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
project = Project.objects.get(pk=project_id)
for _ in range(0, tasks_count):
task_id = make_task({"data": {}}, project).id
print('TASK_ID: %s' % task_id)
for _ in range(0, annotations_count):
print('COMPLETION')
make_annotation({"result": []}, task_id)
for _ in range(0, predictions_count):
make_prediction({"result": []}, task_id)
business_client.post(f"/api/dm/actions?project={project_id}&id=delete_tasks",
json={'selectedItems': {"all": True, "excluded": []}})
assert project.tasks.count() == 0
@pytest.mark.parametrize(
"tasks_count, annotations_count, predictions_count",
[
[10, 2, 2],
],
)
@pytest.mark.django_db
def test_action_delete_all_annotations(tasks_count, annotations_count, predictions_count, business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
project = Project.objects.get(pk=project_id)
for _ in range(0, tasks_count):
task_id = make_task({"data": {}}, project).id
print('TASK_ID: %s' % task_id)
for _ in range(0, annotations_count):
print('COMPLETION')
make_annotation({"result": []}, task_id)
for _ in range(0, predictions_count):
make_prediction({"result": []}, task_id)
# get next task - every task is already annotated, so none is available (expect 404)
status = business_client.post(f"/api/dm/actions?project={project_id}&id=next_task",
json={'selectedItems': {"all": True, "excluded": []}})
assert status.status_code == 404
business_client.post(f"/api/dm/actions?project={project_id}&id=delete_tasks_annotations",
json={'selectedItems': {"all": True, "excluded": []}})
# get next task - annotations were deleted, so a task is available again (expect 200)
status = business_client.post(f"/api/dm/actions?project={project_id}&id=next_task",
json={'selectedItems': {"all": True, "excluded": []}})
assert status.status_code == 200
```
|
{
"source": "jemjemejeremy/ha-lta",
"score": 3
}
|
#### File: custom_components/ha-lta/sensor.py
```python
from datetime import datetime, timedelta, timezone
import logging
import async_timeout
from dateutil import parser, tz
from ltapysg import get_bus_arrival
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=1)
CONF_API_KEY = "api_key"
CONF_BUS_STOP_CODE = "bus_stop_code"
BUS_ARRIVING = "ARR"
BUS_UNAVAILABLE = "NA"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_BUS_STOP_CODE): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Setup sensor and initialize platform with configuration values"""
hass.data["lta"] = {"buses": []}
def create_bus_sensor(
bus_number, bus_order, bus_latitude, bus_longitude, bus_timing
):
def convert_datetime(bustime):
"""Convert UTC 8+ datetime to the number of minutes before bus arrive"""
if bustime:
time_bus = parser.parse(bustime).astimezone(tz.UTC)
time_now = datetime.now(tz=timezone.utc)
time_diff = time_bus - time_now
time_diff_formatted = round(time_diff.total_seconds() / 60)
if time_diff_formatted <= 1:
return BUS_ARRIVING
else:
return time_diff_formatted
else:
return BUS_UNAVAILABLE
bus_dict = {
"unique_id": f"{config.get(CONF_BUS_STOP_CODE)}_{bus_number}_{bus_order}",
"attributes": {} if (bus_latitude == "0" and bus_longitude == "0") or (bus_latitude == "" and bus_longitude == "") else {
"latitude": bus_latitude,
"longitude": bus_longitude
},
"state": convert_datetime(bus_timing),
}
return bus_dict
async def async_update_data():
"""Poll API and update data to sensors"""
async with async_timeout.timeout(20):
sensors = []
buses = []
try:
data = await get_bus_arrival(
config.get(CONF_API_KEY), config.get(CONF_BUS_STOP_CODE)
)
for bus in data:
buses.append(bus["ServiceNo"])
if not hass.data["lta"]["buses"]:
hass.data["lta"]["buses"] = list(buses)
else:
if len(buses) > len(hass.data["lta"]["buses"]):
hass.data["lta"]["buses"] = list(buses)
for bus in hass.data["lta"]["buses"]:
test = next(
(x for x in data if x["ServiceNo"] == bus), {"ServiceNo": ""}
)
sensors.append(
create_bus_sensor(
bus,
"1",
test["NextBus"]["Latitude"],
test["NextBus"]["Longitude"],
test["NextBus"]["EstimatedArrival"],
)
)
sensors.append(
create_bus_sensor(
bus,
"2",
test["NextBus2"]["Latitude"],
test["NextBus2"]["Longitude"],
test["NextBus2"]["EstimatedArrival"],
)
)
sensors.append(
create_bus_sensor(
bus,
"3",
test["NextBus3"]["Latitude"],
test["NextBus3"]["Longitude"],
test["NextBus3"]["EstimatedArrival"],
)
)
except Exception as e:
    _LOGGER.error(
        "Unable to interact with Datamall, ensure you have an "
        "internet connection and a proper bus stop code: %s", e
    )
for bus in hass.data["lta"]["buses"]:
sensors.append(create_bus_sensor(bus, "1", "", "", "",))
sensors.append(create_bus_sensor(bus, "2", "", "", "",))
sensors.append(create_bus_sensor(bus, "3", "", "", "",))
return sensors
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="sensor",
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
)
await coordinator.async_refresh()
async_add_entities(
LandTransportSensor(coordinator, idx)
for idx, ent in enumerate(coordinator.data)
)
class LandTransportSensor(Entity):
"""
Sensor that reads bus arrival data from LTA's Datamall.
The Datamall provides transport related data.
"""
def __init__(self, coordinator, idx):
"""Initialize the sensor."""
self.coordinator = coordinator
self.idx = idx
self._attributes = coordinator.data[idx]["attributes"]
self._unique_id = coordinator.data[idx]["unique_id"]
self._state = coordinator.data[idx]["state"]
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return self.coordinator.data[self.idx]["unique_id"]
@property
def icon(self):
"""Return the icon of the sensor."""
if(self.coordinator.last_update_success):
return "mdi:bus-clock"
else:
return "mdi:bus-alert"
@property
def device_state_attributes(self):
"""Return the attributes of the sensor"""
return self.coordinator.data[self.idx]["attributes"]
@property
def state(self):
"""Return the state of the sensor."""
return self.coordinator.data[self.idx]["state"]
@property
def should_poll(self):
return False
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TIME_MINUTES
@property
def available(self):
"""Return the availability of sensor"""
return self.coordinator.last_update_success
async def async_added_to_hass(self):
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update sensor data"""
await self.coordinator.async_request_refresh()
```
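The heart of the sensor state is the `convert_datetime` helper above: Datamall reports an ISO-8601 arrival timestamp (UTC+8), and the sensor turns it into whole minutes from now, collapsing anything at or under one minute to `ARR`. A standalone sketch of that conversion with an illustrative timestamp:
```python
from datetime import datetime, timezone
from dateutil import parser, tz

BUS_ARRIVING = "ARR"
BUS_UNAVAILABLE = "NA"

def convert_datetime(bustime):
    """Convert an ISO-8601 arrival timestamp to whole minutes from now."""
    if not bustime:
        return BUS_UNAVAILABLE
    time_bus = parser.parse(bustime).astimezone(tz.UTC)
    minutes = round((time_bus - datetime.now(tz=timezone.utc)).total_seconds() / 60)
    return BUS_ARRIVING if minutes <= 1 else minutes

# illustrative timestamp, not real API output
print(convert_datetime("2024-06-01T08:05:00+08:00"))
```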
|
{
"source": "jeml-lang/jeml-py",
"score": 2
}
|
#### File: jeml-py/jeml/decoder.py
```python
import sys
from antlr4 import *
from jeml.engine.jemlLexer import jemlLexer
from jeml.engine.jemlParser import jemlParser
from jeml.engine.jemlListener import jemlListener
from jeml.engine.jemlErrorHandler import ErrorHandler
def parse(input, filename=None):
lexer = jemlLexer(input)
stream = CommonTokenStream(lexer)
parser = jemlParser(stream)
lexer.removeErrorListeners()
parser.removeErrorListeners()
lexer.addErrorListener(ErrorHandler(filename))
parser.addErrorListener(ErrorHandler(filename))
AST = parser.document()
listener = jemlListener()
walker = ParseTreeWalker()
walker.walk(listener, AST)
return listener.jeml
def from_string(string):
input = InputStream(string)
return parse(input)
def from_file(file):
input = FileStream(file)
return parse(input, file)
```
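A quick round trip through the decoder might look like the sketch below. The document syntax is inferred from the `to_string` tests that follow (bare keys, values, and brace-delimited maps), so treat the sample string as an assumption rather than canonical JEML:
```python
from jeml import from_string

doc = from_string("""
server {
    host "localhost"
    port 8080
}
""")
print(doc)  # expected shape: {'server': {'host': 'localhost', 'port': 8080}}
```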
#### File: jeml-py/tests/to_string.py
```python
import os
import unittest
from jeml import from_string, from_file, to_string
class JEMLParsing(unittest.TestCase):
def setUp(self):
if "tests" not in os.getcwd():
os.chdir(os.getcwd() + "/tests/")
def test_dict_basic(self):
basic_dict = {'_val1': {'_val1-1': 1, '_val1-2': '_val1 string'}}
basic_string = to_string(basic_dict)
self.assertEqual(basic_string,
"""_val1 {
_val1-1 1
_val1-2 "_val1 string"
}""".strip())
def test_dict_nested_maps(self):
basic_dict_nested_maps = {'_val1': {'_val1-1': {'_val1-1-1': True},'_val1-2': {'_val1-2-1': {},'_val1-2-2': {}}}}
basic_dict_nested_string = to_string(basic_dict_nested_maps)
self.assertEqual(basic_dict_nested_string,
"""
_val1 {
_val1-1 {
_val1-1-1 true
}
_val1-2 {
_val1-2-1 {}
_val1-2-2 {}
}
}""".strip())
def test_dict_nested_list(self):
basic_dict_nested_lists = {'_val1': { '_val1-1': [[], [], [], [], []], '_val1-2': [[[[]]]] } }
basic_dict_nested_lists_string = to_string(basic_dict_nested_lists)
self.assertEqual(basic_dict_nested_lists_string,
"""
_val1 {
_val1-1 [[] [] [] [] []]
_val1-2 [[[[]]]]
}
""".strip())
if __name__ == '__main__':
unittest.main(verbosity=2)
```
|
{
"source": "JemLukeBingham/used_car_scraper",
"score": 3
}
|
#### File: JemLukeBingham/used_car_scraper/automart_spider.py
```python
import scrapy
from text_formatting import format_mileage, format_year, format_price
class AutomartSpider(scrapy.Spider):
name = 'automart'
start_urls = [
'https://www.automart.co.za/used-cars/',
]
def parse(self, response):
for result in response.xpath("//div[@class='search-results']"):
car = {}
make_model = result.xpath('div/div/p/em/text()').getall()
# require data has exact form [make, model]
if len(make_model) == 2:
car['make'] = make_model[0]
car['model'] = make_model[1]
else:
self.logger.warning("Received incorrect data: %r" % make_model)
car['dealer_phone'] = result.css("a#phnum::text").get()
car['mileage'] = format_mileage(result.xpath("div/div/div/i[@class='mi']/em/text()").get())
car['year'] = format_year(result.xpath("div/div/div/i[@class='ye']/em/em/text()").get())
car['price'] = format_price(result.xpath("div/div/span/em/text()").get())
car['link'] = response.url + result.xpath("div/div/a/@href").get()
car['province'] = result.xpath("div/div/div/i[@class='re']/em/em/text()").get()
car['description'] = result.xpath("div/div/h3/a/em/text()").get()
car['long_description'] = result.xpath("div/div/span/text()").get()
yield car
next_page = response.xpath("//nav/ul/li/a[@aria-label='Next']/@href").get()
if next_page is not None:
yield response.follow(next_page, self.parse)
```
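Spiders like this are usually driven by the Scrapy CLI (`scrapy crawl automart`) or programmatically via `CrawlerProcess`; a minimal sketch of the latter, with an illustrative output file:
```python
from scrapy.crawler import CrawlerProcess
from automart_spider import AutomartSpider

process = CrawlerProcess(settings={
    "FEEDS": {"cars.jl": {"format": "jsonlines"}},  # illustrative output path
})
process.crawl(AutomartSpider)
process.start()  # blocks until the crawl finishes
```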
#### File: JemLukeBingham/used_car_scraper/gumtree_spider.py
```python
import scrapy
from urllib.parse import urljoin
from text_formatting import format_mileage, format_year, format_price
class GumtreeSpider(scrapy.Spider):
name = 'gumtree'
base_url = 'https://www.gumtree.co.za/'
start_urls = [
urljoin(base_url, 's-cars-bakkies/v1c9077p1'),
]
def parse(self, response):
for result in response.xpath("//div[@class='view']/\
div[@id='srpAds']/\
div[@class='related-items']/\
div[@class='related-content']/\
div/div[@class='related-ad-content']"):
car = {}
price = result.xpath("div[@class='price']/span/span[@class='ad-price']/text()").get()
car['price'] = format_price(price)
car['description'] = result.xpath("div[@class='description-content']/\
span[@class='related-ad-description']/\
span[@class='description-text']/text()").get()
yield car
next_page = response.xpath("//div[@class='pagination-content']/span/a[@class=' icon-pagination-right']/@href").get()
if next_page is not None:
yield response.follow(urljoin(self.base_url, next_page), self.parse)
```
|
{
"source": "jemmypotter/Python",
"score": 3
}
|
#### File: jemmypotter/Python/ENGR 102.py
```python
critics={'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, 'The Night Listener': 3.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 3.5},
'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'<NAME>': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'The Night Listener': 4.5, 'Superman Returns': 4.0, 'You, Me and Dupree': 2.5},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
# a dictionary for movie critics and their ratings
print (critics['<NAME>'])#output is {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'The Night Listener': 3.0, 'You, Me and Dupree': 2.5}
print (critics['<NAME>']['Lady in the Water']) #output is 2.5
from math import sqrt
#returns a distance based similarity score for person1 and person2
def sim_distance(prefs,person1,person2):
    si={} #get the list of shared items
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item]=1 #mark the item as rated by both critics
    if len(si)==0:
        return 0 #if there are no shared ratings, the similarity is 0
    sum_of_squares=sum([pow(prefs[person1][item]-prefs[person2][item],2)
                        for item in prefs[person1] if item in prefs[person2]])
    return 1/(1+sqrt(sum_of_squares))
def distance(dict,per1,per2):
shared_items={}
for item in dict[per1]: #an item is in the dict of person 1
if item in dict[per2]: #if same item is in the dict of person 2
shared_items[item]=1 #the value will be 1
if len(shared_items)==0:
return 0
inital_dis=sum([pow(dict[per1][item]-dict[per2][item],2)
for item in dict[per1] if item in dict[per2] ])
all_sum=sqrt(inital_dis)
return 1/(1+all_sum)
print (distance(critics,'<NAME>','Toby'))
print (distance(critics, '<NAME>', '<NAME>'))
# Pearson correlation score
def sim_pearson(dict,pers1,pers2):
si={}
for item in dict[pers1]:#an item is in the dict for person 1
if item in dict[pers2]: #the item is also is in the dict for person 2
si[item]=1 #the value will be 1
n=len(si)
if n==0: #if there is no common item then the value will be 0
return 0
#adding all the preferences
sum1=sum([dict[pers1][item] for item in si])
sum2=sum([dict[pers2][item] for item in si])
sum1sq=sum([pow(dict[pers1][item],2) for item in si])
sum2sq=sum([pow(dict[pers2][item],2) for item in si])
All_Sum=sum([dict[pers1][item]*dict[pers2][item] for item in si])
num=All_Sum-(sum1*sum2/n)
den=sqrt((sum1sq-pow(sum1,2)/n)*(sum2sq-pow(sum2,2)/n))
if den==0:
return 0
r= num/den
return r
print (sim_pearson(critics,'<NAME>','Toby'))
#returns the best matches for person from the critics dict
#number of results and similarity function are optinal params
```
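The trailing comments describe a best-matches helper that never made it into the file; a sketch of the usual implementation in this style of recommender, reusing the `sim_pearson` defined above:
```python
def top_matches(prefs, person, n=5, similarity=sim_pearson):
    """Return the n critics most similar to the given person."""
    scores = [(similarity(prefs, person, other), other)
              for other in prefs if other != person]
    scores.sort(reverse=True)
    return scores[:n]

print(top_matches(critics, 'Toby', n=3))
```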
#### File: jemmypotter/Python/engr_102.py
```python
import re
from clusters import *  # hcluster, sim_distance and drawdendrogram are used below
from tkinter import *
from tkinter.ttk import Combobox
import tkinter.filedialog as tkFileDialog
from PIL import Image
from PIL import ImageTk
# Declare district start line
DISTRICT_START = 'Kaynak: YSK'
# Declare constants for the view class
DENDROGRAM_FILE_NAME = 'clusters.jpg'
DISTRICT_BUTTON = 0
POLITICAL_PARTY_BUTTON = 1
# GUI class
class View:
# Constructor
def __init__(self, data_center):
# This variable keeps track of the previously pressed button. If the cluster districts button is
# pressed previously, refine analysis button will call cluster_districts method of the data center class.
# If the cluster political parties button is pressed previously, refine analysis button will call
# cluster_political_parties method from the data center class.
self.last_button_pressed = 0
# Save the data center instance to self
self.data_center = data_center
# Set the root view
self.root = Tk()
# Declare instance variables
# Window height width for the resolution 1366x768
self.window_width = 900.0
self.window_height = 650.0
# Declare the GUI components that should be accessible from different methods inside this class
# Declare row_3 and row_4 frames
self.row_3 = None
self.row_4 = None
# Declare districts listbox
self.districts_listbox = None
# Declare canvas to show the dendrogram image
self.canvas = None
# Declare the image to be displayed in canvas
self.dendrogram_image = None
# Declare threshold combobox
self.threshold_combobox = None
# Declare input file name
self.input_filename = ""
# Define the GUI
self.define_gui()
# Shows the view
def show(self):
# Set the window title
self.root.title("Clustering")
# Hide the root window
self.root.withdraw()
self.root.update_idletasks()
# Calculate width and height of the window
# The GUI is designed on a system where the resolution is 1366x768,
# for this program to scale properly in other resolutions, default width and height of the system is multiplied
# by the following ratios
width = self.root.winfo_screenwidth() * (self.window_width / 1366)
height = self.root.winfo_screenheight() * (self.window_height / 768)
# Calculate the position of the window in order to center it
x = (self.root.winfo_screenwidth() / 2.0) - (width / 2.0)
y = (self.root.winfo_screenheight() / 2.0) - (height / 2.0)
# Apply the calculated geometry
self.root.geometry("%dx%d+%d+%d" % (width, height, x, y))
# Show the root window
self.root.deiconify()
# Show window
Tk.mainloop(self.root)
# Defines how the user interface will look like
def define_gui(self):
# Create and place the label
label = Label(text='Election Data Analysis Tool v. 1.0', font=("Helvetica", 12, 'bold'), bg='red', fg='white')
label.grid(row=0, column=0, sticky=EW)
# Expand the label along the row
Grid.columnconfigure(self.root, 0, weight=1)
# Create and place the load data button
load_election_data_button = Button(text='Load Election Data',
height=2, width=40, command=self.load_election_data_button_pressed)
load_election_data_button.grid(row=1, column=0, padx=10, pady=10)
# Create a frame in row 2 so pack() can be used inside of it
row_2 = Frame(self.root)
row_2.grid(row=2)
# Create cluster district button in the frame
cluster_district_button = Button(row_2, text='Cluster Districts', height=3, width=40,
command=self.cluster_districts_button_pressed)
# Place the button to the side left of the frame
cluster_district_button.pack(side=LEFT)
# Create cluster parties button in the frame
cluster_parties_button = Button(row_2, text='Cluster Political Parties', height=3, width=40,
command=self.cluster_parties_button_pressed)
# Place the button to the side right of the frame
cluster_parties_button.pack(side=RIGHT)
# Create a frame in row 3 so pack() can be used inside of it
self.row_3 = row_3 = Frame(self.root)
# Create and place the X scrollbar of the canvas
canvas_x_scrollbar = Scrollbar(row_3, orient=HORIZONTAL)
canvas_x_scrollbar.pack(side=BOTTOM, fill=X)
# Create and place the Y scrollbar of the canvas
canvas_y_scrollbar = Scrollbar(row_3, orient=VERTICAL)
canvas_y_scrollbar.pack(side=RIGHT, fill=Y)
# Create the canvas which will display the dendrogram image
self.canvas = canvas = Canvas(row_3, xscrollcommand=canvas_x_scrollbar.set,
yscrollcommand=canvas_y_scrollbar.set, width=770, height=300)
# Place the canvas inside the frame
canvas.pack(fill=BOTH, expand=YES)
# Set up the x and y scrollbars for the canvas
canvas_x_scrollbar.config(command=canvas.xview)
canvas_y_scrollbar.config(command=canvas.yview)
# Create a frame in row 4 so pack() can be used inside of it
self.row_4 = row_4 = Frame(self.root)
# Create the districts label
districts_label = Label(row_4, text='Districts:')
districts_label.pack(side=LEFT)
# Create a scrollbar for the districts listbox
districts_scrollbar = Scrollbar(row_4)
# Create and place the listbox for districts
districts_listbox = Listbox(row_4, yscrollcommand=districts_scrollbar.set, height=9, selectmode=EXTENDED)
districts_listbox.pack(side=LEFT)
# Save the listbox to the instance variable
self.districts_listbox = districts_listbox
# Configure the listbox in order to keep the selected item after the user clicks on elsewhere
districts_listbox.configure(exportselection=False)
# Place and set the scrollbar for the districts listbox
districts_scrollbar.pack(side=LEFT, fill=Y)
districts_scrollbar.config(command=districts_listbox.yview)
# Create and place the threshold label
threshold_label = Label(row_4, text='Threshold:')
threshold_label.pack(side=LEFT)
# Create and place the threshold combobox
threshold_combobox = Combobox(row_4,
values=['0%', '1%', '10%', '20%', '30%', '40%', '50%'], width=6, state="readonly")
threshold_combobox.pack(side=LEFT)
# Save the combobox to the instance variable
self.threshold_combobox = threshold_combobox
# Set the current element in the combobox as the first element
threshold_combobox.current(0)
# Create and place the refine analysis button
refine_analysis_button = Button(row_4, text='Refine Analysis', height=2, width=40,
command=self.refine_analysis_button_pressed)
refine_analysis_button.pack(side=LEFT)
# Method to handle presses to the load election data button
def load_election_data_button_pressed(self):
# Get the selected file name from the tkDialog
self.input_filename = tkFileDialog.askopenfilename(initialdir='/', title='Select file',
filetypes=(('text files', '*.txt'), ('all files', '*.*')))
# If the user has selected a file name
if self.input_filename != '':
# Parse the file
self.data_center.parse_input(self.input_filename)
# Add the parsed districts obtained from the data center to the districts listbox
for district_name in self.data_center.district_names:
self.districts_listbox.insert(END, district_name)
# Method to handle presses to the cluster districts button
def cluster_districts_button_pressed(self):
# Set the previously pressed button
self.last_button_pressed = DISTRICT_BUTTON
# Clear the selection of the districts listbox
self.districts_listbox.selection_clear(0, END)
# Check if there is an input file
if self.input_filename != '':
# Try to create a dendrogram
try:
# Command the data center to create a dendrogram image of the clustered districts with
# the selected districts and the threshold
self.data_center.cluster_districts([], threshold=int(self.threshold_combobox.get()[:-1]))
# Open the dendrogram image using PIL
dendrogram_image = Image.open(DENDROGRAM_FILE_NAME)
# Convert image to the Tkinter loadable format using PhotoImage and save its instance to the view class
# in order to prevent it from getting garbage collected
self.dendrogram_image = ImageTk.PhotoImage(dendrogram_image)
# Load image into the canvas
self.canvas.create_image(0, 0, image=self.dendrogram_image, anchor='nw')
# If the selection is not suitable for a dendrogram
except (ZeroDivisionError, IndexError):
# Clear the canvas
self.canvas.delete("all")
finally:
# Set up the canvas' scroll region
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
# Place analysis components to the root grid
self.place_analysis_on_grid()
# Method to handle presses to the cluster parties button
def cluster_parties_button_pressed(self):
# Set the previously pressed button
self.last_button_pressed = POLITICAL_PARTY_BUTTON
# Clear the selection of the districts listbox
self.districts_listbox.selection_clear(0, END)
# Check if there is an input file
if self.input_filename != '':
# Try to create a dendrogram
try:
# Command the data center to create a dendrogram image of the clustered political parties with
# the selected districts and the threshold
self.data_center.cluster_political_parties([], threshold=int(self.threshold_combobox.get()[:-1]))
# Open the dendrogram image using PIL
dendrogram_image = Image.open(DENDROGRAM_FILE_NAME)
# Convert image to the Tkinter loadable format using PhotoImage and save its instance to the view class
# in order to prevent it from getting garbage collected
self.dendrogram_image = ImageTk.PhotoImage(dendrogram_image)
# Load image into the canvas
self.canvas.create_image(0, 0, image=self.dendrogram_image, anchor='nw')
# If the selection is not suitable for a dendrogram
except (ZeroDivisionError, IndexError):
# Clear the canvas
self.canvas.delete("all")
finally:
# Set up the canvas' scroll region
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
# Place analysis components to the root grid
self.place_analysis_on_grid()
# Method to handle presses to the refine analysis button
def refine_analysis_button_pressed(self):
# Get selected districts from the districts listbox
selected_districts = [self.data_center.district_names[index] for index in self.districts_listbox.curselection()]
# Try to create a dendrogram
try:
# If the last pressed button is cluster districts
if self.last_button_pressed == DISTRICT_BUTTON:
# Command the data center to create a dendrogram image of clustered districts with
# the selected districts and the threshold
self.data_center.cluster_districts(selected_districts,
int(self.threshold_combobox.get()[:-1]))
# If the last pressed button is cluster political parties
else:
# Command the data center to create a dendrogram image of the clustered political parties with
# the selected districts and the threshold
self.data_center.cluster_political_parties(selected_districts,
int(self.threshold_combobox.get()[:-1]))
# Open the dendrogram image using PIL
dendrogram_image = Image.open(DENDROGRAM_FILE_NAME)
# Convert image to the Tkinter loadable format using PhotoImage and save its instance to the view class
# in order to prevent it from getting garbage collected
self.dendrogram_image = ImageTk.PhotoImage(dendrogram_image)
# Load image into the canvas
self.canvas.create_image(0, 0, image=self.dendrogram_image, anchor='nw')
# If the selection is not suitable for a dendrogram
except (ZeroDivisionError, IndexError):
# Clear the canvas
self.canvas.delete("all")
finally:
# Set up the canvas' scroll region
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
# Method to add row_3 and row_4 frames to the root grid
def place_analysis_on_grid(self):
self.row_3.grid(row=3)
self.row_4.grid(row=4)
# District class which holds data about a district
class District:
# Constructor
def __init__(self, name):
# Declare instance variables
self.name = name
self._election_results = {}
# Method to add a political party acronym and vote percentage to the district
def add_political_party(self, acronym, vote_percentage):
# Add given values to the election results dictionary
self._election_results[acronym] = vote_percentage
# Returns the given political party's vote percentage in this district
def get_political_party_percentage(self, acronym):
# Try to get vote percentage of the given political party from the election results of self
try:
return self._election_results[acronym]
# If there is no vote percentage for the given party in this district, return 0
except KeyError:
return 0.0
# Political party class which holds data about a political party
class PoliticalParty:
# Constructor
def __init__(self, acronym):
# Declare instance variables
self.acronym = acronym
self._election_results = {}
self.vote_count = 0
# Method to add a district name and vote percentage to the political party
def add_district(self, name, vote_percentage, count):
# Add given values to the election results dictionary
self._election_results[name] = vote_percentage
self.vote_count += count
# Returns the vote percentage of self for the given district
def get_district_percentage(self, district_name):
# Try to get vote percentage of the given district from the election results of self
try:
return self._election_results[district_name]
# If there is no vote percentage for the given district in this political party, return 0
except KeyError:
return 0.0
# Class to be used as data center for the whole project
class DataCenter:
# Constructor
def __init__(self):
# Declare instance variables
self.districts = {}
self.political_parties = {}
self.district_names = []
self.political_party_names = []
self.total_vote_count = 0
self.political_party_vote_percentages = {}
# Parses the input file using the given name and populates necessary data in self
def parse_input(self, txt_filename):
# While the file is open
with open(txt_filename, 'r') as txt_file:
# Get a list of lines excluding line terminators
lines = [line.rstrip('\n') for line in txt_file]
# Regular expression to match lines starting with two capital letters
political_party_expression = re.compile(r'(^[A-Z][A-Z].*)')
# Declare district name variable
district_name = ''
# For each line in the input file
for i in range(len(lines)):
# If the line corresponds to a district start
if DISTRICT_START in lines[i]:
# Get the district name
district_name = lines[i+1]
# Instantiate a district object with the parsed district name and save it to the districts dictionary
self.districts[district_name] = District(district_name)
# If the line contains a vote percentage of a political party and the party is not BGMSZ
elif political_party_expression.search(lines[i]) and 'BGMSZ' not in lines[i]:
# Split the line by tab character
split_list = lines[i].split('\t')
# Get acronym and the percentage and vote count
acronym = split_list[0]
percentage = float(split_list[-1][1:])
count = int(split_list[-2].replace('.', ''))
# Try to add a district to an instance of a political party stored in the dictionary
try:
self.political_parties[acronym].add_district(district_name, percentage, count)
# If the political party does not exist in the dictionary
except KeyError:
# Instantiate a political party object with the parsed acronym and save it to the dictionary
self.political_parties[acronym] = PoliticalParty(acronym)
# Add the parsed district to the created political party object
self.political_parties[acronym].add_district(district_name, percentage, count)
# Add the parsed political party acronym and the vote percentage to the previously parsed district
self.districts[district_name].add_political_party(acronym, percentage)
# Add total vote count in the district to total count in self
elif 'Toplam\t' in lines[i]:
self.total_vote_count += int(lines[i].split('\t')[-1].replace('.', ''))
# Create political party names and district names from the dictionary keys
self.political_party_names = list(self.political_parties.keys())
self.district_names = sorted(list(self.districts.keys()))
# For each political party
for political_party_name in self.political_party_names:
# Calculate its total vote percentage
self.political_party_vote_percentages[political_party_name] = \
(self.political_parties[political_party_name].vote_count / float(self.total_vote_count)) * 100.0
# Method that clusters districts using the given district list and threshold. This method creates a
# dendrogram image file to be read into the canvas in view. Throws ZeroDivisionError
def cluster_districts(self, selected_districts, threshold=0):
# Assert that dictionaries are filled
assert self.districts != {} and self.political_parties != {}
# If no district is selected by the user, cluster all districts
if not selected_districts:
selected_districts = self.district_names
# Filter out political party names using their total vote percentages and the given threshold value
political_party_names = [political_party for political_party in self.political_party_names
if self.political_party_vote_percentages[political_party] >= threshold]
# Initialize the matrix to be clustered. Rows correspond to districts, columns correspond to political parties
cluster_matrix = [[0.0]*len(political_party_names) for i in range(len(selected_districts))]
# For each row
for i in range(len(selected_districts)):
# For each column
for j in range(len(political_party_names)):
# Each cell gets filled by the vote percentage of the party j in the district i
cluster_matrix[i][j] = self.districts[selected_districts[i]]\
.get_political_party_percentage(political_party_names[j])
# Create a hierarchical cluster using euclidean distance
cluster = hcluster(cluster_matrix, distance=sim_distance)
# Create the dendrogram image
drawdendrogram(cluster, selected_districts)
# Method that clusters political parties using the given district list and threshold. This method creates a
# dendrogram image file to be read into the canvas in view. Throws ZeroDivisionError
def cluster_political_parties(self, selected_districts, threshold=0):
# Assert that dictionaries are filled
assert self.districts != {} and self.political_parties != {}
# If no district is selected by the user, cluster all districts
if not selected_districts:
selected_districts = self.district_names
# Filter out political party names using their total vote percentages and the given threshold value
political_party_names = [political_party for political_party in self.political_party_names
if self.political_party_vote_percentages[political_party] >= threshold]
# Initialize the matrix to be clustered. Rows correspond to political parties, columns correspond to districts.
cluster_matrix = [[0.0] * len(selected_districts) for i in range(len(political_party_names))]
# For each row
for i in range(len(political_party_names)):
# For each column
for j in range(len(selected_districts)):
# Each cell gets filled by the vote percentage of the party i in the district j
cluster_matrix[i][j] = self.political_parties[political_party_names[i]] \
.get_district_percentage(selected_districts[j])
# Create a hierarchical cluster using euclidean distance
cluster = hcluster(cluster_matrix, distance=sim_distance)
# Create the dendrogram image
drawdendrogram(cluster, political_party_names)
# Main method
def main():
# Initialize the data center
data_center = DataCenter()
# Initialize and show the GUI
gui = View(data_center)
gui.show()
# Execute the main method
if __name__ == "__main__":
main()
```
#### File: jemmypotter/Python/Followers.py
```python
import xlrd
from tkinter.filedialog import askopenfilename
def path():
p=askopenfilename()
return p
def part():
p=path()
workbook=xlrd.open_workbook(p)
worksheet=workbook.sheet_by_index(0)
parts=[]
for i in range(9, 21):
party = worksheet.cell_value(10, i)
parts.append(party)
return parts
def dists():
path1=path()
wb=xlrd.open_workbook(path1)
ws=wb.sheet_by_index(0)
di=[]
for i in range(11, 50):
district = ws.cell_value(i, 2)
di.append(district)
return di
print(dists())
def check():
path1=path()
parties=part()
districts=dists()
whole={}
workbook=xlrd.open_workbook(path1)
worksheet=workbook.sheet_by_index(0)
vote1 = []
vote2 = []
vote3 = []
vote4 = []
vote5 = []
vote6 = []
vote7 = []
vote8 = []
vote9 = []
vote10 = []
vote11 = []
vote12 = []
for i in range(11,50):
vote_saadet=worksheet.cell_value(i,9)
vote_btp=worksheet.cell_value(i,10)
vote_tkp=worksheet.cell_value(i,11)
vote_vatan=worksheet.cell_value(i,12)
vote_bbp=worksheet.cell_value(i,13)
vote_chp=worksheet.cell_value(i,14)
vote_ak=worksheet.cell_value(i,15)
vote_dp=worksheet.cell_value(i,16)
vote_mhp=worksheet.cell_value(i,17)
vote_iyi=worksheet.cell_value(i,18)
vote_hdp=worksheet.cell_value(i,19)
vote_dsp=worksheet.cell_value(i,20)
vote1.append(vote_saadet)
vote2.append(vote_btp)
vote3.append(vote_tkp)
vote4.append(vote_vatan)
vote5.append(vote_bbp)
vote6.append(vote_chp)
vote7.append(vote_ak)
vote8.append(vote_dp)
vote9.append(vote_mhp)
vote10.append(vote_iyi)
vote11.append(vote_hdp)
vote12.append(vote_dsp)
whole[parties[0]] = vote1
whole[parties[1]] = vote2
whole[parties[2]] = vote3
whole[parties[3]] = vote4
whole[parties[4]] = vote5
whole[parties[5]] = vote6
whole[parties[6]] = vote7
whole[parties[7]] = vote8
whole[parties[8]] = vote9
whole[parties[9]] = vote10
whole[parties[10]] = vote11
whole[parties[11]] = vote12
return whole
#print(check())
def check2():
path1=path()
parties=part()
districts=dists()
workbook=xlrd.open_workbook(path1)
worksheet=workbook.sheet_by_index(0)
whole={}
vote1 = []
vote2 = []
vote3 = []
vote4 = []
vote5 = []
vote6 = []
vote7 = []
vote8 = []
vote9 = []
vote10 = []
vote11 = []
vote12 = []
for i in range(11,50):
vote_saadet=worksheet.cell_value(i,9)
vote_btp=worksheet.cell_value(i,10)
vote_tkp=worksheet.cell_value(i,11)
vote_vatan=worksheet.cell_value(i,12)
vote_bbp=worksheet.cell_value(i,13)
vote_chp=worksheet.cell_value(i,14)
vote_ak=worksheet.cell_value(i,15)
vote_dp=worksheet.cell_value(i,16)
vote_mhp=worksheet.cell_value(i,17)
vote_iyi=worksheet.cell_value(i,18)
vote_hdp=worksheet.cell_value(i,19)
vote_dsp=worksheet.cell_value(i,20)
vote1.append(vote_saadet)
vote2.append(vote_btp)
vote3.append(vote_tkp)
vote4.append(vote_vatan)
vote5.append(vote_bbp)
vote6.append(vote_chp)
vote7.append(vote_ak)
vote8.append(vote_dp)
vote9.append(vote_mhp)
vote10.append(vote_iyi)
vote11.append(vote_hdp)
vote12.append(vote_dsp)
vote=[vote1,vote2,vote3,vote4,vote5,vote6,vote7,vote8,vote9,vote10,vote11,vote12]
whole[parties[0]] = vote1
whole[parties[1]] = vote2
whole[parties[2]] = vote3
whole[parties[3]] = vote4
whole[parties[4]] = vote5
whole[parties[5]] = vote6
whole[parties[6]] = vote7
whole[parties[7]] = vote8
whole[parties[8]] = vote9
whole[parties[9]] = vote10
whole[parties[10]] = vote11
whole[parties[11]] = vote12
# likely intended mapping: whole[party][district] = votes (the original loop
# overwrote each vote with a district name, which discards the data)
for k in parties:
    whole[k] = {districts[val]: whole[k][val] for val in range(0, 39)}
print(whole)
check2()
```
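The twelve parallel `voteN` lists above all follow one pattern: each party occupies one worksheet column. A sketch of the same extraction collapsed into a single loop (row and column ranges taken from the code above):
```python
def collect_votes(worksheet, parties):
    """Map each party acronym to its per-district vote column (cols 9..20, rows 11..49)."""
    whole = {}
    for offset, party in enumerate(parties):
        whole[party] = [worksheet.cell_value(row, 9 + offset) for row in range(11, 50)]
    return whole
```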
#### File: jemmypotter/Python/Hafsa_Ulusal.py
```python
user_account=0
UserName=''
Password=0
def log_in():
global UserName
global Password
usernames=['Ahmet','Zeynep']
passwords=['<PASSWORD>','<PASSWORD>']
username = input('Username:')
while username not in usernames:
print('Username typed is not correct. Please try again')
username=input('Username:')
while username in usernames:
if username==usernames[0]:
UserName=username
password=input('Password:')
if password==passwords[0]:
Password=password
print('Welcome Ahmet !')
break
else:
print('Password you typed is not correct. Please try again')
log_in()
break
elif username==usernames[1]:
UserName=username
password=input('Password:')
if password==passwords[1]:
Password=password
print('Welcome <PASSWORD> !')
break
else:
print('Password you typed is not correct please try again')
log_in()
break
else:
print('Username you typed is not correct please try again.')
log_in()
def first_menu():
print('--- Welcome to SEHIR Bank V.0.1 --- ')
print('1.Login\n2.exit')
first_choice=int(input('Please choose what you want to do:'))
if first_choice==1:
log_in()
elif first_choice==2:
print('You are about to exit...')
print('You have exitted')
exit()
first_menu()
def main_menu():
global UserName
global Password
global user_account
print('1.Withdraw Money\n2.Deposit Money\n3.Transfer Money\n4.My Account Information\n5.Logout')
second_choice=int(input('Please enter the number of the service:'))
while second_choice<6 and second_choice>0:
if second_choice==1:
withdraw_amount=int(input('Please enter the amount you want to withdraw:'))
if withdraw_amount>user_account:
print("You don't have " + str(withdraw_amount) + ' TL in your account')
main_menu()
elif withdraw_amount<=user_account:
print(str(withdraw_amount) + ' TL withdrawn from your account\nGoing back to the main menu...')
user_account=user_account-withdraw_amount
main_menu()
break
elif second_choice==2:
deposit_amount=int(input('Please enter the amount you want to drop:'))
user_account=user_account+deposit_amount
print(str(deposit_amount) + 'TL added to your account')
print('Going back to the main menu')
main_menu()
break
if second_choice==3:
transfer_amount=int(input('Please enter the amount you want to transfer:'))
if transfer_amount>user_account:
print("Sorry you don't have enough money to complete this transaction:")
print('1.Go back to the main menu\n2.Transfer again')
mini_menu=int(input('>>>'))
if mini_menu==1:
main_menu()
elif mini_menu==2:
continue
elif transfer_amount<=user_account:
print('You have successfully transferred')
user_account=user_account-transfer_amount
main_menu()
elif second_choice==4:
print('------- SEHIR Bank ------- ')
print('Current date do it later\n---------------------------- ')
print('Your name:' + UserName)
print('Your password:' + Password)
print('Your amount(TL):' + str(user_account))
break
if second_choice==5:
first_menu()
break
else:
print('Invalid service number, Please try again.')
main_menu()
main_menu()
```
#### File: jemmypotter/Python/mp3.py
```python
from tkinter import *
from tkinter.filedialog import askopenfilename
import xlrd
import pandas as pd
from tkinter.ttk import Combobox
from PIL import Image,ImageTk
from clusters1 import hcluster, sim_distance, drawdendrogram  # assumed to mirror the course's clusters module; these names are used in cluster_districts_but below
DENDROGRAM_FILE_NAME = 'clusters.jpg'  # must match the name used when the dendrogram image is reopened below
class PoliCluster:
def __init__(self,data_center):
self.data_center=data_center
self.root=Tk()
self.frame1=None
self.frame2=None
self.Frame3=None
self.canvas=None
self.initUI()
def interface(self):
self.root.title('Clustering')
self.root.geometry('730x600+420+70')
self.root.deiconify()
Tk.mainloop(self.root)
def initUI(self):
self.frame1=Frame(self.root)
self.frame2=Frame(self.root)
self.frame3=Frame(self.root)
self.frame1.pack(fill=BOTH)
self.main_label = Label(self.frame1, text='Election Data Analysis Tool v.1.0', bg='red', fg='white',font=('Times', 14, 'bold'))
self.loadDataB = Button(self.frame1, text='Load Election Data', height=2, width=27,command=self.load_data_button)
self.clusDisB = Button(self.frame1, text='Cluster Districts', height=4, width=16,command=self.cluster_districts_but)
self.clusPolB = Button(self.frame1, text='Cluster Political Parties', height=4, width=18)
self.frame2.pack(expand=True,fill=BOTH)
self.x_scroll=Scrollbar(self.frame2,orient=HORIZONTAL)
self.y_scroll=Scrollbar(self.frame2,orient=VERTICAL)
self.canvas=Canvas(self.frame2,xscrollcommand=self.x_scroll.set,yscrollcommand=self.y_scroll.set)
self.frame3.pack(expand=True)
self.dist_lab=Label(self.frame3,text='Districts')
self.dist_scroll=Scrollbar(self.frame3)
self.dist_listb=Listbox(self.frame3,yscrollcommand=self.dist_scroll.set,height=10,selectmode=EXTENDED)
self.combox_label=Label(self.frame3,text='Threshold')
self.combox = Combobox(self.frame3,values=['0%', '1%', '10%', '20%', '30%', '40%', '50%'], width=6, state="readonly")
self.refine_but=Button(self.frame3,text='Refine Analysis')
self.main_label.pack(fill=X, expand=True, anchor=N)
self.loadDataB.pack(expand=True, anchor=N)
self.clusDisB.pack(side=LEFT, expand=True, anchor=NE)
self.clusPolB.pack(side=LEFT, expand=True, anchor=NW)
self.x_scroll.pack(side=BOTTOM, fill=X)
self.y_scroll.pack(side=RIGHT, fill=Y)
self.canvas.pack(fill=BOTH, expand=True)
self.x_scroll.configure(command=self.canvas.xview)
self.y_scroll.configure(command=self.canvas.yview)
self.dist_lab.pack(side=LEFT)
self.dist_listb.pack(side=LEFT)
self.dist_scroll.pack(side=LEFT,fill=Y)
self.dist_scroll.configure(command=self.dist_listb.yview)
self.combox_label.pack(side=LEFT)
self.combox.pack(side=LEFT)
self.combox.current(0)
self.refine_but.pack(side=LEFT)
def load_data_button(self):
self.file_path = askopenfilename(initialdir='/', title='Select file',filetypes=(('excel files', '*.xlsx'), ('all files', '*.*')))
if self.file_path != '':
self.data_center.parse_data(self.file_path)
for name in self.data_center.districts:
self.dist_listb.insert(END, name)
def cluster_districts_but(self):
self.dist_listb.selection_clear(0,END)
if self.file_path != '':
try:
self.data_center.cluster_dists([],threshold=int(self.combox.get()[:-1]))
dendrogram_image = Image.open(DENDROGRAM_FILE_NAME)
self.dendrogram_image = ImageTk.PhotoImage(dendrogram_image)
self.canvas.create_image(0, 0, image=self.dendrogram_image, anchor='nw')
except (ZeroDivisionError, IndexError):
self.canvas.delete("all")
finally:
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
# widgets are laid out with pack() in initUI, so no grid re-layout is needed here
class Data_Center:
def __init__(self):
self.whole={}
self.districts=[]
self.parties=[]
self.political_party_vote_percentages={}
vote=0
def parse_data(self,file_path):
workbook = xlrd.open_workbook(file_path)
worksheet = workbook.sheet_by_index(0)
try:
for i in range(9, 21):
party = worksheet.cell_value(10, i)
self.parties.append(party)
for i in range(11,50):
district=worksheet.cell_value(i,2)
self.districts.append(district)
# assumed intent: each party's percentage is its summed vote column divided
# by the overall total; 8569494 appears to be that overall vote count
# (the original blocks doubled a single cell with `vote += vote` and
# inverted the ratio)
party_columns = [('SAADET', 9), ('BTP', 10), ('TKP', 11), ('VATAN', 12),
                 ('BBP', 13), ('AK PARTİ', 14), ('CHP', 15), ('DP', 16),
                 ('MHP', 17), ('İYİ PARTİ', 18), ('HDP', 19), ('DSP', 20)]
for party, col in party_columns:
    total = 0
    for i in range(11, 50):
        total += worksheet.cell_value(i, col)
    self.political_party_vote_percentages[party] = float(total) / 8569494 * 100.0
except ZeroDivisionError:
vote=0.0
saadet = pd.read_excel(file_path, usecols=[9], skiprows=10)
btp = pd.read_excel(file_path, usecols=[10], skiprows=10)
tkp = pd.read_excel(file_path, usecols=[11], skiprows=10)
vatan = pd.read_excel(file_path, usecols=[12], skiprows=10)
bbp = pd.read_excel(file_path, usecols=[13], skiprows=10)
chp = pd.read_excel(file_path, usecols=[14], skiprows=10)
ak = pd.read_excel(file_path, usecols=[15], skiprows=10)
dp = pd.read_excel(file_path, usecols=[16], skiprows=10)
mhp = pd.read_excel(file_path, usecols=[17], skiprows=10)
iyi = pd.read_excel(file_path, usecols=[18], skiprows=10)
hdp = pd.read_excel(file_path, usecols=[19], skiprows=10)
dsp = pd.read_excel(file_path, usecols=[20], skiprows=10)
saadet_dict = saadet.to_dict()
btp_dict = btp.to_dict()
tkp_dict = tkp.to_dict()
vatan_dict = vatan.to_dict()
bbp_dict = bbp.to_dict()
chp_dict = chp.to_dict()
ak_dict = ak.to_dict()
dp_dict = dp.to_dict()
mhp_dict = mhp.to_dict()
iyi_dict = iyi.to_dict()
hdp_dict = hdp.to_dict()
dsp_dict = dsp.to_dict()
self.whole = (
saadet_dict, btp_dict, tkp_dict, vatan_dict, bbp_dict, chp_dict, ak_dict, dp_dict, mhp_dict, iyi_dict, hdp_dict,
dsp_dict)
def cluster_dists(self,selected_districts, threshold=0):
assert self.whole != {}
if not selected_districts:
selected_districts = self.districts
political_party_names=[political_party for political_party in self.parties
if self.political_party_vote_percentages[political_party] >= threshold]
cluster_matrix = [[0.0] * len(political_party_names) for i in range(len(selected_districts))]
for i in range(len(selected_districts)):
for j in range(len(political_party_names)):
cluster_matrix[i][j] = self.districts[selected_districts[i]] \
.get_political_party_percentage(political_party_names[j]),
cluster = hcluster(cluster_matrix, distance=sim_distance)
drawdendrogram(cluster, selected_districts)
def main():
data_center=Data_Center()
g=PoliCluster(data_center)
g.interface()
main()
#di['CHP']['Adalar']=43
```
#### File: jemmypotter/Python/python.py
```python
# i= i + 1
#for i in range(1,5):
# print i
#for i in range(5):
# print i
#def sum_numbers(a,b):
#result=0
#for in rage(a,b+1)
# result + = i
# #print result
#from swampy.TurtleWorld import *
#world= TurtleWorld()
#regina= Turtle()
#print regina
#def warmup(speed,lap):
# regina.set_delay(speed)
# for i in range(2*lap):
# fd(regina,100)
# lt(regina,90)
# fd(regina,70)
# lt(regina,90)
#warmup(0.07,4)
#def countDU(n):
# for j in range(n,0,-1):
# print j
# for i in range(n+1):
# print i
#countDU(10)
#length=5
#angle=3
#regina.set_delay(0.01)
#while i:
# lt(regina,1)
# def pattern():
# for i in range(30):
# fd(regina,length)
# lt(regina,angle)
# def angler():
# lt(regina,120)
# for i in range(30):
# fd(regina,length)
# lt(regina,angle)
#pattern()
#angler()
#def right_justify(s):
# print '' '' * len(5)*10 + s
#right_justify('engr 101')
#def summation():
# sum = 0
#for i in range(10):
#
# sum +=i
# print sum
#summation()
#def divide_by(a,b,c,d):
# (str(a)+str(b)+str(c)+str(d))/c
#
#divide_by(1,2,3,5)
#from swampy.TurtleWorld import *
#world= TurtleWorld()
#jerennaz= Turtle()
#print jerennaz
#fd(jerennaz,100)
#lt(jerennaz,30)
#fd(jerennaz,70)
#lt(jerennaz,45)
#wait_for_user()
#weather = 'rainy'
# activity = '?'
#
#
# if weather == 'rainy':
# activity = 'reading'
#
#
# if weather != 'rainy':
# activity = 'walking'
#
#
#
#
# print activity
#
#
# import antigravity
# def f1(n):
# if n==1:
# print True
# elif n==0:
# print False
# else:
# print 'Wrong input'
#
# x= raw_input('Enter a number:')
# x= int(x)
#
#
# f1(x)
# def aycatria(x,y,z):
# if x==y or x==z or y==z:
# print 'this is an isoscales triangle'
# aycatria(4,5,5)
#
#
#
# def is_isoscles():
# x= int(raw_input('enter 1st num:'))
# y= int(raw_input('enter 2nd num:'))
# z= int(raw_input('enter 3rd num:'))
# if x==y or x==z or y==z:
# print 'true'
# else:
# print 'false'
# is_isoscles()
# thing1=20
# if thing1>10:
# print 'this is first'
# if thing1<25:
# print 'this is second'
# else:
# print 'this is third'
# def compare(a,b):
# if a>b:
# print str(a) + ' is greater than ' + str(b)
# elif a<b:
# print str(a) + ' is less than ' + str(b)
# else:
# print str(a) + ' is equal to ' + str(b)
#
# compare(7,7)
# def data_type(a):
# if type(a) == int:
# print str(a) + ' is an integer '
# elif type(a) == str:
# print a + ' is a string '
# elif type(n) == int:
# print str(n) + ' is a string '
# elif type(n) == bool:
# print str(n) + ' is a boolean'
#data_type(True)
#
# def add_even_numbers(a,b):
# sum =0
# for i in range (a,b):
# if i%2 == 0:
# sum = sum + i
# print sum
#
# add_even_numbers(5,12)
#import antigravity
# import math
# def circle_area(radius):
# area = math.pi * radius**2
# return area
#
# a = circle_area(4)
# print a
# def void_function():
# print 2
#
#
# def fruitful_function():
# return 2
#
# my_variable = void_function()
# print my_variable
#
# my_variable = fruitful_function()
# print my_variable
# def square(x):
# result=x*x
# return result
#
#
# print square(5)*square(5)
#
# def division(a,b):
# result = a/b
# return result
# print division(10,3)
#
# def remainder(x,y):
# result = x%y
# return result
# print remainder(10,3)
# def another_func(c,d):
# print remainder(10,3)
# if
# else:
# print 'the number cannot be divided without remainder'
# print another_func(10,3)
# def fahrenheit():
# T_in_fahrenheit=(T_in_celsius*9/5)+32
#
# fahrenheit(20)
#
# import math
# def area(radius):
# A=math.pi*radius**2
# return A
#
# def distance_and_area(x1,y1,x2,y2):
# radius = ((x1-x2)**2 + (y1-y2)**2)**1/2
# print area(radius)
# return radius
#
# print distance_and_area(8,7,4,4)
#
# # def world_hist(word):
# # x= len(word)
# # print x*'*'
# # world_hist('the'
#
#
# print 'a' + 'b'
#
# print 'a'*3
#
# def my_function():
#
# A=10
# p=0.25
# n=1.25
# s=0.75
#
# print int(A - (5*p + 2*n)) / s
#
#
# my_function()
# def aritseq(a1,d ,n):
# an= a1 + (n-1)*d
# print an
# aritseq(3,2,99)
#
#
# def favourites(x,y,z):
# print "I like " + x
# print "I like " + y
# print "I like " + z
# favourites("orange","berry","kiwi")
# def time(h,t,s):
#
# m=h*60
# t=m*60
#
# print m+t+s
#
# time(h,t,s)
#
# gizemsays = raw_input('esrarinda etkisiyle isiklar parlar:')
# if gizemsays == 'cekilir nefesler' :
# print 'karanlikta yolum yonum yildizlar ve sesler'
# gizemsays = raw_input('ay gunesten daha guzel:')
# if gizemsays == 'geceler geceler' :
# print 'kafamin pesindeyken yasarim neyim neysem'
# def right_justify(s):
# print (' '* (70-len(s))+s)
#
# right_justify('allen')
# fruit = 'banana'
# letter = fruit[0]
# print letter
# #
#
# vegetable = 'tomato'
# letter = vegetable[1]
# print letter
# count = 0
# while (count < 9):
# print 'the count is:' , count
# count = count + 1
# print 'bye bitch!'
#
# for i in range(3,6):
# print i
# def containsLetter(word,letter):
#     return letter in word
# containsLetter('apple','p')
```
#### File: jemmypotter/Python/slides.py
```python
def quest(str_1,str_2):
    # note: `str_1 and str_2` evaluates to str_2, so only str_2 is iterated
    for char in str_1 and str_2:
        if char not in str_2:
            print(char)
            return True
        if char not in str_1:
            return False
            print(char)  # unreachable: sits after the return
quest('hello','world')
```
|
{
"source": "JemmyYu/DataMining-CRF",
"score": 2
}
|
#### File: JemmyYu/DataMining-CRF/crf_data.py
```python
import re
import numpy as np
# Truncate sequences longer than max_len and zero-pad shorter ones
def norm(seq, x2id, max_len=None):
ids = [x2id.get(x, 0) for x in seq]
if max_len is None:
return ids
if len(ids) >= max_len:
ids = ids[: max_len]
return ids
ids.extend([0] * (max_len - len(ids)))
return ids
# Log softmax
def log_softmax(vec):
max_val = np.max(vec)
log_sum_exp = np.log(np.sum(np.exp(vec - max_val)))
for i in range(np.size(vec)):
vec[i] -= log_sum_exp + max_val
return vec
class CrfData(object):
def __init__(self, train, test=None, testr=None):
self.train = train
self.data = []
self.tag = []
self.word2id = None
self.id2word = None
self.tag2id = {'': 0,
'B_ns': 1,
'I_ns': 2,
'B_nr': 3,
'I_nr': 4,
'B_nt': 5,
'I_nt': 6,
'o': 7}
self.id2tag = {v: k for k, v in self.tag2id.items()}
self.read_train_data()
self.map_word_id()
def read_train_data(self):
i = 0
with open(self.train, "r", encoding="utf-8-sig") as file:
for line in file.readlines():
if i >= 1000: break
else: i += 1
line = line.strip().split()
seq = ''
if len(line) != 0:
for word in line:
word = word.split('/')
if word[1] == 'o':
seq += ''.join([char + "/o " for char in word[0]])
else:
seq += ''.join([word[0][0] + "/B_" + word[1] + ' '] +
[char + "/I_" + word[1] + ' ' for char in word[0][1:]])
line = re.split('[,。;!:?、‘’”“]/[o]', seq.strip())
for subSeq in line:
subSeq = subSeq.strip().split()
if len(subSeq):
subData = []
subtag = []
noEntity = True
for word in subSeq:
word = word.split('/')
subData.append(word[0])
subtag.append(word[1])
noEntity &= (word[1] == 'o')
if not noEntity:
self.data.append(subData)
self.tag.append(subtag)
def map_word_id(self):
wordbag = sum(self.data, [])
wordset = set(wordbag)
wordict = {k: 0 for k in wordset}
for k in wordbag:
wordict[k] += 1
wordlst = sorted(wordict.items(), key=lambda x: x[1], reverse=True)
wordset = [x for x, _ in wordlst]
idset = range(1, len(wordset) + 1)
self.word2id = {k: v for k, v in zip(wordset, idset)}
self.id2word = {k: v for k, v in zip(idset, wordset)}
def get_train_data(self, max_len):
train_x = []
train_y = []
for data, tag in zip(self.data, self.tag):
train_x.append(norm(data, self.word2id, max_len))
train_y.append(norm(tag, self.tag2id, max_len))
return list(filter(None, train_x)), list(filter(None, train_y))
def log_likelihood(self):
row = len(self.word2id) + 1
col = len(self.tag2id)
loglikelihoods = np.zeros((row, col), dtype="float32")
wordbag = norm(sum(self.data, []), self.word2id)
tagbag = norm(sum(self.tag, []), self.tag2id)
for word, tag in zip(wordbag, tagbag):
loglikelihoods[word, tag] += 1
for j in range(col):
log_softmax(loglikelihoods[:, j])
return loglikelihoods
```
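A minimal usage sketch, assuming a training file in the space-separated `word/tag` format the parser expects (for example `北京/ns 的/o`); the file name is illustrative:
```python
crf = CrfData("train.txt")                    # illustrative path
train_x, train_y = crf.get_train_data(max_len=60)
emissions = crf.log_likelihood()              # shape: (len(word2id) + 1, len(tag2id))
print(len(train_x), emissions.shape)
```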
|
{
"source": "Jemoka/gregarious",
"score": 2
}
|
#### File: backend_old/embedding/engines.py
```python
import tensorflow as tf
import numpy as np
import keras
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Flatten, Add, LSTM, Masking, Concatenate, Conv1D, MaxPooling1D
from keras.optimizers import RMSprop, Adam, SGD, Adagrad
from keras import backend as K
class SemanticEmbedEngine(object):
def __init__(self):
self.trainer = None
self.embedder_a = None
self.embedder_b = None
@classmethod
def create(cls, embedSize, vocabSize, paddedSentSize, recurrentSize=None):
if not recurrentSize:
recurrentSize = embedSize
sentenceAInput = Input(shape=(paddedSentSize, vocabSize))
# maskA = Masking(mask_value=0.0)(sentenceAInput)
sentenceBInput = Input(shape=(paddedSentSize, vocabSize))
# maskB = Masking(mask_value=0.0)(sentenceBInput)
normal = keras.initializers.glorot_normal()
conv_A_a = Conv1D(recurrentSize, 5)
conv_A_a_built = conv_A_a(sentenceAInput)
conv_A_b = Conv1D(recurrentSize, 5)
conv_A_b_built = conv_A_b(conv_A_a_built)
conv_A_c = Conv1D(recurrentSize, 5)
conv_A_c_built = conv_A_c(MaxPooling1D()(conv_A_b_built))
# conv_A_flat = Flatten()(conv_A_c_built)
dense_A_a = Dense(embedSize, kernel_initializer=normal, activation="relu")
dense_A_a_built = dense_A_a(conv_A_c_built)
dense_A_b = Dense(embedSize, kernel_initializer=normal, activation="relu")
dense_A_b_built = dense_A_b(dense_A_a_built)
sentenceAEmbedded = Dense(embedSize, kernel_initializer=normal, activation="relu")
sentenceAEmbedded_built = sentenceAEmbedded(dense_A_b_built)
conv_B_a = Conv1D(recurrentSize, 5)
conv_B_a_built = conv_B_a(sentenceBInput)
conv_B_b = Conv1D(recurrentSize, 5)
conv_B_b_built = conv_B_b(conv_B_a_built)
conv_B_c = Conv1D(recurrentSize, 5)
conv_B_c_built = conv_B_c(conv_B_b_built)
# conv_B_flat = Flatten()(conv_B_c_built)
dense_B_a = Dense(embedSize, kernel_initializer=normal, activation="relu")
dense_B_a_built = dense_B_a(conv_B_c_built)
dense_B_b = Dense(embedSize, kernel_initializer=normal, activation="relu")
dense_B_b_built = dense_B_b(dense_B_a_built)
sentenceBEmbedded = Dense(embedSize, kernel_initializer=normal, activation="relu")
sentenceBEmbedded_built = sentenceBEmbedded(dense_B_b_built)
# Combining/Output
adder = Concatenate(axis=1)
added = adder([sentenceAEmbedded_built, sentenceBEmbedded_built])
recurrentA = LSTM(recurrentSize*2, return_sequences=True)
recurrentA_built = recurrentA(added)
recurrentB = LSTM(recurrentSize*2)
recurrentB_built = recurrentB(recurrentA_built)
combineEmbedded = Dense(embedSize, kernel_initializer=normal, activation="relu")
combineEmbedded_built = combineEmbedded(recurrentB_built)
score = Dense(1, kernel_initializer=normal, activation="relu")
score_built = score(combineEmbedded_built)
trainer = Model(inputs=[sentenceAInput, sentenceBInput], outputs=score_built)
optimizer = Adam(lr=4e-4)
trainer.compile(optimizer, 'mae')
sentenceAEmbedder = Model(inputs=sentenceAInput, outputs=sentenceAEmbedded_built)
sentenceBEmbedder = Model(inputs=sentenceBInput, outputs=sentenceBEmbedded_built)
engine = cls()
engine.trainer = trainer
engine.embedder_a = sentenceAEmbedder
engine.embedder_b = sentenceBEmbedder
return engine
def fit(self, sentenceVectors_a, sentenceVectors_b, similarities, batch_size=10, epochs=10, validation_split=0.0):
self.trainer.fit(x=[sentenceVectors_a, sentenceVectors_b], y=[similarities], batch_size=batch_size, epochs=epochs, validation_split=validation_split)
def predict_diff(self, sentenceVectors_a, sentenceVectors_b):
return self.trainer.predict([sentenceVectors_a, sentenceVectors_b])
def encode(self, sentenceVector, flavor="a"):
if flavor == "a":
return self.embedder_a.predict([sentenceVector])
elif flavor == "b":
return self.embedder_b.predict([sentenceVector])
else:
raise Exception(str(flavor)+" is not a valid flavor. Choose between a or b.")
```
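A hedged usage sketch for the engine above; the sizes and the random stand-in arrays are illustrative only (real inputs would be one-hot sentence tensors of shape `(batch, paddedSentSize, vocabSize)`).
```python
import numpy as np

engine = SemanticEmbedEngine.create(embedSize=64, vocabSize=100, paddedSentSize=30)
sents_a = np.random.rand(8, 30, 100)   # stand-ins for one-hot encoded sentences
sents_b = np.random.rand(8, 30, 100)
similarities = np.random.rand(8)       # stand-in target similarity scores
engine.fit(sents_a, sents_b, similarities, batch_size=4, epochs=1)
embedding_a = engine.encode(sents_a, flavor="a")
```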
|
{
"source": "Jemoka/Sort",
"score": 4
}
|
#### File: sort/sorters/shell.py
```python
from .sorter import Sorter
class ShellSorter(Sorter):
"""
(factor of 3) Shell Sort
-=-=-=-=-=-=-=
A much much much much much much faster algorithm that
    not only takes advantage of insertion sort, but
    actually augments its advantage. It pre-sorts the data
    into bigger chunks, then fine-tunes the result.
Implemented in Java @pg 259 as Algorithm 2.2.
Publicly Callable Methods
================
load(data:list)->list Loads an array into the sorter's memory
sort()->list Sorts and returns loaded data, empties the sorter's memory
Class Callable (Unexposed) Methods
================
_swap(indxA, indxB) -> None Swaps two indexes of the array loaded
_check() -> bool Checks if array in memory is sorted
"""
def sort(self) -> list:
"""Sorts loaded data from least to most, and return sorted array"""
super().sort()
        # Build the initial gap from the 3h + 1 increment sequence (1, 4, 13, 40, ...)
        h = 1
        while h < len(self.data) / 3:
            h = 3 * h + 1
        while h >= 1:
            # h-sort the array: insertion sort over elements h apart
            for pointerIndx in range(h, len(self.data)):
                checkIndx = pointerIndx
                while checkIndx >= h and self.data[checkIndx - h] > self.data[checkIndx]:
                    self._swap(checkIndx - h, checkIndx)
                    checkIndx -= h
            h = h // 3
# Check the sorting
assert self._check(), "This algorithm has done goof!"
# Return sort
return self.data
```
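A quick check of the sorter, assuming the base `Sorter` class exposes `load()` and `sort()` as the docstring describes.
```python
sorter = ShellSorter()
sorter.load([5, 2, 9, 1, 7])
print(sorter.sort())  # [1, 2, 5, 7, 9]
```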
|
{
"source": "JEmonet67/objmpp-organoid-classification",
"score": 3
}
|
#### File: lib/objects/ObjetMpp.py
```python
class ObjetMpp:
def __init__(self,center,major_axis):
self.center = center
self.major_axis = int(round(major_axis))
def create_window(self,img,distmap):
bord_gauche = self.center.x - self.major_axis
bord_droit = self.center.x + self.major_axis
bord_haut = self.center.y - self.major_axis
bord_bas = self.center.y + self.major_axis
if bord_gauche < 0:
bord_gauche = 0
if bord_haut < 0:
bord_haut = 0
        if bord_droit > distmap.shape[1]-1:
            bord_droit = distmap.shape[1]-1  # x bounds clip against columns
        if bord_bas > distmap.shape[0]-1:
            bord_bas = distmap.shape[0]-1  # y bounds clip against rows
self.coordinates = [bord_gauche,bord_droit,bord_haut,bord_bas]
self.local_distmap = distmap[self.coordinates[2]:self.coordinates[3],
self.coordinates[0]:self.coordinates[1]]
self.local_img = img.matrix_img[self.coordinates[2]:self.coordinates[3],
self.coordinates[0]:self.coordinates[1]]
```
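An illustrative sketch of `create_window`; the `SimpleNamespace` stand-ins only mimic the attributes the method actually reads (`.x`/`.y` on the centre object, `.matrix_img` on the image).
```python
import numpy as np
from types import SimpleNamespace

center = SimpleNamespace(x=50, y=60)                    # stand-in centre object
img = SimpleNamespace(matrix_img=np.zeros((200, 300)))  # stand-in image object
distmap = np.zeros((200, 300))

obj = ObjetMpp(center, major_axis=12.4)
obj.create_window(img, distmap)
print(obj.coordinates, obj.local_distmap.shape)  # [38, 62, 48, 72] (24, 24)
```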
#### File: lib/segmentation/traitements_objet.py
```python
import numpy as np
import imageio as io
import matplotlib.pyplot as plt
import pandas as pd
import cv2 as cv
#Code to binarise, erode and merge the object distance maps.
def Erode_ellipses(list_distmap,path_file=False):
dim = list_distmap[0].shape
All_ell_erod = np.zeros([dim[0],dim[1]])
for distmap_ell in list_distmap:
distmap_ell_norm = cv.normalize(distmap_ell, np.zeros(distmap_ell.shape),0,1,cv.NORM_MINMAX)
distmap_ell_norm[distmap_ell_norm < 0.7] = 0
distmap_ell_norm[distmap_ell_norm != 0] = 1
All_ell_erod += distmap_ell_norm
if path_file != False:
All_ell_erod_norm = cv.normalize(All_ell_erod, np.zeros(All_ell_erod.shape),0, 255, cv.NORM_MINMAX)
io.imwrite(f"{path_file}/All_Ellipses_érodées.png",np.uint8(All_ell_erod_norm))
return All_ell_erod
# path_file_t = "/home/jerome/Stage_Classif_Organoid/Result_MPP/Organoïd/Images_KO/local_map_UBTD1-03_w24-DAPI_TIF_2020y06m09d14h48m55s317l"
# Erode_ellipses(path_file_t)
#Code to binarise a regions image.
def Binaryze_ellipses(path_regions):
All_ell = np.copy(plt.imread(path_regions))
All_ell[All_ell != 0] = 1
#io.imwrite(f"{path_output}/All_Ellipses.png",img_as_ubyte(reg))
return All_ell
def Dilate_ellipses(list_ells_sep):
dim = list_ells_sep[0].shape
all_ells_dilated = np.zeros([dim[0],dim[1]])
kernel = np.ones((5,5), np.uint8)
for ells in list_ells_sep:
all_ells_dilated = all_ells_dilated + cv.dilate(ells,kernel, iterations=4)
all_ells_dilated[all_ells_dilated!=0] = 255
all_ells_dilated = cv.normalize(all_ells_dilated, np.zeros(all_ells_dilated.shape),0, 1, cv.NORM_MINMAX)
all_ells_dilated = np.float32(all_ells_dilated)
return all_ells_dilated
#Code to separate each ellipse out of a regions image.
def Separate_ellipses(img_all_regions):
list_region_sep = []
list_objects = [objets for objets in np.unique(img_all_regions) if objets!=0]
for value_obj in list_objects:
img_region = np.copy(img_all_regions)
img_region[img_region != value_obj] = 0
img_region[img_region == value_obj] = 255
list_region_sep += [img_region]
# io.imwrite(f"{path_output}/Ellipse_{n_ell}.png", img_region)
# np.save(f"{path_output}/Ellipse_{n_ell}.npy", img_region)
return list_region_sep
def Separate_ells_watershed(img,regions, path_csv,path_output=False):
df_marks = pd.read_csv(path_csv)
list_center_y = df_marks["Center Col"]
list_center_x = df_marks["Center Row"]
for n_ell in range(1,len(list_center_x)+1):
reg = np.copy(regions)
center_y = list_center_y[n_ell-1]
center_x = list_center_x[n_ell-1]
value_center = reg[(center_x,center_y)]
reg[reg != value_center] = 0
reg[reg == value_center] = 255
img.list_reg_watersh += [reg]
if path_output!=False:
io.imwrite(f"{path_output}/Ellipse_{n_ell}.png", reg)
np.save(f"{path_output}/Ellipse_{n_ell}.npy", reg)
return img
# path_output_t = "/home/jerome/Stage_Classif_Organoid/Result_MPP/Organoïd/Images_KO/local_map_UBTD1-03_w24-DAPI_TIF_2020y06m09d14h48m55s317l/Test_Results/Img_marker_watershed_segmentation"
# path_csv_t = "/home/jerome/Stage_Classif_Organoid/Result_MPP/Organoïd/Images_KO/UBTD1-03_w24-DAPI_TIF-marks-2020y06m09d14h48m55s317l.csv"
# path_regions_t = "/home/jerome/Stage_Classif_Organoid/Result_MPP/Organoïd/Images_KO/local_map_UBTD1-03_w24-DAPI_TIF_2020y06m09d14h48m55s317l/Test_Results/Labels_méthode_2.png"
# Separate_ellipses(path_output_t,path_regions_t,path_csv_t)
#Code to create a distance map from each object.
def Distance_map(list_obj,path_file=False):
n_obj = 1
list_distmap = []
for obj in list_obj:
obj_norm = cv.normalize(obj, np.zeros(obj.shape),0, 255, cv.NORM_MINMAX)
obj_int8 = np.uint8(obj_norm)
dm_obj = cv.distanceTransform(obj_int8, cv.DIST_L2, 3)
dm_obj_norm = cv.normalize(dm_obj, np.zeros(dm_obj.shape),0, 255, cv.NORM_MINMAX)
dm_obj_int8 = np.uint8(dm_obj_norm)
list_distmap += [dm_obj_int8]
if path_file != False:
io.imwrite(f"{path_file}/local_map_watersh_{n_obj}.png",dm_obj_int8)
np.save(f"{path_file}/local_map_watersh_{n_obj}.npy",dm_obj_int8)
n_obj += 1
return list_distmap
```
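A sketch of how these helpers chain together; `labels.png` is an assumed labelled-regions image in which each ellipse carries a distinct pixel value.
```python
import matplotlib.pyplot as plt

regions = plt.imread("labels.png")      # hypothetical labelled-regions image
ellipses = Separate_ellipses(regions)   # one binary image per label value
distmaps = Distance_map(ellipses)       # distance transform of each ellipse
eroded = Erode_ellipses(distmaps)       # threshold and merge the maps
```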
|
{
"source": "jemorgan1000/ML_Final_Project",
"score": 3
}
|
#### File: jemorgan1000/ML_Final_Project/preprocessing.py
```python
import geopandas as gpd
import os.path as osp
import os
import pandas as pd
def get_stops(stop_path):
"""
Loads all the stops
:param stop_path: path to the shape file
:return: return gpd.GeoDataFrame
"""
stops = pd.read_csv(stop_path)
stops = gpd.GeoDataFrame(stops,geometry=gpd.points_from_xy(stops.stop_lon,stops.stop_lat))
return stops
def get_blocks(shape_path):
"""
This function loads the shape file containing the shapes
:param shape_path: path to the shape file
:return: gpd.GeoDataFrame
"""
blocks = gpd.read_file(shape_path)
return blocks
def get_trips(trips_path):
"""
Loads and cleans the file containing all of the rider files.
:param trips_path: path to the trips file
"""
trips = pd.read_csv(trips_path)
return trips
def add_commute_time(trips_df):
"""
Adds commute times to each of the trips
:param trips_df: pd.DataFrame
:return: returns Dataframe
"""
trips_df.loc[:,'start_time'] = pd.to_datetime(trips_df.start_time)
trips_df.loc[:,'end_time'] = pd.to_datetime(trips_df.end_time)
trips_df.insert(len(trips_df.columns),
"MORNING",
((trips_df.start_time.dt.hour >= 5) & (trips_df.start_time.dt.hour <= 10)) * 1)
trips_df.insert(len(trips_df.columns),
'AFTERNOON',
((trips_df.start_time.dt.hour >= 16) & (trips_df.start_time.dt.hour < 20)) * 1)
return trips_df
def create_UID(combined):
"""
:param combined: pd.DataFrame with combined data
:return: new data_frame with a UID for each Block
"""
UID = combined.apply(lambda x: (x.COUNTYFP10, x.TRACTCE10,x.BLOCKCE10),axis=1)
combined.insert(len(combined.columns),'UID',UID)
return combined
def preprocess_combo(combined):
"""
    Aggregates stop, rail-stop and trip counts for each census block
    :param combined: gpd.GeoDataFrame like object
    :return: pd.DataFrame with STOPS, RAIL_STOP and TRIPS counts per UID
"""
stops_rides = combined.groupby("UID")['stop_id'].size()
stops_rides = stops_rides.reset_index()
stops_rides.rename({0:"STOPS"},axis=1,inplace=True)
rail_rides = combined.groupby("UID")["RAIL_STOP"].sum()
rail_rides = rail_rides.reset_index()
rail_rides.rename({0:"STOPS"},axis=1,inplace=True)
trips = combined.groupby("UID")['TRIPS'].sum()
trips = trips.reset_index()
stops_rides = stops_rides.merge(trips,on='UID')
stops_rides = stops_rides.merge(rail_rides,on='UID')
return stops_rides
def join_stops(blocks, stops):
"""
    This function spatially joins the stops onto the census blocks
:param blocks: gpd.GeoDataFrame object containing all the shape files
:param stops: gpd.GeoDataFrame object containing all the stops files
"""
combined = gpd.sjoin(blocks,stops)
return combined
def join_trips(combined, trips):
"""
    This function merges the trip counts onto the combined stops/blocks data
"""
return combined.merge(trips,on='stop_id')
def preprocess_trips(trips):
"""
This function filters trips for our desired ones, without errors and in the morning
:param trips: pd.DataFrame
:return: data
"""
trips = trips[trips.error_bool == 0]
trips = add_commute_time(trips)
trips = trips[trips.MORNING == 1]
num_rides = trips.groupby('start_stop')['trip_id'].size()
stop_morn_rides = num_rides.reset_index()
stop_morn_rides.rename(columns = {'start_stop':'stop_id','trip_id':'TRIPS'},inplace=True)
return stop_morn_rides
def output_data(path, df):
"""
    Writes the final dataset to CSV
:param path:
:param df:
:return:
"""
df.to_csv(path,index=False)
def get_duration(path):
"""
loads the duration file
:param path: osp.path object
:return: pd.DataFrame containing travel times
"""
dur_df = pd.read_csv(path)
return dur_df
def make_dur_nums(dur_df):
"""
:param dur_df:
:return:
"""
pass
def main():
# setting up the path information
dir_path = os.getcwd()
data_path = osp.abspath(osp.join(dir_path,"Data/"))
stop_path = osp.join(data_path, "stops.csv")
shape_path = osp.join(data_path,'census.dbf')
trips_path = osp.join(data_path,'odx/trips.csv')
out_path = osp.join(data_path,'preprocessed_data2.csv')
#
stops = get_stops(stop_path)
blocks = get_blocks(shape_path)
combined = join_stops(blocks,stops)
trips = get_trips(trips_path)
num_rides = preprocess_trips(trips)
combined = join_trips(combined,num_rides)
combined = create_UID(combined)
final_df = preprocess_combo(combined)
#print(final_df.head())
output_data(out_path, final_df)
if __name__ == '__main__':
main()
```
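Beyond the `main()` pipeline above, the individual helpers can be exercised directly; a toy check of `add_commute_time` on illustrative timestamps:
```python
import pandas as pd

trips = pd.DataFrame({
    "start_time": ["2020-01-01 08:30", "2020-01-01 17:15"],
    "end_time": ["2020-01-01 09:00", "2020-01-01 17:45"],
})
print(add_commute_time(trips)[["MORNING", "AFTERNOON"]])
#    MORNING  AFTERNOON
# 0        1          0
# 1        0          1
```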
|
{
"source": "jemorrison/jwst_reffiles",
"score": 3
}
|
#### File: jwst_reffiles/bad_pixel_mask/mkref_bad_pixel_mask.py
```python
import argparse
import copy
import os
import re
import sys
import types
from jwst_reffiles.plugin_wrapper import mkrefclass_template
from jwst_reffiles.bad_pixel_mask import bad_pixel_mask as bpm
from jwst_reffiles.utils.constants import RATE_FILE_SUFFIXES
from jwst_reffiles.utils.definitions import PIPE_STEPS
class mkrefclass(mkrefclass_template):
def __init__(self, *args, **kwargs):
mkrefclass_template.__init__(self, *args, **kwargs)
# Set the reflabel as the name of the imported module
self.reflabel = 'bad_pixel_mask'
# Set the reftype
self.reftype = 'bpm'
def extra_optional_arguments(self, parser):
"""Any arguments added here will give the option of overriding
the default argument values present in the config file. To override,
call these arguments from the command line in the call to mkrefs.py
"""
parser.add_argument('--dead_search', help='Whether or not to search for DEAD pixels')
parser.add_argument('--low_qe_and_open_search', help=('Whether or not to search for LOW_QE, OPEN, '
'and ADJ_OPEN pixels'))
parser.add_argument('--dead_search_type', help=('Type of search to use when looking for dead pixels. '
'Options are: sigma_rate, absolute_rate, and '
'zero_signal'))
parser.add_argument('--flat_mean_sigma_threshold', help=('Number of standard deviations to use when sigma-'
'clipping to calculate the mean slope image or the mean '
'across the detector'))
parser.add_argument('--flat_mean_normalization_method', help=('Specify how the mean image is normalized prior '
'to searching for bad pixels.'))
parser.add_argument('--smoothing_box_width', help=('Width in pixels of the box kernel to use to '
'compute the smoothed mean image'))
parser.add_argument('--smoothing_type', help='Type of smoothing to do ``Box2D `` or ``median`` filtering')
parser.add_argument('--dead_sigma_threshold', help=('Number of standard deviations below the mean at '
'which a pixel is considered dead.'))
parser.add_argument('--max_dead_norm_signal', help=('Maximum normalized signal rate of a pixel that is '
'considered dead'))
parser.add_argument('--run_dead_flux_check', help=('Whether or not to check for dead pixels using an absolute flux value'))
parser.add_argument('--dead_flux_check_files', nargs='+', help=('List of ramp (uncalibrated) files to use to check the '
'flux of average of last 4 groups. If None then the '
'ramp files are not read in and no flux_check is done.'))
parser.add_argument('--flux_check', type=int, help=('Tolerance on average signal in last 4 groups. If dead_flux_check is '
'a list of uncalibrated files, then the average of the last four groups '
'for all the integrations is determined. If this average > flux_check '
'then this pixel is not a dead pixel.'))
parser.add_argument('--max_low_qe_norm_signal', help=('The maximum normalized signal a pixel can have '
'and be considered low QE.'))
parser.add_argument('--max_open_adj_norm_signal', help=('The maximum normalized signal a pixel '
'adjacent to a low QE pixel can have in order '
'for the low QE pixel to be reclassified as '
'OPEN'))
parser.add_argument('--manual_flag_file', help=(('Name of file containing list of pixels to be added manually')))
parser.add_argument('--flat_do_not_use', help=('List of bad pixel types where the DO_NOT_USE flag should '
'also be applied (e.g. "["DEAD", "LOW_QE"]")'))
parser.add_argument('--dark_stdev_clipping_sigma', help=('Number of sigma to use when sigma-clipping the 2D array of '
'standard deviation values.'))
parser.add_argument('--dark_max_clipping_iters', type=int, help=('Maximum number of iterations to use when sigma '
'clipping to find the mean and standard deviation '
'values used when locating noisy pixels.'))
parser.add_argument('--dark_noisy_threshold', help=('Number of sigma above the mean noise (associated with the slope) '
'to use as a threshold for identifying noisy pixels.'))
parser.add_argument('--max_saturated_fraction', help=('Fraction of integrations within which a pixel must be fully '
'saturated before flagging it as HOT.'))
parser.add_argument('--max_jump_limit', type=int, help=('Maximum number of jumps a pixel can have in an integration '
'before it is flagged as a ``high jump`` pixel (which may be '
'flagged as noisy later).'))
parser.add_argument('--jump_ratio_threshold', help=('Cutoff for the ratio of jumps early in the ramp to jumps later in '
'the ramp. Pixels with a ratio greater than this value (and which '
'also have a high total number of jumps) will be flagged as potential '
'(I)RC pixels.'))
parser.add_argument('--early_cutoff_fraction', help=('Fraction of the integration to use when comparing the jump rate '
'early in the integration to that across the entire integration. '
'Must be <= 0.5'))
parser.add_argument('--pedestal_sigma_threshold', help=('Used when searching for RC pixels via the pedestal image. Pixels '
'with pedestal values more than ``pedestal_sigma_threshold`` above '
'the mean are flagged as potential RC pixels.'))
parser.add_argument('--rc_fraction_threshold', help=('Fraction of input files within which the pixel must be identified as '
'an RC pixel before it will be flagged as a permanent RC pixel.'))
parser.add_argument('--low_pedestal_fraction', help=('Fraction of input files within which a pixel must be identified as '
'a low pedestal pixel before it will be flagged as a permanent low '
'pedestal pixel.'))
parser.add_argument('--high_cr_fraction', help=('Fraction of input files within which a pixel must be flagged as having a '
'high number of jumps before it will be flagged as permanently noisy.'))
parser.add_argument('--flag_values', help=('Dictionary mapping the types of bad pixels searched for to the flag mnemonics '
'to use when creating the bad pixel file. Keys are the types of bad pixels searched '
'for, and values are lists that include mnemonics recognized by the jwst calibration '
'pipeline e.g. {"hot": ["HOT"], "rc": ["RC"], "low_pedestal": ["OTHER_BAD_PIXEL"], "high_cr": ["TELEGRAPH"]}'))
parser.add_argument('--dark_do_not_use', help=('List of bad pixel types to be flagged as DO_NOT_USE e.g. ["hot", "rc", "low_pedestal", "high_cr"]'))
parser.add_argument('--plot', help=('If True, produce and save intermediate results from noisy pixel search'))
parser.add_argument('--output_file', help=('Name of the CRDS-formatted bad pixel reference file to save the final bad pixel map into'))
parser.add_argument('--author', help=('CRDS-required name of the reference file author, to be placed '
'in the referece file header'))
parser.add_argument('--description', help=('CRDS-required description of the reference file, to be '
'placed in the reference file header'))
parser.add_argument('--pedigree', help=('CRDS-required pedigree of the data used to create the '
'reference file'))
parser.add_argument('--useafter', help=('CRDS-required date of earliest data with which this reference '
                                                'file should be used (e.g. "2019-04-01 00:00:00")'))
parser.add_argument('--history', help='Text to be placed in the HISTORY keyword of the output reference file')
parser.add_argument('--quality_check', help=("If True, the pipeline is run using the output reference "
"file to be sure the pipeline doens't crash"))
return(0)
def callalgorithm(self):
"""Call the bpm algorithm. The only requirement is that the output
reference file is saved as self.args.outputreffilename
mkrefs.py will supply the input files in self.inputimages['output_name'].
This will be a list containing the filenames to use as input. The
file types (e.g. dark, flat) associated with each filename are
contained in self.inputimages['imtype']. From this, you can specify
the appropriate file names in the call to your module.
"""
# Organize the input files into a group of darks and a group of
# flats
flatfiles = []
darkfiles = []
for row in self.inputimagestable:
if row['imlabel'] == 'D':
darkfiles.append(row['fitsfile'])
elif row['imlabel'] == 'F':
flatfiles.append(row['fitsfile'])
# Since this module is called after calib_prep, all of the requested
# outputs from the various pipeline steps should be present in the
# output directory. Create lists of these files. The files listed in
# self.inputimagestable['fitsfile'] should all be the same in terms
# of their calibration state. So we should only have to check one in
# order to know what state all of them are.
dark_slope_files = []
dark_uncal_files = []
dark_jump_files = []
dark_fitopt_files = []
directory, filename = os.path.split(darkfiles[0])
# Get the suffix of the input file so we know the calibration state
suffix = None
for ramp_suffix in RATE_FILE_SUFFIXES:
if ramp_suffix in filename:
dark_slope_files = copy.deepcopy(darkfiles)
suffix = ramp_suffix
if suffix is None:
suffix = filename.split('_')[-1]
suffix = suffix.replace('.fits', '')
if suffix == 'uncal':
dark_uncal_files = copy.deepcopy(darkfiles)
elif suffix == 'jump':
dark_jump_files = copy.deepcopy(darkfiles)
else:
raise ValueError('Unexpected suffixes for input dark files.')
# Create lists of the needed calibration state files
if len(dark_slope_files) > 0:
dark_uncal_files = [elem.replace(suffix, '_uncal') for elem in dark_slope_files]
dark_jump_files = [elem.replace(suffix, '_jump') for elem in dark_slope_files]
dark_fitopt_files = [elem.replace(suffix, '_fitopt') for elem in dark_slope_files]
elif len(dark_uncal_files) > 0:
dark_slope_files = [elem.replace(suffix, '_1_ramp_fit') for elem in dark_uncal_files]
dark_jump_files = [elem.replace(suffix, '_jump') for elem in dark_uncal_files]
dark_fitopt_files = [elem.replace(suffix, '_fitopt') for elem in dark_uncal_files]
elif len(dark_jump_files) > 0:
dark_uncal_files = [elem.replace(suffix, '_uncal') for elem in dark_jump_files]
dark_slope_files = [elem.replace(suffix, '_1_ramp_fit') for elem in dark_jump_files]
dark_fitopt_files = [elem.replace(suffix, '_fitopt') for elem in dark_jump_files]
# Repeat for flat field files
flat_slope_files = []
flat_uncal_files = []
directory, filename = os.path.split(flatfiles[0])
# Get the suffix of the input file so we know the calibration state
suffix = None
for ramp_suffix in RATE_FILE_SUFFIXES:
if ramp_suffix in filename:
flat_slope_files = copy.deepcopy(flatfiles)
suffix = ramp_suffix
if suffix is None:
suffix = filename.split('_')[-1]
suffix = suffix.replace('.fits', '')
if suffix == 'uncal':
flat_uncal_files = copy.deepcopy(flatfiles)
else:
raise ValueError('Unexpected suffixes for input flat field files.')
# Create lists of the needed calibration state files
if len(flat_slope_files) > 0:
flat_uncal_files = [elem.replace(suffix, '_uncal') for elem in flat_slope_files]
elif len(flat_uncal_files) > 0:
flat_slope_files = [elem.replace(suffix, '_1_ramp_fit') for elem in flat_uncal_files]
# The bad pixel mask module needs to use the file with the individual
# slopes (_1_ramp_fit.fits), rather than the mean slope (_0_ramp_fit.fits),
# for exposures with more than one integration per exposure. But for
# exposures with only one integration, only the _0_ramp_fit file will be
# produced. So go through the lists of slope files and check to see
# which versions are present, and adjust the lists accordingly.
# Call the wrapped module and provide the proper arguments from the
# self.parameters dictionary.
bpm.bad_pixels(flat_slope_files=flat_slope_files,
dead_search=self.parameters['dead_search'],
low_qe_and_open_search=self.parameters['low_qe_and_open_search'],
dead_search_type=self.parameters['dead_search_type'],
flat_mean_sigma_threshold=self.parameters['flat_mean_sigma_threshold'],
flat_mean_normalization_method=self.parameters['flat_mean_normalization_method'],
smoothing_box_width=self.parameters['smoothing_box_width'],
smoothing_type=self.parameters['smoothing_type'],
dead_sigma_threshold=self.parameters['dead_sigma_threshold'],
max_dead_norm_signal=self.parameters['max_dead_norm_signal'],
run_dead_flux_check=self.parameters['run_dead_flux_check'],
dead_flux_check_files=flat_uncal_files,
flux_check=self.parameters['flux_check'],
max_low_qe_norm_signal=self.parameters['max_low_qe_norm_signal'],
max_open_adj_norm_signal=self.parameters['max_open_adj_norm_signal'],
manual_flag_file=self.parameters['manual_flag_file'],
flat_do_not_use=self.parameters['flat_do_not_use'],
dark_slope_files=dark_slope_files,
dark_uncal_files=dark_uncal_files,
dark_jump_files=dark_jump_files,
dark_fitopt_files=dark_fitopt_files,
dark_stdev_clipping_sigma=self.parameters['dark_stdev_clipping_sigma'],
dark_max_clipping_iters=self.parameters['dark_max_clipping_iters'],
dark_noisy_threshold=self.parameters['dark_noisy_threshold'],
max_saturated_fraction=self.parameters['max_saturated_fraction'],
max_jump_limit=self.parameters['max_jump_limit'],
jump_ratio_threshold=self.parameters['jump_ratio_threshold'],
early_cutoff_fraction=self.parameters['early_cutoff_fraction'],
pedestal_sigma_threshold=self.parameters['pedestal_sigma_threshold'],
rc_fraction_threshold=self.parameters['rc_fraction_threshold'],
low_pedestal_fraction=self.parameters['low_pedestal_fraction'],
high_cr_fraction=self.parameters['high_cr_fraction'],
flag_values=self.parameters['flag_values'],
dark_do_not_use=self.parameters['dark_do_not_use'],
plot=self.parameters['plot'],
output_file=self.args.outputreffilename,
author=self.parameters['author'],
description=self.parameters['description'],
pedigree=self.parameters['pedigree'],
useafter=self.parameters['useafter'],
history=self.parameters['history'],
quality_check=self.parameters['quality_check'])
return(0)
if __name__ == '__main__':
"""This should not need to be changed. This will read in the config
files, import the script, generate the self.parameters dictionary, and
run the argument parser above.
"""
mkref = mkrefclass()
mkref.make_reference_file()
```
|
{
"source": "jemorrison/prospector",
"score": 2
}
|
#### File: prospect/io/write_results.py
```python
import os, time, warnings
import pickle, json, base64
import numpy as np
try:
import h5py
_has_h5py_ = True
except(ImportError):
_has_h5py_ = False
__all__ = ["githash", "write_pickles", "write_hdf5",
"chain_to_struct"]
unserial = json.dumps('Unserializable')
def pick(obj):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
return np.void(pickle.dumps(obj, 0))
#def run_command(cmd):
# """Open a child process, and return its exit status and stdout.
# """
# import subprocess
# child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
# stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# out = [s for s in child.stdout]
# w = child.wait()
# return os.WEXITSTATUS(w), out
def githash(**extras):
"""Pull out the git hash history for Prospector here.
"""
try:
from .._version import __version__, __githash__
bgh = __version__, __githash__
except(ImportError):
warnings.warn("Could not obtain prospector version info", RuntimeWarning)
bgh = "Can't get version number."
return bgh
def paramfile_string(param_file=None, **extras):
try:
with open(param_file, "r") as pfile:
pstr = pfile.read()
except:
warnings.warn("Could not store paramfile text", RuntimeWarning)
pstr = ''
return pstr
def write_hdf5(hfile, run_params, model, obs, sampler, optimize_result_list,
tsample=0.0, toptimize=0.0, sampling_initial_center=[],
**extras):
"""Write output and information to an HDF5 file object (or
group).
"""
try:
# If ``hfile`` is not a file object, assume it is a filename and open
hf = h5py.File(hfile, "a")
except(AttributeError,TypeError):
hf = hfile
except(NameError):
warnings.warn("HDF5 file could not be opened, as h5py could not be imported.")
return
# ----------------------
# Sampling info
try:
# emcee
a = sampler.acceptance_fraction
write_emcee_h5(hf, sampler, model, sampling_initial_center, tsample)
except(AttributeError):
# dynesty or nestle
if sampler is None:
sdat = hf.create_group('sampling')
elif 'eff' in sampler:
write_dynesty_h5(hf, sampler, model, tsample)
else:
write_nestle_h5(hf, sampler, model, tsample)
# -----------------
# Optimizer info
if optimize_result_list is not None:
out = optresultlist_to_ndarray(optimize_result_list)
mdat = hf.create_dataset('optimization', data=out)
# ----------------------
# High level parameter and version info
write_h5_header(hf, run_params, model)
hf.attrs['optimizer_duration'] = json.dumps(toptimize)
hf.flush()
# ----------------------
# Observational data
write_obs_to_h5(hf, obs)
# Store the githash last after flushing since getting it might cause an
# uncatchable crash
bgh = githash(**run_params)
hf.attrs['prospector_version'] = json.dumps(bgh)
hf.close()
def write_emcee_h5(hf, sampler, model, sampling_initial_center, tsample):
"""Write emcee information to the provided HDF5 file in the `sampling`
group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
if 'chain' not in sdat:
sdat.create_dataset('chain',
data=sampler.chain)
lnp = sampler.lnprobability
if ((lnp.shape[0] != lnp.shape[1]) &
(lnp.T.shape == sampler.chain.shape[:-1])):
# hack to deal with emcee3rc lnprob transposition
lnp = lnp.T
sdat.create_dataset('lnprobability', data=lnp)
sdat.create_dataset('acceptance',
data=sampler.acceptance_fraction)
sdat.create_dataset('sampling_initial_center',
data=sampling_initial_center)
sdat.create_dataset('initial_theta',
data=model.initial_theta.copy())
# JSON Attrs
sdat.attrs['rstate'] = pick(sampler.random_state)
sdat.attrs['sampling_duration'] = json.dumps(tsample)
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
hf.flush()
def write_nestle_h5(hf, nestle_out, model, tsample):
"""Write nestle results to the provided HDF5 file in the `sampling` group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
sdat.create_dataset('chain',
data=nestle_out['samples'])
sdat.create_dataset('weights',
data=nestle_out['weights'])
sdat.create_dataset('lnlikelihood',
data=nestle_out['logl'])
sdat.create_dataset('lnprobability',
data=(nestle_out['logl'] +
model.prior_product(nestle_out['samples'])))
sdat.create_dataset('logvol',
data=nestle_out['logvol'])
sdat.create_dataset('logz',
data=np.atleast_1d(nestle_out['logz']))
sdat.create_dataset('logzerr',
data=np.atleast_1d(nestle_out['logzerr']))
sdat.create_dataset('h_information',
data=np.atleast_1d(nestle_out['h']))
# JSON Attrs
for p in ['niter', 'ncall']:
sdat.attrs[p] = json.dumps(nestle_out[p])
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
sdat.attrs['sampling_duration'] = json.dumps(tsample)
hf.flush()
def write_dynesty_h5(hf, dynesty_out, model, tsample):
"""Write nestle results to the provided HDF5 file in the `sampling` group.
"""
try:
sdat = hf['sampling']
except(KeyError):
sdat = hf.create_group('sampling')
sdat.create_dataset('chain',
data=dynesty_out['samples'])
sdat.create_dataset('weights',
data=np.exp(dynesty_out['logwt']-dynesty_out['logz'][-1]))
sdat.create_dataset('logvol',
data=dynesty_out['logvol'])
sdat.create_dataset('logz',
data=np.atleast_1d(dynesty_out['logz']))
sdat.create_dataset('logzerr',
data=np.atleast_1d(dynesty_out['logzerr']))
sdat.create_dataset('information',
data=np.atleast_1d(dynesty_out['information']))
sdat.create_dataset('lnlikelihood',
data=dynesty_out['logl'])
sdat.create_dataset('lnprobability',
data=(dynesty_out['logl'] +
model.prior_product(dynesty_out['samples'])))
sdat.create_dataset('efficiency',
data=np.atleast_1d(dynesty_out['eff']))
sdat.create_dataset('niter',
data=np.atleast_1d(dynesty_out['niter']))
sdat.create_dataset('samples_id',
data=np.atleast_1d(dynesty_out['samples_id']))
# JSON Attrs
sdat.attrs['ncall'] = json.dumps(dynesty_out['ncall'].tolist())
sdat.attrs['theta_labels'] = json.dumps(list(model.theta_labels()))
sdat.attrs['sampling_duration'] = json.dumps(tsample)
hf.flush()
def write_h5_header(hf, run_params, model):
"""Write header information about the run.
"""
serialize = {'run_params': run_params,
'model_params': [functions_to_names(p.copy())
for p in model.config_list],
'paramfile_text': paramfile_string(**run_params)}
for k, v in list(serialize.items()):
try:
hf.attrs[k] = json.dumps(v) #, cls=NumpyEncoder)
except(TypeError):
# Should this fall back to pickle.dumps?
hf.attrs[k] = pick(v)
warnings.warn("Could not JSON serialize {}, pickled instead".format(k),
RuntimeWarning)
except:
hf.attrs[k] = unserial
warnings.warn("Could not serialize {}".format(k), RuntimeWarning)
hf.flush()
def write_obs_to_h5(hf, obs):
"""Write observational data to the hdf5 file
"""
try:
odat = hf.create_group('obs')
except(ValueError):
# We already have an 'obs' group
return
for k, v in list(obs.items()):
if k == 'filters':
try:
v = [f.name for f in v]
except:
pass
if isinstance(v, np.ndarray):
odat.create_dataset(k, data=v)
else:
try:
odat.attrs[k] = json.dumps(v) #, cls=NumpyEncoder)
except(TypeError):
# Should this fall back to pickle.dumps?
odat.attrs[k] = pick(v)
warnings.warn("Could not JSON serialize {}, pickled instead".format(k))
except:
odat.attrs[k] = unserial
warnings.warn("Could not serialize {}".format(k))
hf.flush()
def optresultlist_to_ndarray(results):
npar, nout = len(results[0].x), len(results[0].fun)
dt = [("success", np.bool), ("message", "S50"), ("nfev", np.int),
("x", (np.float, npar)), ("fun", (np.float, nout))]
out = np.zeros(len(results), dtype=np.dtype(dt))
for i, r in enumerate(results):
for f in out.dtype.names:
out[i][f] = r[f]
return out
def chain_to_struct(chain, model=None, names=None):
"""Given a (flat)chain (or parameter dictionary) and a model, convert the
chain to a structured array
:param chain:
A chain, ndarry of shape (nsamples, ndim) or a dictionary of
parameters, values of which are numpy datatypes.
:param model:
A ProspectorParams instance
:returns struct:
A structured ndarray of parameter values.
"""
indict = type(chain) == dict
if indict:
return dict_to_struct(chain)
else:
n = np.prod(chain.shape[:-1])
        if model is not None:
            assert model.ndim == chain.shape[-1]
            model.set_parameters(chain[0])
names = model.free_params
dt = [(p, model.params[p].dtype, model.params[p].shape)
for p in names]
else:
dt = [(str(p), "<f8", (1,)) for p in names]
struct = np.zeros(n, dtype=np.dtype(dt))
for i, p in enumerate(names):
if model is not None:
inds = model.theta_index[p]
else:
inds = slice(i, i+1, None)
            struct[p] = chain[..., inds].reshape(n, -1)  # works whether inds is a slice or an index array
return struct
def dict_to_struct(indict):
dt = [(p, indict[p].dtype, indict[p].shape)
for p in indict.keys()]
struct = np.zeros(1, dtype=np.dtype(dt))
    for p in indict.keys():
        struct[p] = indict[p]
    return struct
def write_pickles(run_params, model, obs, sampler, powell_results,
outroot=None, tsample=None, toptimize=None,
post_burnin_center=None, post_burnin_prob=None,
sampling_initial_center=None, simpleout=False, **extras):
"""Write results to two different pickle files. One (``*_mcmc``) contains
only lists, dictionaries, and numpy arrays and is therefore robust to
changes in object definitions. The other (``*_model``) contains the actual
model object (and minimization result objects) and is therefore more
fragile.
"""
if outroot is None:
tt = int(time.time())
outroot = '{1}_{0}'.format(tt, run_params['outfile'])
bgh = githash(**run_params)
paramfile_text = paramfile_string(**run_params)
write_model_pickle(outroot + '_model', model, bgh=bgh, powell=powell_results,
paramfile_text=paramfile_text)
if simpleout and _has_h5py_:
return
# write out a simple chain as a pickle. This isn't really necessary since
# the hd5 usually works
results = {}
# Useful global info and parameters
results['run_params'] = run_params
results['obs'] = obs
results['model_params'] = [functions_to_names(p.copy()) for p in model.config_list]
results['theta_labels'] = list(model.theta_labels())
    # Parameter values at various phases
results['initial_theta'] = model.initial_theta
results['sampling_initial_center'] = sampling_initial_center
results['post_burnin_center'] = post_burnin_center
results['post_burnin_prob'] = post_burnin_prob
# Chain and ancillary sampling info
results['chain'] = sampler.chain
results['lnprobability'] = sampler.lnprobability
results['acceptance'] = sampler.acceptance_fraction
results['rstate'] = sampler.random_state
results['sampling_duration'] = tsample
results['optimizer_duration'] = toptimize
results['prospector_version'] = bgh
results['paramfile_text'] = paramfile_text
with open(outroot + '_mcmc', "wb") as out:
pickle.dump(results, out)
def write_model_pickle(outname, model, bgh=None, powell=None, **kwargs):
model_store = {}
model_store['powell'] = powell
model_store['model'] = model
model_store['prospector_version'] = bgh
for k, v in kwargs.items():
try:
model_store[k] = v
except:
pass
with open(outname, "wb") as out:
pickle.dump(model_store, out)
def functions_to_names(p):
"""Replace prior and dust functions (or objects) with the names of those
functions (or pickles).
"""
for k, v in list(p.items()):
if callable(v):
try:
p[k] = [v.__name__, v.__module__]
except(AttributeError):
p[k] = pickle.dumps(v, protocol=2)
return p
```
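A minimal sketch of `chain_to_struct` used without a model, naming the parameters directly; the chain here is random stand-in data.
```python
import numpy as np

chain = np.random.rand(100, 3)    # stand-in (nsamples, ndim) chain
struct = chain_to_struct(chain, names=["mass", "age", "zred"])
print(struct["mass"].shape)       # (100, 1)
```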
#### File: prospect/models/priors.py
```python
import numpy as np
import scipy.stats
__all__ = ["Prior", "TopHat", "Normal", "ClippedNormal",
"LogNormal", "LogUniform", "Beta",
"StudentT", "SkewNormal"]
class Prior(object):
"""Encapsulate the priors in an object. Each prior should have a
distribution name and optional parameters specifying scale and location
(e.g. min/max or mean/sigma). These can be aliased at instantiation using
the ``parnames`` keyword. When called, the argument should be a variable
and the object should return the ln-prior-probability of that value.
.. code-block:: python
ln_prior_prob = Prior()(value)
Should be able to sample from the prior, and to get the gradient of the
    prior at any variable value. Methods should also be available to give a
useful plotting range and, if there are bounds, to return them.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior can
have different parameter names, in case they are being fit for....
"""
def __init__(self, parnames=[], name='', **kwargs):
"""Constructor.
:param parnames:
A list of names of the parameters, used to alias the intrinsic
parameter names. This way different instances of the same Prior
can have different parameter names, in case they are being fit for....
"""
if len(parnames) == 0:
parnames = self.prior_params
assert len(parnames) == len(self.prior_params)
self.alias = dict(zip(self.prior_params, parnames))
self.params = {}
self.name = name
self.update(**kwargs)
def __repr__(self):
argstring = ['{}={}'.format(k, v) for k, v in list(self.params.items())]
return '{}({})'.format(self.__class__, ",".join(argstring))
def update(self, **kwargs):
"""Update `params` values using alias.
"""
for k in self.prior_params:
try:
self.params[k] = kwargs[self.alias[k]]
except(KeyError):
pass
# FIXME: Should add a check for unexpected kwargs.
def __len__(self):
"""The length is set by the maximum size of any of the prior_params.
        Note that the prior params must therefore be scalar or of the same
        length as the largest of the parameters. This is not checked.
"""
return max([np.size(self.params.get(k, 1)) for k in self.prior_params])
def __call__(self, x, **kwargs):
"""Compute the value of the probability desnity function at x and
return the ln of that.
:param x:
Value of the parameter, scalar or iterable of same length as the
Prior object.
:param kwargs: optional
            All extra keyword arguments are used to update the `prior_params`.
:returns lnp:
The natural log of the prior probability at x, scalar or ndarray of
same length as the prior object.
"""
if len(kwargs) > 0:
self.update(**kwargs)
pdf = self.distribution.pdf
try:
p = pdf(x, *self.args, loc=self.loc, scale=self.scale)
except(ValueError):
# Deal with `x` vectors of shape (nsamples, len(prior))
# for pdfs that don't broadcast nicely.
p = [pdf(_x, *self.args, loc=self.loc, scale=self.scale)
for _x in x]
p = np.array(p)
with np.errstate(invalid='ignore'):
lnp = np.log(p)
return lnp
def sample(self, nsample=None, **kwargs):
"""Draw a sample from the prior distribution.
:param nsample: (optional)
Unused
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.rvs(*self.args, size=len(self),
loc=self.loc, scale=self.scale)
def unit_transform(self, x, **kwargs):
"""Go from a value of the CDF (between 0 and 1) to the corresponding
parameter value.
:param x:
A scalar or vector of same length as the Prior with values between
zero and one corresponding to the value of the CDF.
:returns theta:
The parameter value corresponding to the value of the CDF given by
`x`.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.ppf(x, *self.args,
loc=self.loc, scale=self.scale)
def inverse_unit_transform(self, x, **kwargs):
"""Go from the parameter value to the unit coordinate using the cdf.
"""
if len(kwargs) > 0:
self.update(**kwargs)
return self.distribution.cdf(x, *self.args,
loc=self.loc, scale=self.scale)
def gradient(self, theta):
raise(NotImplementedError)
@property
def loc(self):
"""This should be overridden.
"""
return 0
@property
def scale(self):
"""This should be overridden.
"""
return 1
@property
def args(self):
return []
@property
def range(self):
raise(NotImplementedError)
@property
def bounds(self):
raise(NotImplementedError)
def serialize(self):
raise(NotImplementedError)
class TopHat(Prior):
"""A simple uniform prior, described by two parameters
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.uniform
@property
def scale(self):
return self.params['maxi'] - self.params['mini']
@property
def loc(self):
return self.params['mini']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class Normal(Prior):
"""A simple gaussian prior.
:param mean:
Mean of the distribution
:param sigma:
Standard deviation of the distribution
"""
prior_params = ['mean', 'sigma']
distribution = scipy.stats.norm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
nsig = 4
return (self.params['mean'] - nsig * self.params['sigma'],
                self.params['mean'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
#if len(kwargs) > 0:
# self.update(**kwargs)
return (-np.inf, np.inf)
class ClippedNormal(Prior):
"""A Gaussian prior clipped to some range.
:param mean:
Mean of the normal distribution
:param sigma:
Standard deviation of the normal distribution
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mean', 'sigma', 'mini', 'maxi']
distribution = scipy.stats.truncnorm
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
@property
def args(self):
a = (self.params['mini'] - self.params['mean']) / self.params['sigma']
b = (self.params['maxi'] - self.params['mean']) / self.params['sigma']
return [a, b]
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogUniform(Prior):
"""Like log-normal, but the distribution of natural log of the variable is
distributed uniformly instead of normally.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
"""
prior_params = ['mini', 'maxi']
distribution = scipy.stats.reciprocal
@property
def args(self):
a = self.params['mini']
b = self.params['maxi']
return [a, b]
@property
def range(self):
return (self.params['mini'], self.params['maxi'])
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class Beta(Prior):
"""A Beta distribution.
:param mini:
Minimum of the distribution
:param maxi:
Maximum of the distribution
:param alpha:
:param beta:
"""
prior_params = ['mini', 'maxi', 'alpha', 'beta']
distribution = scipy.stats.beta
@property
def scale(self):
return self.params.get('maxi', 1) - self.params.get('mini', 0)
@property
def loc(self):
return self.params.get('mini', 0)
@property
def args(self):
a = self.params['alpha']
b = self.params['beta']
return [a, b]
@property
def range(self):
return (self.params.get('mini',0), self.params.get('maxi',1))
def bounds(self, **kwargs):
if len(kwargs) > 0:
self.update(**kwargs)
return self.range
class LogNormal(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
Note that ``LogNormal(np.exp(mode) / f) == LogNormal(np.exp(mode) * f)``
and ``f = np.exp(sigma)`` corresponds to "one sigma" from the peak.
:param mode:
Natural log of the variable value at which the probability density is
highest.
:param sigma:
Standard deviation of the distribution of the natural log of the
variable.
"""
prior_params = ['mode', 'sigma']
distribution = scipy.stats.lognorm
@property
def args(self):
return [self.params["sigma"]]
@property
def scale(self):
return np.exp(self.params["mode"] + self.params["sigma"]**2)
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
        return (np.exp(self.params['mode'] - (nsig * self.params['sigma'])),
                np.exp(self.params['mode'] + (nsig * self.params['sigma'])))
def bounds(self, **kwargs):
return (0, np.inf)
class LogNormalLinpar(Prior):
"""A log-normal prior, where the natural log of the variable is distributed
normally. Useful for parameters that cannot be less than zero.
LogNormal(mode=x, sigma=y) is equivalent to
LogNormalLinpar(mode=np.exp(x), sigma_factor=np.exp(y))
:param mode:
The (linear) value of the variable where the probability density is
highest. Must be > 0.
:param sigma_factor:
The (linear) factor describing the dispersion of the log of the
variable. Must be > 0
"""
prior_params = ['mode', 'sigma_factor']
distribution = scipy.stats.lognorm
@property
def args(self):
return [np.log(self.params["sigma_factor"])]
@property
def scale(self):
k = self.params["sigma_factor"]**np.log(self.params["sigma_factor"])
return self.params["mode"] * k
@property
def loc(self):
return 0
@property
def range(self):
nsig = 4
        return (self.params['mode'] / (nsig * self.params['sigma_factor']),
                self.params['mode'] * (nsig * self.params['sigma_factor']))
def bounds(self, **kwargs):
return (0, np.inf)
class SkewNormal(Prior):
"""A normal distribution including a skew parameter
:param location:
Center (*not* mean, mode, or median) of the distribution.
The center will approach the mean as skew approaches zero.
:param sigma:
Standard deviation of the distribution
:param skew:
Skewness of the distribution
"""
prior_params = ['location', 'sigma', 'skew']
distribution = scipy.stats.skewnorm
@property
def args(self):
return [self.params['skew']]
@property
def scale(self):
return self.params['sigma']
@property
def loc(self):
return self.params['location']
@property
def range(self):
nsig = 4
return (self.params['location'] - nsig * self.params['sigma'],
self.params['location'] + nsig * self.params['sigma'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
class StudentT(Prior):
"""A Student's T distribution
:param mean:
Mean of the distribution
:param scale:
Size of the distribution, analogous to the standard deviation
:param df:
Number of degrees of freedom
"""
prior_params = ['mean', 'scale', 'df']
distribution = scipy.stats.t
@property
def args(self):
return [self.params['df']]
@property
def scale(self):
return self.params['scale']
@property
def loc(self):
return self.params['mean']
@property
def range(self):
nsig = 4
        return (self.params['mean'] - nsig * self.params['scale'],
                self.params['mean'] + nsig * self.params['scale'])
def bounds(self, **kwargs):
return (-np.inf, np.inf)
```
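A short sketch of the `Prior` API described in the docstrings above:
```python
prior = TopHat(mini=0.0, maxi=10.0)
lnp = prior(5.0)                   # ln prior probability at x = 5.0
draw = prior.sample()              # random draw from the distribution
theta = prior.unit_transform(0.5)  # CDF value -> parameter value (5.0 here)
```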
|
{
"source": "jemrobinson/capital-gains-calculator",
"score": 3
}
|
#### File: capital-gains-calculator/capital_gains_calculator/disposal.py
```python
from .transaction import Transaction
from .utils import as_money, abs_divide
class Disposal(Transaction):
"""A combined purchase and sale"""
def __init__(
self,
date_time,
currency,
units,
purchase_total,
purchase_fees,
purchase_taxes,
sale_total,
sale_fees,
sale_taxes,
):
super().__init__(date_time=date_time, currency=currency, units=units)
self.purchase_total = as_money(purchase_total, currency)
self.purchase_fees = as_money(purchase_fees, currency)
self.purchase_taxes = as_money(purchase_taxes, currency)
self.sale_total = as_money(sale_total, currency)
self.sale_fees = as_money(sale_fees, currency)
self.sale_taxes = as_money(sale_taxes, currency)
self.type = "DISPOSAL"
@property
def subtotal(self):
"""Subtotal is not a valid property for this class"""
raise NotImplementedError(
"Subtotal is not a valid property for the Disposal class"
)
# return Money(-1, self.currency)
@property
def unit_price_sold(self):
"""The unit price at which the units were sold"""
return abs_divide(self.sale_total, self.units)
@property
def unit_price_bought(self):
"""The unit price at which the units were bought"""
return abs_divide(self.purchase_total, self.units)
@property
def gain(self):
"""The capital gain made upon sale"""
return self.sale_total - self.purchase_total
@property
def is_null(self):
"""Whether this is a null transaction"""
return (self.sale_total == self.currency.zero) and (
self.purchase_total == self.currency.zero
)
def __str__(self):
return f"Transaction: {self.type:8s} date = {self.date}, units = {self.units}, purchase_total = {self.purchase_total}, sale_total = {self.sale_total}, gain = {self.gain}"
class BedAndBreakfast(Disposal):
"""A disposal where the buying/selling are within 30 days"""
def __init__(self, disposal):
super().__init__(
disposal.datetime,
disposal.currency,
disposal.units,
disposal.purchase_total,
disposal.purchase_fees,
disposal.purchase_taxes,
disposal.sale_total,
disposal.sale_fees,
disposal.sale_taxes,
)
@property
def subtotal(self):
"""Subtotal is not a valid property for this class"""
raise NotImplementedError(
"Subtotal is not a valid property for the BedAndBreakfast class"
)
```
#### File: capital-gains-calculator/capital_gains_calculator/excess_reportable_income.py
```python
from .purchase import Purchase
class ExcessReportableIncome(Purchase):
"""Excess reportable income from accumulation shares treated as a purchase of 0 additional shares"""
def __init__(self, date_time, currency, amount, **kwargs):
super().__init__(
date_time=date_time,
currency=currency,
subtotal=amount,
units=0,
fees=0,
taxes=0,
**kwargs
)
self.type = "ERI"
```
#### File: capital-gains-calculator/capital_gains_calculator/pooled_purchase.py
```python
from .disposal import Disposal, BedAndBreakfast
from .purchase import Purchase
class PooledPurchase(Purchase):
"""Combination of several transactions"""
def __init__(self, currency, **kwargs):
kwargs["date_time"] = kwargs.get("date_time", "0001-01-01")
super().__init__(currency=currency, **kwargs)
self.type = "POOL"
@classmethod
def from_purchase(cls, purchase, currency):
"""Create a PooledPurchase from a Purchase"""
if not purchase:
return cls(currency)
return cls(
date_time=purchase.datetime,
currency=currency,
units=purchase.units,
subtotal=purchase.subtotal,
fees=purchase.fees,
taxes=purchase.taxes,
)
def add_purchase(self, purchase):
"""Add a purchase to the pool"""
if not isinstance(purchase, Purchase):
raise ValueError(f"{purchase} is not a valid Purchase!")
self.datetime = max([self.datetime, purchase.datetime])
self.units = self.units + purchase.units
self.subtotal_ = self.subtotal + purchase.subtotal
self.fees = self.fees + purchase.fees
self.taxes = self.taxes + purchase.taxes
def add_disposal(self, disposal):
"""Add a disposal to the pool"""
if not isinstance(disposal, Disposal):
raise ValueError(f"{disposal} is not a valid Purchase!")
self.datetime = max([self.datetime, disposal.datetime])
self.units = self.units - disposal.units
self.subtotal_ = self.subtotal - disposal.purchase_total
self.fees = self.fees + disposal.fees
self.taxes = self.taxes + disposal.taxes
def add_bed_and_breakfast(self, bed_and_breakfast):
"""Add a bed-and-breakfast to the pool"""
if not isinstance(bed_and_breakfast, BedAndBreakfast):
raise ValueError(f"{bed_and_breakfast} is not a valid BedAndBreakfast!")
self.datetime = max([self.datetime, bed_and_breakfast.datetime])
self.subtotal_ = self.subtotal + bed_and_breakfast.gain
```
#### File: capital-gains-calculator/capital_gains_calculator/purchase.py
```python
from .transaction import Transaction
class Purchase(Transaction):
"""Transaction where a security is bought"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.type = "BOUGHT"
@property
def subtotal(self):
return self.subtotal_
```
#### File: capital-gains-calculator/capital_gains_calculator/transaction.py
```python
from decimal import Decimal
# Local imports
from .utils import as_money, as_datetime, abs_divide
class Transaction:
"""Transaction where money is exchanged for a security"""
def __init__(
self, date_time, currency, units=0, subtotal=0, fees=0, taxes=0, note=""
):
self.datetime = as_datetime(date_time)
self.currency = currency
self.units = Decimal(units)
self.subtotal_ = as_money(subtotal, currency)
self.fees = as_money(fees, currency)
self.taxes = as_money(taxes, currency)
self.note = str(note)
self.type = None
@property
def date(self):
"""Date of transaction"""
return self.datetime.date()
@property
def unit_price(self):
"""Base price paid per unit in this transaction"""
return abs_divide(self.subtotal, self.units)
@property
def unit_fees(self):
"""Fees paid per unit in this transaction"""
return abs_divide(self.fees, self.units)
@property
def unit_taxes(self):
"""Taxes paid per unit in this transaction"""
return abs_divide(self.taxes, self.units)
@property
def unit_price_inc(self):
"""Total price paid per unit in this transaction"""
return abs_divide(self.total, self.units)
@property
def charges(self):
"""Total charges paid in this transaction"""
return self.fees + self.taxes
@property
def subtotal(self):
"""Subtotal must be implemented by child classes"""
raise NotImplementedError
@property
def total(self):
"""Total paid in this transaction"""
return self.charges + self.subtotal
@property
def is_null(self):
"""Whether this is a null transaction"""
return self.total == self.currency.zero
def __str__(self):
return f"Transaction: {self.type:8s} date = {self.date}, units = {self.units}, unit_price = {self.unit_price}, subtotal = {self.subtotal}, fees = {self.fees}, taxes = {self.taxes}, total = {self.total}"
```
|
{
"source": "jemrobinson/shifl",
"score": 3
}
|
#### File: shui/classes/fileinfo.py
```python
from tqdm import tqdm
import requests
class FileInfo:
"""Class to relate local and remote information about a Spark/Hadoop file"""
def __init__(self, remote_url, local_path):
self.url = remote_url
self.path = local_path
@property
def name(self):
"""Get the name of the local file"""
return self.path.name
@property
def is_hashfile(self):
"""Boolean indicating whether this is a hashfile"""
return self.path.suffix == ".sha512"
def download(self):
"""Download this Spark/Hadoop version from a remote URL to a local path"""
response = requests.get(self.url, stream=True, allow_redirects=True)
        total_size = int(response.headers.get("content-length", 0))
with open(self.path, "wb") as output_file:
with tqdm(total=total_size, unit="B", unit_scale=True) as progress_bar:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
output_file.write(chunk)
progress_bar.update(len(chunk))
def is_hash_for(self, other):
"""Boolean indicating whether this is the hashfile corresponding to another file"""
return self.is_hashfile and self.path.stem == other.path.name
def remove(self):
"""Remove the local file"""
if self.path.is_file():
self.path.unlink()
```
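A minimal usage sketch for `FileInfo`; the release URL and local filename below are illustrative, not taken from the repository:
```python
from pathlib import Path

# Hypothetical Spark release URL and local target path.
tarball = FileInfo(
    "https://archive.apache.org/dist/spark/spark-3.0.1/spark-3.0.1-bin-hadoop3.2.tgz",
    Path("spark-3.0.1-bin-hadoop3.2.tgz"),
)
tarball.download()  # streams the file in 1 KB chunks with a tqdm progress bar
```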
#### File: shui/classes/file_with_hash.py
```python
import hashlib
class FileWithHash:
"""Class to contain file information for a file and its associated hash file"""
def __init__(self, file_, hashfile):
if not hashfile.is_hashfile:
raise ValueError(f"{hashfile.name} is not a hashfile!")
if not hashfile.is_hash_for(file_):
raise ValueError(
f"{hashfile.name} is not the correct hashfile for {file_.name}!"
)
self.file = file_
self.hashfile = hashfile
def __iter__(self):
yield self.file
yield self.hashfile
def remove(self):
"""Remove tarball and SHA512 hash"""
for fileinfo in self:
fileinfo.remove()
def verify(self):
"""Verify that a file matches its SHA512 hash"""
# Get the file hash
file_hash = hashlib.sha512()
buffer_size = 524288 # read in chunks of 512kb
with self.file.path.open("rb") as input_file:
input_bytes = True
while input_bytes:
input_bytes = input_file.read(buffer_size)
file_hash.update(input_bytes)
calculated_hash = file_hash.hexdigest().lower()
# Read the reference hash
with self.hashfile.path.open("r") as input_hash:
reference_hash = (
"".join(input_hash.readlines())
.replace("\n", " ")
.replace(" ", "")
.split(":")[1]
.strip()
.lower()
)
return calculated_hash == reference_hash
```
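A sketch of pairing `FileWithHash` with the `FileInfo` class above; note that `is_hash_for` requires the hashfile name to be the tarball name plus `.sha512` (the URLs and paths here are placeholders):
```python
from pathlib import Path

tarball = FileInfo("https://example.org/spark.tgz", Path("spark.tgz"))
hashfile = FileInfo("https://example.org/spark.tgz.sha512", Path("spark.tgz.sha512"))

pair = FileWithHash(tarball, hashfile)
if pair.verify():
    print("SHA512 hash matches")
else:
    pair.remove()  # delete both the tarball and its hashfile on mismatch
```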
#### File: shui/commands/versions_command.py
```python
from cleo import Command
from shui.functions import get_versions
class VersionsCommand(Command):
"""
Get available Spark and Hadoop versions
versions
{--latest : Show only the latest available version}
"""
def handle(self):
versions = get_versions()
if self.option("latest"):
versions = [sorted(versions)[-1]]
for version in versions:
self.line(f"Available version: {version}")
```
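A hedged sketch of wiring the command into an application, assuming the pre-1.0 cleo API that the docstring-based command configuration implies:
```python
from cleo import Application

application = Application()
application.add(VersionsCommand())

if __name__ == "__main__":
    # e.g. `shui versions --latest` would print only the newest version
    application.run()
```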
|
{
"source": "jemrobinson/Stormspotter",
"score": 2
}
|
#### File: assets/aad/aadserviceprincipal.py
```python
from dataclasses import dataclass, field
from typing import List
from .aadobject import AADObject
from stormspotter.ingestor.utils.resources import *
@dataclass
class AADServicePrincipal(AADObject):
resource = "servicePrincipals"
node_label: str = AADSPN_NODE_LABEL
    query_parameters: List = field(default_factory=list)
api_version: str = "1.6-internal"
def parse(self, tenant_id, value, context):
if not value["microsoftFirstParty"]:
owners = self.expand(tenant_id, value["objectId"], "owners", context)
owner_ids = [owner['objectId'] for owner in owners["value"]]
value["owners"] = owner_ids
else:
value["owners"] = []
return value
#context.neo4j.insert_asset(obj, AADOBJECT_NODE_LABEL, obj["objectid"], [AADSPN_NODE_LABEL])
#if obj["owners"]:
# for owner in value["owners"]:
# context.neo4j.create_relationship(owner["objectId"],
# AADOBJECT_NODE_LABEL, obj["objectid"],
# AADSPN_NODE_LABEL, AAD_TO_ASSET)
```
#### File: ingestor/utils/cloud.py
```python
import configparser
from msrestazure.azure_cloud import Cloud, CloudEndpoints, CloudSuffixes, AZURE_PUBLIC_CLOUD, AZURE_GERMAN_CLOUD, AZURE_CHINA_CLOUD, AZURE_US_GOV_CLOUD
class CloudContext:
"""
Specifies which endpoints based on cloud instance for authentication purposes
"""
def __init__(self, cloud, config=None):
configuration = configparser.ConfigParser()
if config:
# read config file and make Cloud object
configuration.read(config)
name = configuration['NAME']['Cloud']
self.cloud = Cloud(
name,
endpoints=CloudEndpoints(
management=configuration['ENDPOINTS']['Management'],
resource_manager=configuration['ENDPOINTS']['Resource_Manager'],
sql_management=configuration['ENDPOINTS']['SQL_Management'],
batch_resource_id=configuration['ENDPOINTS']['Batch_ResourceId'],
gallery=configuration['ENDPOINTS']['Gallery'],
active_directory=configuration['ENDPOINTS']['AD'],
active_directory_resource_id=configuration['ENDPOINTS']['AD_ResourceId'],
active_directory_graph_resource_id=configuration['ENDPOINTS']['AD_Graph_ResourceId']
),
suffixes=CloudSuffixes(
storage_endpoint=configuration['SUFFIXES']['Storage_Endpoint'],
keyvault_dns=configuration['SUFFIXES']['Keyvault_DNS'],
sql_server_hostname=configuration['SUFFIXES']['SQLServer_Hostname'],
)
)
else:
if cloud == 'GERMANY':
self.cloud = AZURE_GERMAN_CLOUD
elif cloud == 'CHINA':
self.cloud = AZURE_CHINA_CLOUD
elif cloud == 'USGOV':
self.cloud = AZURE_US_GOV_CLOUD
elif cloud == 'PUBLIC':
self.cloud = AZURE_PUBLIC_CLOUD
```
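Illustrative use of `CloudContext` with a built-in cloud definition; the attribute names come from msrestazure's `Cloud` object:
```python
# Select the built-in US Government cloud definition.
context = CloudContext("USGOV")
print(context.cloud.name)
print(context.cloud.endpoints.resource_manager)
print(context.cloud.suffixes.storage_endpoint)
```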
|
{
"source": "jemsbhai/battlenotes",
"score": 3
}
|
#### File: jemsbhai/battlenotes/note_recognition.py
```python
from pydub import AudioSegment
import pydub.scipy_effects
import numpy as np
import scipy
import matplotlib.pyplot as plt
from solo_generation_esac import *
from utils import frequency_spectrum, \
calculate_distance, \
classify_note_attempt_1, \
classify_note_attempt_2, \
classify_note_attempt_3
def main(file, note_arr=None, plot_starts=False, plot_fft_indices=[]):
actual_notes = []
if note_arr:
actual_notes = note_arr
song = AudioSegment.from_file(file)
#song = song.high_pass_filter(80, order=4)
starts = predict_note_starts(song, plot_starts)
predicted_notes = predict_notes(song, starts, plot_fft_indices)
print("")
if actual_notes:
print("Actual Notes")
print(actual_notes)
print("Predicted Notes")
print(predicted_notes)
if actual_notes:
lev_distance = calculate_distance(predicted_notes, actual_notes)
score = abs(len(actual_notes) - lev_distance)/len(actual_notes)
print("Levenshtein distance: {}/{}".format(lev_distance, len(actual_notes)))
return score
# Very simple implementation, just requires a minimum volume and looks for left edges by
# comparing with the prior sample, also requires a minimum distance between starts
# Future improvements could include smoothing and/or comparing multiple samples
#
# song: pydub.AudioSegment
# plot: bool, whether to show a plot of start times
#
# Returns predicted starts in ms
def predict_note_starts(song, plot):
# Size of segments to break song into for volume calculations
SEGMENT_MS = 50
# Minimum volume necessary to be considered a note
VOLUME_THRESHOLD = -27.8
# The increase from one sample to the next required to be considered a note
EDGE_THRESHOLD = 0.09
# Throw out any additional notes found in this window
MIN_MS_BETWEEN = 100
# Filter out lower frequencies to reduce noise
#song = song.high_pass_filter(80, order=4)
# dBFS is decibels relative to the maximum possible loudness
volume = [segment.dBFS for segment in song[::SEGMENT_MS]]
predicted_starts = []
for i in range(1, len(volume)):
if volume[i] > VOLUME_THRESHOLD and volume[i] - volume[i - 1] > EDGE_THRESHOLD:
ms = i * SEGMENT_MS
# Ignore any too close together
if len(predicted_starts) == 0 or ms - predicted_starts[-1] >= MIN_MS_BETWEEN:
predicted_starts.append(ms)
#predicted_starts.append(ms)
#for i in range(len(predicted_starts)-2):
# if predicted_starts[i+1] - predicted_starts[i] <= MIN_MS_BETWEEN:
# predicted_starts.remove(predicted_starts[i])
# Plot the volume over time (sec)
if plot:
x_axis = np.arange(len(volume)) * (SEGMENT_MS / 1000)
plt.plot(x_axis, volume)
# Add vertical lines for predicted note starts and actual note starts
for ms in predicted_starts:
plt.axvline(x=(ms / 1000), color="g", linewidth=0.5, linestyle=":")
plt.show()
return predicted_starts
def predict_notes(song, starts, plot_fft_indices):
predicted_notes = []
for i, start in enumerate(starts):
sample_from = start + 50
sample_to = start + 200
if i < len(starts) - 1:
sample_to = min(starts[i + 1], sample_to)
segment = song[sample_from:sample_to]
freqs, freq_magnitudes = frequency_spectrum(segment)
predicted = classify_note_attempt_2(freqs, freq_magnitudes)
predicted_notes.append(predicted or "U")
# Print general info
print("")
print("Note: {}".format(i))
print("Predicted start: {}".format(start))
length = sample_to - sample_from
print("Sampled from {} to {} ({} ms)".format(sample_from, sample_to, length))
print("Frequency sample period: {}hz".format(freqs[1]))
# Print peak info
        peak_indices, props = scipy.signal.find_peaks(freq_magnitudes, height=0.015)
        print("Peaks of more than 1.5 percent of total frequency contribution:")
        for j, peak in enumerate(peak_indices):
freq = freqs[peak]
magnitude = props["peak_heights"][j]
print("{:.1f}hz with magnitude {:.3f}".format(freq, magnitude))
if i in plot_fft_indices:
plt.plot(freqs, freq_magnitudes, "b")
plt.xlabel("Freq (Hz)")
plt.ylabel("|X(freq)|")
plt.show()
return predicted_notes
if __name__ == "__main__":
main("untitled.wav", note_arr=["C", "D", "E", "F", "G", "A"], plot_starts=True)
```
#### File: jemsbhai/battlenotes/solo_generation_esac.py
```python
import pandas as pd
import sqlite3
from music21 import *
from music21.converter.subConverters import ConverterMidi, ConverterMusicXML
import random
import os
import re
def get_solo_melody(difficulty):
con = sqlite3.connect("esac.db")
solo_info = "esac_info2"
# Load the solo data into a DataFrame
solo_info_df = pd.read_sql_query(f"SELECT * from {solo_info}", con)
# Get random solo with appropriate difficulty
#solo = solo_info_df[abs(solo_info_df['avg_events_per_bar'] - difficulty) < 1].sample(1)
solo = solo_info_df[abs(solo_info_df['notes_per_melody'] - difficulty*10) < 10].sample(1)
#solo_id = 'A0128A'
solo_id = solo.iloc[0]['esacid']
mel_id = solo.iloc[0]['melid']
print(solo_id)
return solo_id, mel_id
def get_abc_file(esacid):
user_input = 'esac'
directory = os.listdir(user_input)
searchstring = f'N: {esacid}\n'
file = ''
for fname in directory:
if os.path.isfile(user_input + os.sep + fname):
# Full path
f = open(user_input + os.sep + fname, 'r', errors='ignore')
if searchstring in f.read():
file = fname
print(file)
f.close()
break
f.close()
nextbreak = 0
# find line number of esacid
with open(user_input + os.sep + file, 'r', errors='ignore') as myFile:
for num, line in enumerate(myFile, 1):
if searchstring in line and nextbreak==0:
print('found at line:', num)
linenum = num
nextbreak = 1
elif '\n' == line and nextbreak == 1:
nextbreak = num
print('next break:', nextbreak)
with open(user_input + os.sep + file, 'r', errors='ignore') as myFile:
opusnum = myFile.readlines()[linenum-3][2:-1]
print('opus', opusnum)
with open(user_input + os.sep + file, 'r', errors='ignore') as myFile:
musictext = myFile.readlines()[linenum+6:nextbreak-1]
print('music text', musictext)
return file, opusnum
def display_music(file, opusnum):
s = converter.parse(f'esac/{file}', number=opusnum)
s.metadata.title = ''
us = environment.UserSettings()
us['autoDownload'] = 'allow'
us['lilypondPath'] = 'C:/Program Files (x86)/LilyPond/usr/bin/lilypond.exe'
us['musescoreDirectPNGPath'] = 'C:/Program Files/MuseScore 3/bin/MuseScore3.exe'
us['musicxmlPath'] = 'C:/Program Files/MuseScore 3/bin/MuseScore3.exe'
filepath = r'output'
conv_musicxml = ConverterMusicXML()
out_filepath = conv_musicxml.write(s, 'musicxml', fp=filepath, subformats=['png'])
# importing PIL
#from PIL import Image
#img = Image.open(filepath + '-1.png')
#img.show()
s.show()
def get_correct_notes(mel_id):
con = sqlite3.connect("esac.db")
melody_df = pd.read_sql_query(f"SELECT * from melody WHERE melid = {mel_id}", con)
melody_list = list(melody_df['pitch'])
return melody_list
if __name__ == "__main__":
solo_id, mel_id = get_solo_melody(4)
file, opusnum = get_abc_file(solo_id)
get_correct_notes(mel_id)
display_music(file, opusnum)
```
|
{
"source": "jemsbhai/hackillinois2021",
"score": 3
}
|
#### File: hackillinois2021/restaurant_app/basebackendrouter.py
```python
import os
import pymongo
import json
import random
# import psycopg2
import hashlib
import time
from hashlib import sha256
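# NOTE: edit_distance() is used by the "attempt" action below but is neither
# defined nor imported in the original file; a standard Levenshtein distance
# implementation is assumed here so the module is self-contained.
def edit_distance(s1, s2):
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1):
        current = [i + 1]
        for j, c2 in enumerate(s2):
            current.append(min(previous[j + 1] + 1,       # deletion
                               current[j] + 1,            # insertion
                               previous[j] + (c1 != c2))) # substitution
        previous = current
    return previous[-1]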
def dummy(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
if request.method == 'OPTIONS':
        # Allow cross-origin preflight: any origin, POST method, any headers
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Headers': '*',
'Access-Control-Max-Age': '3600',
'Access-Control-Allow-Credentials': 'true'
}
return ('', 204, headers)
# Set CORS headers for main requests
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': 'true'
}
request_json = request.get_json()
mongostr = os.environ.get('MONGOSTR')
client = pymongo.MongoClient(mongostr)
db = client["merrydining"]
retjson = {}
action = request_json['action']
if action == "getsingletabledata":
col = db.tabledata
for x in col.find():
if int(x['id']) == int(request_json['id']):
status = x['status']
diet = x['diet']
allergy = x['allergy']
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "success"
retjson['occupancy'] = status
retjson['diet'] = diet
retjson['allergy'] = allergy
return json.dumps(retjson)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "fail"
retjson['id'] = "-1"
return json.dumps(retjson)
if action == "updatesingletable":
col = db.tabledata
for x in col.find():
if int(x['id']) == int(request_json['id']):
if 'status' in request_json:
col.update_one({"id": x['id']}, {"$set":{"status":request_json['status']}})
if 'diet' in request_json:
col.update_one({"id": x['id']}, {"$set":{"diet":request_json['diet']}})
if 'allergy' in request_json:
col.update_one({"id": x['id']}, {"$set":{"allergy":request_json['allergy']}})
status = x['status']
diet = x['diet']
allergy = x['allergy']
retjson = {}
# retjson['dish'] = userid
retjson['responsestatus'] = "success"
retjson['status'] = status
retjson['diet'] = diet
retjson['allergy'] = allergy
return json.dumps(retjson)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "fail"
retjson['id'] = "-1"
return json.dumps(retjson)
if action == "getalltabledata":
col = db.tabledata
tables = []
for x in col.find():
table = {}
table['tableid'] = x['id']
table['status'] = x['status']
table['diet'] = x['diet']
table['allergy'] = x['allergy']
tables.append(table)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "success"
retjson['tables'] = tables
return json.dumps(retjson)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "fail"
retjson['id'] = "-1"
return json.dumps(retjson)
if action == "addscore" :
maxid = 1
col = db.scores
for x in col.find():
id = x["id"]
maxid +=1
id = str(maxid+1)
payload = {}
uid = id
payload["id"] = id
# payload["uid"] = request_json['uid']
# payload["name"] = request_json['name']
payload["userid"] = request_json['userid']
payload["score"] = request_json['score']
result=col.insert_one(payload)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "successfully added"
retjson['id'] = id
return json.dumps(retjson)
if action == "getmyscore":
col = db.scores
for x in col.find():
if x['userid'] == request_json['userid']:
score = x['score']
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "success"
retjson['score'] = score
return json.dumps(retjson)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "fail"
retjson['id'] = "-1"
return json.dumps(retjson)
if action == "getallitems":
col = db.items
scores = []
for x in col.find():
entry = {}
entry['userid'] = x['userid']
entry['item'] = x['item']
scores.append(entry)
# retjson['dish'] = userid
retjson['status'] = "success"
retjson['scores'] = scores
return json.dumps(retjson)
retjson = {}
if action == "attempt":
col = db.tables
uid = request_json['uid']
sid = request_json['sid']
uline = request_json['line']
for x in col.find():
if x['id'] == sid:
line = x['line']
num = len(line)
# score = num - editDistance(line, uline, len(line), len(uline))
score = num - edit_distance(line, uline)
if score < 0:
score = 0
retjson = {}
# retjson['dish'] = userid
retjson['score'] = score
retjson['id'] = "-1"
return json.dumps(retjson)
if action == "getrandomtable":
col = db.tables
maxid = 0
for x in col.find():
maxid = int(x["id"])
index = random.randint(1, maxid)
for x in col.find():
if x['id'] == str(index):
sid = x['id']
url = x['url']
retjson = {}
# retjson['dish'] = userid
retjson['url'] = url
retjson['id'] = sid
return json.dumps(retjson)
retjson = {}
# retjson['dish'] = userid
retjson['status'] = "fail"
retjson['id'] = "-1"
return json.dumps(retjson)
retstr = "action not done"
if request.args and 'message' in request.args:
return request.args.get('message')
elif request_json and 'message' in request_json:
return request_json['message']
else:
return retstr
```
|
{
"source": "jemsbhai/safesmart",
"score": 3
}
|
#### File: safesmart/backend/gcpuploader.py
```python
from google.cloud import storage
import os
import sys
def uploadtobucket(filename, bucketname):
from google.cloud import storage
# Explicitly use service account credentials by specifying the private key
# file.
storage_client = storage.Client.from_service_account_json('googlecreds.json')
# Make an authenticated API request
## buckets = list(storage_client.list_buckets())
## print(buckets)
bucket = storage_client.get_bucket(bucketname)
destination_blob_name = filename
source_file_name = filename
blob = bucket.blob(destination_blob_name)
blob.cache_control = "no-cache"
blob.upload_from_filename(source_file_name)
# blob.make_public()
blob.cache_control = "no-cache"
print('File {} uploaded to {}.'.format(source_file_name, destination_blob_name))
filename = sys.argv[1]
bucketname = sys.argv[2]
uploadtobucket(filename, bucketname)
```
|
{
"source": "jemsbhai/safewalk",
"score": 3
}
|
#### File: safewalk/backend/wayfinder.py
```python
import requests
import json
def getsafety(lat, lng):
url = "https://api.crimeometer.com/v1/incidents/raw-data"
querystring = {"lat":str(lat),"lon":str(lng),"distance":"500ft","datetime_ini":"%202020-03-01%27T%2700:%2000:%2000.0","datetime_end":"%202021-03-25%27T%2700:%2000:%2000.0"}
payload = ""
headers = {
'Content-Type': "application/json",
'x-api-key': "REDACTEDKEY",
'cache-control': "no-cache",
'Postman-Token': "<PASSWORD>"
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
print(response.text)
js = json.loads(response.text)
t = js['total_incidents']
return t
def getpoints(rj, n):
r = rj['routes'][n]
# if not r:
# return []
legs = []
for l in r['legs']:
steps = l['steps']
for s in steps:
lat = s['start_location']['lat']
lon = s['start_location']['lng']
point = {}
point['lat'] = lat
point['lon'] = lon
##test safety
sf = getsafety(lat, lon)
if sf > 3:
return getpoints(rj, n+1)
legs.append(point)
lat = s['end_location']['lat']
lon = s['end_location']['lng']
point = {}
point['lat'] = lat
point['lon'] = lon
##test safety
sf = getsafety(lat, lon)
if sf > 3:
return getpoints(rj, n+1)
legs.append(point)
print(legs)
return legs
def getpath(lat, lng, lat2, lng2):
url = "https://maps.googleapis.com/maps/api/directions/json"
deststr = str(lat2) + "," + str(lng2)
originstr = str(lat) + "," + str(lng)
querystring = {"destination":deststr,"key":"KEYREDACTEDFORSAFETY","mode":"walking","origin":originstr}
payload = ""
headers = {
'cache-control': "no-cache",
'Postman-Token': "<PASSWORD>"
}
response = requests.request("GET", url, data=payload, headers=headers, params=querystring)
print(response.text)
rj = json.loads(response.text)
legs = getpoints(rj, 0)
return legs
```
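A sketch of calling the route finder; the coordinates are arbitrary, and since the API keys in the file are redacted this will not run without real credentials:
```python
# Walk between two points in Manhattan, retrying alternative routes whenever
# a waypoint has more than 3 reported incidents within 500 ft.
waypoints = getpath(40.7589, -73.9851, 40.7484, -73.9857)
for point in waypoints:
    print(point['lat'], point['lon'])
```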
|
{
"source": "jemsgit/zero-btc-screen",
"score": 2
}
|
#### File: jemsgit/zero-btc-screen/main.py
```python
import json
import random
import time
import threading
from datetime import datetime, timezone, timedelta
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
from config.builder import Builder
from config.config import config
from config.currency_config import currencyConfig
from logs import logger
from presentation.observer import Observable
from alarm.alarm_manager import alarmManager as alarmManager
from settings_server.server import initSettingsServer
import RPi.GPIO as GPIO
BUTTON_CURRENCY_CHANNEL = 4
BUTTON_INTERVAL_CHANNEL = 18
API_INTERVALS = ['m', 'h', 'd', 'w', 'M']
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_CURRENCY_CHANNEL, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(BUTTON_INTERVAL_CHANNEL, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
API_URL = 'https://api.binance.com/api/v3/ticker/price?symbol=%sUSDT'
API_URL_CANDLE = 'https://api.binance.com/api/v3/klines?symbol=%sUSDT&interval=1%s&limit=21'
currencyList = currencyConfig.currencyList or ['BTC']
currency_index = 0
currency_interval_index = 1
def data_mapper_to_old(item):
data = item[0:5]
return [float(i) for i in data]
def updateCurrencyList():
global currencyList
global currency_index
currentCurrency = currencyList[currency_index]
currencyList = currencyConfig.currencyList
currency_index = currencyList.index(currentCurrency) if currentCurrency in currencyList else -1
currencyConfig.subscribeToUpdates(updateCurrencyList)
def get_dummy_data():
logger.info('Generating dummy data')
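    # NOTE: in the original source this function ends without a return
    # statement, so it yields None; the config.dummy_data branch in main()
    # would therefore raise a TypeError when iterating over its result.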
def applyButtonCallback(channel, shortCb, longCb, event):
start_time = time.time()
while GPIO.input(channel) == 1: # Wait for the button up
pass
buttonTime = time.time() - start_time # How long was the button down?
if buttonTime >= 1:
print("long button")
longCb(event)
return
if buttonTime >= .1:
print("short button")
shortCb(event)
def switch_currency(event):
if(stopAlarm()):
return
applyButtonCallback(BUTTON_CURRENCY_CHANNEL, switch_currency_forward, switch_currency_back, event)
def switch_currency_forward(event):
global currency_index
currency_index +=1
if(currency_index >= len(currencyList)):
currency_index = 0
def switch_currency_back(event):
global currency_index
currency_index -=1
if(currency_index < 0):
currency_index = len(currencyList) - 1
def switch_interval(event):
if(stopAlarm()):
return
applyButtonCallback(BUTTON_INTERVAL_CHANNEL, switch_interval_forward, switch_interval_back, event)
def switch_interval_forward(event):
global currency_interval_index
currency_interval_index +=1
if(currency_interval_index >= len(API_INTERVALS)):
currency_interval_index = 0
def switch_interval_back(event):
global currency_interval_index
currency_interval_index -=1
if(currency_interval_index < 0):
currency_interval_index = len(API_INTERVALS) - 1
def stopAlarm():
if(alarmManager.isAlarm == True):
alarmManager.stopAlarm()
return True
else:
return False
def get_currency():
return currencyList[currency_index]
def get_period():
return API_INTERVALS[currency_interval_index]
def fetch_currency_data(currency, interval):
prices = None
current_price = None
try:
CURRENCY_API_URL = API_URL % currency
logger.info('Fetching prices')
req = Request(CURRENCY_API_URL)
data = urlopen(req).read()
external_data = json.loads(data)
current_price = float(external_data['price'])
CURRENCY_API_URL = API_URL_CANDLE % (currency, interval)
logger.info('Fetching prices2')
req = Request(CURRENCY_API_URL)
data = urlopen(req).read()
external_data = json.loads(data)
prices = map(data_mapper_to_old, external_data)
prices = [entry[1:] for entry in prices]
    except Exception:
print('currency error')
switch_currency_forward(None)
return (prices, current_price)
def fetch_prices():
currency = get_currency()
interval = get_period()
return fetch_currency_data(currency, interval)
def alarm_callback(cur):
print("alarm")
def main():
logger.info('Initialize')
try:
initSettingsServer()
    except Exception:
print('bluetooth error')
data_sink = Observable()
builder = Builder(config)
builder.bind(data_sink)
currency = get_currency()
interval = get_period()
GPIO.add_event_detect(BUTTON_CURRENCY_CHANNEL, GPIO.RISING, callback=switch_currency, bouncetime=400)
GPIO.add_event_detect(BUTTON_INTERVAL_CHANNEL, GPIO.RISING, callback=switch_interval, bouncetime=400)
try:
while True:
try:
prices = [entry[1:] for entry in get_dummy_data()] if config.dummy_data else fetch_prices()
data_sink.update_observers(prices[0], prices[1], currency)
time_left = config.refresh_interval
new_currency = currency
new_interval = interval
while time_left > 0 and currency == new_currency and interval == new_interval:
time.sleep(0.5)
time_left -= 0.5
new_currency = get_currency()
new_interval = get_period()
alarmManager.checkAlarms(currency, prices[1], alarm_callback)
if(currency != new_currency or interval != new_interval):
data_sink.update_observers(None, None, new_currency)
currency = new_currency
interval = new_interval
except (HTTPError, URLError) as e:
logger.error(str(e))
time.sleep(5)
except IOError as e:
logger.error(str(e))
except KeyboardInterrupt:
logger.info('Exit')
data_sink.close()
exit()
if __name__ == "__main__":
main()
```
|
{
"source": "jemshad/tremor-www",
"score": 2
}
|
#### File: tremor-www/python_scripts/include_stdlib.py
```python
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from pathlib import Path
def main():
p = Path("docs/tremor-script/stdlib")
template = Path("mkdocs.yml.in")
output = Path("mkdocs.yml")
# load mkdocs.yml template
with template.open() as mkdocs_yaml_in:
data = load(mkdocs_yaml_in, Loader=Loader)
function_reference = []
for nav_entry in data["nav"]:
if "Tremor Script" in nav_entry:
for script_nav_entry in nav_entry["Tremor Script"]:
fr = script_nav_entry.get("Function Reference")
if fr is not None:
function_reference = fr
# clean function_reference
function_reference.pop()
# including files in nav
files = list(p.glob('**/*.md'))
for f in files:
print(f"Adding file: {f}")
f_rel = f.relative_to(p)
pparts = list(f_rel.parent.parts)
pparts.append(f_rel.stem)
name = "::".join(pparts)
filename = f.relative_to("docs")
function_reference.append({
name: str(filename)
})
# write out mkdocs.yml
with output.open("w") as mkdocs_yaml:
mkdocs_yaml.write(dump(data, Dumper=Dumper))
if __name__ == "__main__":
main()
```
|
{
"source": "jems-lee/bayesian-personalized-ranking",
"score": 2
}
|
#### File: src/bayesian_personalized_ranking/bpr_sklearn.py
```python
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.utils.validation import (
check_X_y,
check_array,
check_is_fitted,
check_random_state,
)
from bpr_numba import fit_bpr
from utils import (
create_user_map_table,
create_item_map_table,
create_user_map,
create_item_map,
create_data_triplets_index_only,
)
class BPR(BaseEstimator):
def __init__(
self,
n_factors=10,
n_epochs=1,
batch_size=1,
init_mean=0,
init_std_dev=0.1,
lr_all=0.005,
reg_all=0.02,
lr_bi=None,
lr_pu=None,
lr_qi=None,
reg_bi=None,
reg_pu=None,
reg_qi=None,
random_state=None,
eps=1e-5,
):
self.n_factors = n_factors
self.n_epochs = n_epochs
self.batch_size = batch_size
self.init_mean = init_mean
self.init_std_dev = init_std_dev
self.lr_all = lr_all
self.reg_all = reg_all
self.lr_bi = lr_bi
self.lr_pu = lr_pu
self.lr_qi = lr_qi
self.reg_bi = reg_bi
self.reg_pu = reg_pu
self.reg_qi = reg_qi
self.random_state = random_state
self.user_factors = None
self.item_factors = None
self.item_biases = None
self.known_users = None
self.known_items = None
self.user_map = None
self.item_map = None
self.residuals = None
self.eps = eps
def fit(self, X, y):
"""Fit the model using stochastic gradient descent.
Parameters
----------
X : ndarray shape ( m, 2 )
Columns are [ user_id, item_id ]
y : ndarray shape ( m, )
Array of 1 : relevent and 0 if not
Returns
-------
"""
X, y = check_X_y(X, y)
n_users = len(np.unique(X[:, 0]))
n_items = len(np.unique(X[:, 1]))
df = pd.DataFrame({"user_id": X[:, 0], "item_id": X[:, 1], "relevance": y})
user_map_table = create_user_map_table(df)
item_map_table = create_item_map_table(df)
self.user_map = create_user_map(df)
self.item_map = create_item_map(df)
data_triplets = create_data_triplets_index_only(
df, user_map_table, item_map_table
)
print("Data triplets created")
m = data_triplets.shape[0]
self.is_fitted_ = True
self.random_state_ = check_random_state(self.random_state)
self.lr_bi = self.lr_bi if self.lr_bi is not None else self.lr_all
self.lr_pu = self.lr_pu if self.lr_pu is not None else self.lr_all
self.lr_qi = self.lr_qi if self.lr_qi is not None else self.lr_all
self.reg_bi = self.reg_bi if self.reg_bi is not None else self.reg_all
self.reg_pu = self.reg_pu if self.reg_pu is not None else self.reg_all
self.reg_qi = self.reg_qi if self.reg_qi is not None else self.reg_all
self.batch_size = self.batch_size if self.batch_size is not None else 1
self.residuals = np.zeros(self.n_epochs)
self.known_users = set(X[:, 0])
self.known_items = set(X[:, 1])
self.user_factors = self.random_state_.normal(
loc=self.init_mean,
scale=self.init_std_dev,
size=(n_users, self.n_factors),
)
self.item_factors = self.random_state_.normal(
loc=self.init_mean,
scale=self.init_std_dev,
size=(n_items, self.n_factors),
)
self.user_biases = self.random_state_.normal(
loc=self.init_mean, scale=self.init_std_dev, size=n_users
)
self.item_biases = self.random_state_.normal(
loc=self.init_mean, scale=self.init_std_dev, size=n_items
)
(
self.user_factors,
self.item_factors,
self.item_biases,
self.residuals,
) = fit_bpr(
data_triplets=data_triplets,
initial_user_factors=self.user_factors,
initial_item_factors=self.item_factors,
initial_item_biases=self.item_biases,
lr_bi=self.lr_bi,
lr_pu=self.lr_pu,
lr_qi=self.lr_qi,
reg_bi=self.reg_bi,
reg_pu=self.reg_pu,
reg_qi=self.reg_qi,
verbose=False,
n_epochs=self.n_epochs,
batch_size=self.batch_size,
eps=self.eps,
)
if len(self.residuals) < self.n_epochs:
            print("Converged")
return self
def predict(self, X: np.ndarray):
"""
Parameters
----------
X : array-like
Columns [ user_id, item_id ]
Returns
-------
scores : ndarray
"""
check_is_fitted(self, "is_fitted_")
X = check_array(X)
m = X.shape[0]
scores = np.zeros(m)
for i in np.arange(m):
user_id = X[i, 0]
item_id = X[i, 1]
if user_id in self.user_map and item_id in self.item_map:
u_idx = self.user_map[user_id]
i_idx = self.item_map[item_id]
scores[i] = (
np.dot(self.user_factors[u_idx, :], self.item_factors[i_idx, :])
+ self.item_biases[i_idx]
)
elif item_id in self.item_map:
i_idx = self.item_map[item_id]
scores[i] = self.item_biases[i_idx]
else:
# item not in training set
scores[i] = -np.inf
return scores
```
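A minimal fit/predict sketch with toy data; `fit_bpr` and the helpers imported from `utils` must be importable for this to actually run:
```python
import numpy as np

# Columns are [user_id, item_id]; y marks relevant (1) vs not relevant (0).
X = np.array([[1, 10], [1, 11], [2, 10], [2, 12], [3, 11], [3, 12]])
y = np.array([1, 0, 1, 1, 1, 0])

model = BPR(n_factors=4, n_epochs=20, lr_all=0.01, random_state=42)
model.fit(X, y)

# Score unseen (user, item) pairs; items absent from training get -inf.
print(model.predict(np.array([[1, 12], [2, 11], [2, 99]])))
```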
|
{
"source": "jemtech/Tca9548A",
"score": 3
}
|
#### File: jemtech/Tca9548A/Tca9548A.py
```python
import smbus2
from threading import Lock
class Tca9548A(object):
'''
handles communication and setup of a TCA9548A
'''
def __init__(self, i2cBus = None, address = 0x70):
'''
Constructor
'''
self.address = address
if i2cBus is None:
self.bus = smbus2.SMBus(1)
else:
self.bus = i2cBus
def getChannel(self, channel):
'''
returns the Tca9548AChannel for communication
'''
return Tca9548AChannel(self,channel)
def disable(self):
'''
disables the chip output to prevent collisions with other bus clients
'''
self.bus.write_byte(self.address, 0)
def openChannel(self, channel):
'''
        activates the selected output(s); takes the channel register bitmask (bit n enables channel n, channels 0-7)
'''
self.bus.write_byte(self.address, channel)
class Tca9548AChannel(object):
'''
use like smbus2
thread safe i2c bus wrapper
caring about opening and closing the channels of the TCA9548A
preventing collisions
'''
def __init__(self, tca9548A, channel):
'''
Constructor
'''
        # 2 to the power of the channel number, since the register uses one bit per channel
self.channel = 2**channel
self.tca9548A = tca9548A
def __transaction(self, method, *args, **kwargs):
lock.acquire()
try:
self.tca9548A.openChannel(self.channel)
result = method(*args, **kwargs)
self.tca9548A.disable()
finally:
lock.release()
return result
def read_word_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.read_word_data, *args, **kwargs)
def read_byte_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.read_byte_data, *args, **kwargs)
def process_call(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.process_call, *args, **kwargs)
def block_process_call(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.block_process_call, *args, **kwargs)
def read_block_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.read_block_data, *args, **kwargs)
def read_byte(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.read_byte, *args, **kwargs)
def read_i2c_block_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.read_i2c_block_data, *args, **kwargs)
def write_block_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_block_data, *args, **kwargs)
def write_byte(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_byte, *args, **kwargs)
def write_byte_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_byte_data, *args, **kwargs)
def write_i2c_block_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_i2c_block_data, *args, **kwargs)
def write_quick(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_quick, *args, **kwargs)
def write_word_data(self, *args, **kwargs):
return self.__transaction(self.tca9548A.bus.write_word_data, *args, **kwargs)
class Pipe(object):
def __init__(self, bus, address, force = None):
self.bus = bus
self.address = address
self.force = force
def read_word_data(self, register):
return self.bus.read_word_data(i2c_addr = self.address, register = register, force = self.force)
def read_byte_data(self, register):
return self.bus.read_byte_data(i2c_addr = self.address, register = register, force = self.force)
def process_call(self, register, value):
return self.bus.process_call(i2c_addr = self.address, register = register, value = value, force = self.force)
def block_process_call(self, register, data):
return self.bus.block_process_call(i2c_addr = self.address, register = register, data = data, force = self.force)
def read_block_data(self, register):
        return self.bus.read_block_data(i2c_addr = self.address, register = register, force = self.force)
def read_byte(self):
        return self.bus.read_byte(i2c_addr = self.address, force = self.force)
def read_i2c_block_data(self, register, length):
return self.bus.read_i2c_block_data(i2c_addr = self.address, register = register, length = length, force = self.force)
def write_block_data(self, register, data):
return self.bus.write_block_data(i2c_addr = self.address, register = register, data = data, force = self.force)
def write_byte(self, value):
return self.bus.write_byte(i2c_addr = self.address, value = value, force = self.force)
def write_byte_data(self, register, value):
        return self.bus.write_byte_data(i2c_addr = self.address, register = register, value = value, force = self.force)
def write_i2c_block_data(self, register, data):
return self.bus.write_i2c_block_data(i2c_addr = self.address, register = register, data = data, force = self.force)
def write_quick(self):
return self.bus.write_quick(i2c_addr = self.address, force = self.force)
def write_word_data(self, register, value):
return self.bus.write_word_data(i2c_addr = self.address, register = register, value = value, force = self.force)
lock = Lock()
```
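Usage sketch: each `Tca9548AChannel` behaves like an smbus2 bus, opening and closing its multiplexer channel around every transaction. The device address 0x40 below is hypothetical:
```python
mux = Tca9548A(address=0x70)

# Two devices that share the same I2C address, on different mux channels.
sensor_a = mux.getChannel(0)
sensor_b = mux.getChannel(1)

# Transactions are serialized by the module-level lock, so this is thread safe.
value_a = sensor_a.read_byte_data(0x40, 0x00)
value_b = sensor_b.read_byte_data(0x40, 0x00)
```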
|
{
"source": "jemten/drop",
"score": 2
}
|
#### File: drop/config/ExportCounts.py
```python
from snakemake.io import expand
from drop import utils
class ExportCounts:
COUNT_TYPE_MAP = {
"geneCounts": "aberrantExpression",
"splitCounts": "aberrantSplicing",
"spliceSiteOverlapCounts": "aberrantSplicing"
}
def __init__(self, dict_, outputRoot, sampleAnnotation, geneAnnotations, genomeAssembly,
aberrantExpression, aberrantSplicing):
"""
:param dict_: config dictionary for count export
:param sampleAnnotation: parsed sample annotation
:param geneAnnotations: list of gene annotation names
:param aberrantExpression: AberrantExpression object
:param aberrantSplicing: AberrantSplicing object
"""
self.CONFIG_KEYS = ["geneAnnotations", "excludeGroups"]
self.config_dict = self.setDefaults(dict_, geneAnnotations)
self.outputRoot = outputRoot
self.sa = sampleAnnotation
self.genomeAssembly = genomeAssembly
self.modules = {
"aberrantExpression": aberrantExpression,
"aberrantSplicing": aberrantSplicing
}
self.pattern = self.outputRoot / "exported_counts" / "{dataset}--{genomeAssembly}--{annotation}"
def setDefaults(self, config_dict, gene_annotations):
utils.setKey(config_dict, None, "geneAnnotations", gene_annotations)
utils.setKey(config_dict, None, "excludeGroups", list())
# check consistency of gene annotations
anno_incomp = set(config_dict["geneAnnotations"]) - set(gene_annotations)
if len(anno_incomp) > 0:
                message = f"{anno_incomp} are not valid annotation versions in 'geneAnnotation' "
                message += "but are required in 'exportCounts'.\nPlease make sure they match."
raise ValueError(message)
return config_dict
def get(self, key):
if key not in self.CONFIG_KEYS:
raise KeyError(f"{key} not defined for count export")
return self.config_dict[key]
def getFilePattern(self, str_=True):
return utils.returnPath(self.pattern, str_=str_)
def getExportGroups(self, modules=None):
"""
Determine from which DROP groups counts should be exported
:param modules: 'aberrantExpression' for gene counts, 'aberrantSplicing' for splicing counts export
:return: DROP groups from which to export counts
"""
if modules is None:
modules = self.modules.keys()
elif isinstance(modules, str):
modules = [modules]
groups = [] # get all active groups
for module in modules:
groups.extend(self.modules[module].groups)
export_groups = set(groups) - set(self.get("excludeGroups"))
return export_groups
def getExportCountFiles(self, prefix):
"""
Determine export count files.
        :param prefix: name of the count file type
        :return: list of count files to export
"""
if prefix not in self.COUNT_TYPE_MAP.keys():
raise ValueError(f"{prefix} not a valid file type for exported counts")
datasets = self.getExportGroups([self.COUNT_TYPE_MAP[prefix]])
file_pattern = str(self.pattern / f"{prefix}.tsv.gz")
count_files = expand(file_pattern, annotation=self.get("geneAnnotations"),
dataset=datasets, genomeAssembly=self.genomeAssembly)
return count_files
```
|
{
"source": "jemten/trailblazer",
"score": 2
}
|
#### File: jemten/trailblazer/setup.py
```python
import codecs
import os
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
# Shortcut for building/publishing to Pypi
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel upload')
sys.exit()
def parse_reqs(req_path='./requirements.txt'):
"""Recursively parse requirements from nested pip files."""
install_requires = []
with codecs.open(req_path, 'r') as handle:
# remove comments and empty lines
lines = (line.strip() for line in handle
if line.strip() and not line.startswith('#'))
for line in lines:
# check for nested requirements files
if line.startswith('-r'):
# recursively call this function
install_requires += parse_reqs(req_path=line[3:])
else:
# add the line as a new requirement
install_requires.append(line)
return install_requires
# This is a plug-in for setuptools that will invoke py.test
# when you run python setup.py test
class PyTest(TestCommand):
"""Set up the py.test test runner."""
def finalize_options(self):
"""Set options for the command line."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Execute the test runner command."""
# Import here, because outside the required eggs aren't loaded yet
import pytest
sys.exit(pytest.main(self.test_args))
setup(
name='trailblazer',
# Versions should comply with PEP440. For a discussion on
# single-sourcing the version across setup.py and the project code,
# see http://packaging.python.org/en/latest/tutorial.html#version
version='3.1.0',
description=('Track MIP analyses.'),
long_description=__doc__,
# What does your project relate to? Separate with spaces.
keywords='analysis development',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
# The project's main homepage
url='https://github.com/Clinical-Genomics/analysis',
packages=find_packages(exclude=('tests*', 'docs', 'examples')),
# If there are data files included in your packages that need to be
# installed, specify them here.
include_package_data=True,
# package_data={
# 'taboo': [
# 'server/genotype/templates/genotype/*.html',
# ]
# },
zip_safe=False,
# Although 'package_data' is the preferred approach, in some case you
# may need to place data files outside of your packages.
# In this case, 'data_file' will be installed into:
# '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# Install requirements loaded from ``requirements.txt``
install_requires=parse_reqs(),
tests_require=[
'pytest',
],
cmdclass=dict(
test=PyTest,
),
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and
# allow pip to create the appropriate form of executable for the
# target platform.
entry_points={
'console_scripts': [
'trailblazer = trailblazer.cli:base',
]
},
# See: http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
],
)
```
#### File: tests/mip/conftest.py
```python
import pytest
from trailblazer.mip import sacct
from trailblazer.mip import start
@pytest.fixture(scope='session')
def failed_sacct_jobs():
with open('tests/fixtures/sacct/failed.log.status') as stream:
sacct_jobs = sacct.parse_sacct(stream)
return sacct_jobs
@pytest.fixture(scope='session')
def mip_cli():
_mip_cli = start.MipCli(script='test/fake_mip.pl')
return _mip_cli
```
#### File: trailblazer/cli/check.py
```python
import logging
from pathlib import Path
import click
import ruamel.yaml
from tabulate import tabulate
from trailblazer.mip import files
LOG = logging.getLogger(__name__)
@click.command()
@click.argument('family')
@click.pass_context
def check(context: click.Context, family: str):
"""Delete an analysis log from the database."""
analysis_obj = context.obj['store'].analyses(family=family).first()
if analysis_obj is None:
LOG.error('no analysis found')
context.abort()
config_path = Path(analysis_obj.config_path)
if not config_path.exists():
LOG.error(f"analysis config not found: {config_path}")
context.abort()
config_raw = ruamel.yaml.safe_load(config_path.open())
config_data = files.parse_config(config_raw)
sampleinfo_raw = ruamel.yaml.safe_load(Path(config_data['sampleinfo_path']).open())
sampleinfo_data = files.parse_sampleinfo(sampleinfo_raw)
qcmetrics_path = Path(sampleinfo_data['qcmetrics_path'])
if not qcmetrics_path.exists():
LOG.error(f"qc metrics not found: {str(qcmetrics_path)}")
context.abort()
qcmetrics_raw = ruamel.yaml.safe_load(qcmetrics_path.open())
qcmetrics_data = files.parse_qcmetrics(qcmetrics_raw)
samples = {
'sample': [],
'type': [],
'ped': [],
'chanjo': [],
'peddy': [],
'plink': [],
'duplicates': [],
}
for sample_data in config_data['samples']:
LOG.debug(f"{sample_data['id']}: parse analysis config")
samples['sample'].append(sample_data['id'])
samples['type'].append(sample_data['type'])
for sample_data in sampleinfo_data['samples']:
LOG.debug(f"{sample_data['id']}: parse sample info")
samples['ped'].append(sample_data['sex'])
with Path(sample_data['chanjo_sexcheck']).open() as chanjo_handle:
sexcheck_data = files.parse_chanjo_sexcheck(chanjo_handle)
predicted_sex = sexcheck_data['predicted_sex']
xy_ratio = sexcheck_data['y_coverage'] / sexcheck_data['x_coverage']
samples['chanjo'].append(f"{predicted_sex} ({xy_ratio:.3f})")
for sample_data in qcmetrics_data['samples']:
LOG.debug(f"{sample_data['id']}: parse qc metrics")
samples['plink'].append(sample_data['plink_sex'])
duplicates_percent = sample_data['duplicates'] * 100
samples['duplicates'].append(f"{duplicates_percent:.3f}%")
peddy_path = Path(sampleinfo_data['peddy']['sex_check'])
if peddy_path.exists():
with peddy_path.open() as sexcheck_handle:
peddy_data = files.parse_peddy_sexcheck(sexcheck_handle)
for sample_id in samples['sample']:
LOG.debug(f"{sample_id}: parse peddy")
predicted_sex = peddy_data[sample_id]['predicted_sex']
het_ratio = peddy_data[sample_id]['het_ratio']
samples['peddy'].append(f"{predicted_sex} ({het_ratio})")
else:
LOG.warning(f"missing peddy output: {peddy_path}")
print(tabulate(samples, headers='keys', tablefmt='psql'))
```
#### File: trailblazer/trailblazer/exc.py
```python
class TrailblazerError(Exception):
def __init__(self, message):
self.message = message
class MissingFileError(TrailblazerError):
pass
class MipStartError(TrailblazerError):
pass
class ConfigError(TrailblazerError):
def __init__(self, message, errors=None):
self.message = message
self.errors = errors
```
#### File: trailblazer/server/api.py
```python
from flask import abort, g, Blueprint, jsonify, make_response, request
from google.auth import jwt
from trailblazer.server.ext import store
blueprint = Blueprint('api', __name__, url_prefix='/api/v1')
@blueprint.before_request
def before_request():
if request.method == 'OPTIONS':
return make_response(jsonify(ok=True), 204)
auth_header = request.headers.get('Authorization')
if auth_header:
jwt_token = auth_header.split('Bearer ')[-1]
else:
return abort(403, 'no JWT token found on request')
user_data = jwt.decode(jwt_token, verify=False)
user_obj = store.user(user_data['email'])
if user_obj is None:
return abort(403, f"{user_data['email']} doesn't have access")
g.current_user = user_obj
@blueprint.route('/analyses')
def analyses():
"""Display analyses."""
per_page = int(request.args.get('per_page', 50))
page = int(request.args.get('page', 1))
query = store.analyses(status=request.args.get('status'),
query=request.args.get('query'),
is_visible=request.args.get('is_visible') == 'true' or None)
query_page = query.paginate(page, per_page=per_page)
data = []
for analysis_obj in query_page.items:
analysis_data = analysis_obj.to_dict()
analysis_data['user'] = analysis_obj.user.to_dict() if analysis_obj.user else None
analysis_data['failed_jobs'] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data.append(analysis_data)
return jsonify(analyses=data)
@blueprint.route('/analyses/<int:analysis_id>', methods=['GET', 'PUT'])
def analysis(analysis_id):
"""Display a single analysis."""
analysis_obj = store.analysis(analysis_id)
if analysis_obj is None:
return abort(404)
if request.method == 'PUT':
analysis_obj.update(request.json)
store.commit()
data = analysis_obj.to_dict()
data['failed_jobs'] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
data['user'] = analysis_obj.user.to_dict() if analysis_obj.user else None
return jsonify(**data)
@blueprint.route('/info')
def info():
"""Display meta data about database."""
metadata_obj = store.info()
return jsonify(**metadata_obj.to_dict())
@blueprint.route('/me')
def me():
"""Return information about a logged in user."""
return jsonify(**g.current_user.to_dict())
@blueprint.route('/aggregate/jobs')
def aggregate_jobs():
"""Return stats about jobs."""
data = store.aggregate_failed()
return jsonify(jobs=data)
```
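A client-side sketch of calling the API; the host, port, and token are placeholders, and the JWT must decode to the email of a known user for the `before_request` hook to let the request through:
```python
import requests

jwt_token = "<JWT from Google sign-in>"  # placeholder
response = requests.get(
    "http://localhost:5000/api/v1/analyses",
    headers={"Authorization": f"Bearer {jwt_token}"},
    params={"status": "failed", "per_page": 10, "page": 1},
)
for analysis in response.json()["analyses"]:
    print(analysis)
```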
|
{
"source": "jemvega/vocabulary-flashcard-app",
"score": 4
}
|
#### File: jemvega/vocabulary-flashcard-app/vocabulary-flash-card-app.py
```python
import os
import sys
import random
import pandas as pd
from pandas import Series, DataFrame
q1 = "Would you like to continue? "
def rename_file(file_name):
    if not file_name.endswith(".csv"):
        return file_name + ".csv"
    return file_name
def check_file(file_name):
try:
        if os.path.isfile(file_name):
print(f"\nVocabulary word list uploaded: {file_name}")
else:
print("\nI'm sorry. The file name you provided is not in the same directory.\n")
except:
print("\nError. Please try again.\n")
def yes_or_no(question):
while True:
user_input = input(question + " Y or N? ").lower().strip()
if user_input == 'y':
print("User Input = y")
return True
elif user_input == 'n':
print("User Input = n")
            return False
else:
print("I'm sorry. That is an invalid input. Y or N?")
continue
def choose_card_range():
q2= '''
Would you like to choose the list of words to study?
Press Y to indicate the range of cards you want to study.
Press N to study all of the cards.'''
while True:
user_input = yes_or_no(q2)
if user_input == True:
start_index = input("Please type in the starting index number: ")
stop_index = input("Please type in the ending index number: ")
print(f"Your starting value is: {start_index}")
print(f"Your ending value is: {stop_index}")
if (start_index.isdigit() and stop_index.isdigit() and int(start_index) < int(stop_index)):
print("Your word set has been selected.")
range_input = [start_index, stop_index]
                return card_range.extend(range_input)
elif user_input == False:
            return card_range.extend([0, len(vocab_list)])
else:
print("""I'm sorry. That is an invalid input. Please provide two integer values
where the second integer is larger than the first integer.""")
continue
def choose_shuffle():
q3 = "Would you like to randomize the cards?"
while True:
user_input = yes_or_no(q3)
if user_input == True:
return game.shuffle()
elif user_input == False:
break
else:
print("I'm sorry. That is an invalid input. Y or N?")
continue
def quit_game():
print("Keep studying the following saved words:\n")
unknown_display = ""
for i,j in game.saved:
        unknown_display += f"\n{i.strip()}: {j}"
print(unknown_display)
print("\nGoodbye!")
# In[ ]:
class VocabCardSet():
def __init__(self):
self.index = 0
self.vocab_cardset = vocab_active
self.saved = []
def __str__(self):
image_deck = " "
for i in self.vocab_cardset:
image_deck += f"\n{i[0].strip()}: {i[1]}\n"
return image_deck
def __iter__(self):
return self
def __len__(self):
return len(self.vocab_cardset)
def __next__(self): # Special method to cycle through the list when reaching the end of the index.
flashcard = " "
try:
if self.index == len(self.vocab_cardset) - 1:
self.index = 0
result = self.vocab_cardset[self.index]
return f"\n{result[0].strip()}: {result[1]}\n"
elif 0 <= self.index < len(self.vocab_cardset):
self.index += 1
result = self.vocab_cardset[self.index]
return f"\n{result[0].strip()}: {result[1]}\n"
except:
print("There are no more vocabulary words!")
            print("Please restart the app to study your vocabulary words again.")
print("error in next")
return quit_game()
def previous(self): # A function that returns the previous word in the list.
try:
if self.index == 0:
self.index = len(self.vocab_cardset) - 1
result = self.vocab_cardset[self.index]
return f"\n{result[0].strip()}: {result[1]}\n"
elif self.index > 0:
self.index -= 1
result = self.vocab_cardset[self.index]
return f"\n{result[0].strip()}: {result[1]}\n"
except:
print("There are no more vocabulary words!")
print("Please restart the app to study your vocabulary words again.")
print("Error in previous")
return quit_game()
def shuffle(self):
random.shuffle(self.vocab_cardset)
def save_unknown(self): # A function that saves an unknown vocab word to another list.
try:
if self.index == len(self.vocab_cardset):
self.index = 0
unknown = self.vocab_cardset.pop(self.index)
self.saved.append(unknown)
print(f"""\nThe following word was saved:\n
{unknown[0].strip()}: {unknown[1].strip()}\n""")
elif self.index < len(self.vocab_cardset):
unknown = self.vocab_cardset.pop(self.index)
self.saved.append(unknown)
print(f"""\nThe following word was saved:\n
{unknown[0].strip()}: {unknown[1].strip()}\n""")
except:
print("There are no more vocabulary words!")
print("Please restart the app to study your vocabulary words again.")
print("Error in save unknown")
return quit_game()
def print_unknown(self):
saved_deck = ""
for i in self.saved:
saved_deck += f"{i[0]}: {i[1]}"
return saved_deck
# In[ ]:
# Game play: Ask user to input vocab list file name and to continue
print("""Welcome to the Vocabulary Flash Card Study App!\n
This app was intended to be used for studying vocabulary words.
It does not keep score of correct or wrong answers.
Instead, it allows users to cycle through the vocabulary flash card set
quickly for studying and personally assessing one's
own performance.\n\n""")
print("""How the Vocabulary Flash Card App Works:
The user will be prompted to input the file name of the .csv file
he or she would like to study. Please have a .csv file saved in the
same directory as this program. The user will get to choose certain
user settings before starting the app, like choosing a subset of words
from the entire vocabulary list or randomizing the words in the list.
The user will also be given a menu to allow the user to navigate through
the vocabulary list. The menu will allow the user to skip the word (S)
or to go back to the previous word (P). The user can also save
words in an unknown word pile for the user to review at the end (U).
The user can also quit the App (Q) at any point during the game.""")
file_name = str(input("What is the name of the vocabulary list file you wish to study?"))
file_rename = rename_file(file_name)
check_file(file_rename)
data = pd.read_csv(file_rename, encoding = 'latin-1')
col_headers = list(data)
words_list = data[col_headers[0]].values.tolist()
definitions_list = data[col_headers[1]].values.tolist()
vocab_dict = dict(zip(words_list, definitions_list))
vocab_list = list(vocab_dict.items())
# Game play: Ask user to choose to study a specified set of cards or all cards in vocab list
card_range = []
choose_card_range()
vocab_active = vocab_list[int(card_range[0]): int(card_range[1])]
vocab_active_dc = dict(vocab_active)
game = VocabCardSet()
# Game play: asks user if he or she wants to randomize cards
choose_shuffle()
# Game play: Start studying!
while True:
if len(game.vocab_cardset) > 0:
selection = input("""\nWhat would you like to do?
Skip to the next word? Press S.
Go back to previous word? Press P.
Mark word as unknown? Press U.
To quit flash card generator. Press Q.\n""").lower()
if selection == 's':
#print("Skip")
print(next(game))
elif selection == 'p':
#print("Previous")
print(game.previous())
elif selection == 'u':
print("Unknown")
game.save_unknown()
elif selection == 'q':
quit_game()
break
else:
print("I'm sorry. That's an invalid input.")
continue
else:
print("There are no more vocabulary words!")
print("Please restart the app to study your vocabulary words again.")
print("error at end of game")
quit_game()
break
```
|
{
"source": "Jemy2019/Wifi-Attendance-Tracker",
"score": 3
}
|
#### File: Wifi-Attendance-Tracker/Server/script.py
```python
import os,time
import datetime
def main():
# load database
DB = open('data.txt', 'r')
ipsMap = {}
for i in DB:
        l = i.split("=>")
        ipsMap[l[0]] = l[1]
        print(l[0], l[1])
    # get initial state of connected hosts
os.system('nmap -sP 192.168.43.0/24|grep "192"|cut -d " " -f 6 >ipsSaved.txt')
# check the router state
while(True):
attendance=open('attendance.txt','a')
leaving=open('leaving.txt','a')
time.sleep(10)
# get new state of router
os.system('nmap -sP 192.168.43.0/24|grep "192"|cut -d " " -f 6|sort >ipsNew.txt')
# check for attendance
cpFlag = 0
if 0 == os.system('grep -v -x -f ipsSaved.txt ipsNew.txt > newConnectors.txt'):
# record the attendance of all employees
newConnectors=open('newConnectors.txt','r')
for i in newConnectors:
attendance.write(str(ipsMap[i[:-1]][:-1]) + "\t" + str(datetime.datetime.now())+"\n")
#print str(ipsMap[i[:-2]]) + str(datetime.datetime.now())
cpFlag = 1
            print("connectors\n")
# check for leaving
if 0 == os.system('grep -v -x -f ipsNew.txt ipsSaved.txt > DisConnectors.txt'):
# record the attendance of all employees
DisConnectors=open('DisConnectors.txt','r')
for i in DisConnectors:
leaving.write(str(ipsMap[i[:-1]][:-1]) + "\t" + str(datetime.datetime.now())+"\n")
#print str(ipsMap[i]) + "\t" + str(datetime.datetime.now())
                print("disconnectors\n")
cpFlag = 1
# update current state
if cpFlag == 1:
os.system('cp ipsNew.txt ipsSaved.txt')
        print("checking...")
if __name__=="__main__":
main()
```
|
{
"source": "jen20/pulumi-aws",
"score": 2
}
|
#### File: pulumi_aws/apigatewayv2/integration_response.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['IntegrationResponseArgs', 'IntegrationResponse']
@pulumi.input_type
class IntegrationResponseArgs:
def __init__(__self__, *,
api_id: pulumi.Input[str],
integration_id: pulumi.Input[str],
integration_response_key: pulumi.Input[str],
content_handling_strategy: Optional[pulumi.Input[str]] = None,
response_templates: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing an IntegrationResponse resource.
:param pulumi.Input[str] api_id: The API identifier.
:param pulumi.Input[str] integration_id: The identifier of the `apigatewayv2.Integration`.
:param pulumi.Input[str] integration_response_key: The integration response key.
:param pulumi.Input[str] content_handling_strategy: How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] response_templates: A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client.
:param pulumi.Input[str] template_selection_expression: The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration response.
"""
pulumi.set(__self__, "api_id", api_id)
pulumi.set(__self__, "integration_id", integration_id)
pulumi.set(__self__, "integration_response_key", integration_response_key)
if content_handling_strategy is not None:
pulumi.set(__self__, "content_handling_strategy", content_handling_strategy)
if response_templates is not None:
pulumi.set(__self__, "response_templates", response_templates)
if template_selection_expression is not None:
pulumi.set(__self__, "template_selection_expression", template_selection_expression)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Input[str]:
"""
The API identifier.
"""
return pulumi.get(self, "api_id")
@api_id.setter
def api_id(self, value: pulumi.Input[str]):
pulumi.set(self, "api_id", value)
@property
@pulumi.getter(name="integrationId")
def integration_id(self) -> pulumi.Input[str]:
"""
The identifier of the `apigatewayv2.Integration`.
"""
return pulumi.get(self, "integration_id")
@integration_id.setter
def integration_id(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_id", value)
@property
@pulumi.getter(name="integrationResponseKey")
def integration_response_key(self) -> pulumi.Input[str]:
"""
The integration response key.
"""
return pulumi.get(self, "integration_response_key")
@integration_response_key.setter
def integration_response_key(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_response_key", value)
@property
@pulumi.getter(name="contentHandlingStrategy")
def content_handling_strategy(self) -> Optional[pulumi.Input[str]]:
"""
How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`.
"""
return pulumi.get(self, "content_handling_strategy")
@content_handling_strategy.setter
def content_handling_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_handling_strategy", value)
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client.
"""
return pulumi.get(self, "response_templates")
@response_templates.setter
def response_templates(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "response_templates", value)
@property
@pulumi.getter(name="templateSelectionExpression")
def template_selection_expression(self) -> Optional[pulumi.Input[str]]:
"""
The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration response.
"""
return pulumi.get(self, "template_selection_expression")
@template_selection_expression.setter
def template_selection_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "template_selection_expression", value)
class IntegrationResponse(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
content_handling_strategy: Optional[pulumi.Input[str]] = None,
integration_id: Optional[pulumi.Input[str]] = None,
integration_response_key: Optional[pulumi.Input[str]] = None,
response_templates: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages an Amazon API Gateway Version 2 integration response.
More information can be found in the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api.html).
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example = aws.apigatewayv2.IntegrationResponse("example",
api_id=aws_apigatewayv2_api["example"]["id"],
integration_id=aws_apigatewayv2_integration["example"]["id"],
integration_response_key="/200/")
```
## Import
`aws_apigatewayv2_integration_response` can be imported by using the API identifier, integration identifier and integration response identifier, e.g.
```sh
$ pulumi import aws:apigatewayv2/integrationResponse:IntegrationResponse example aabbccddee/1122334/998877
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: The API identifier.
:param pulumi.Input[str] content_handling_strategy: How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`.
:param pulumi.Input[str] integration_id: The identifier of the `apigatewayv2.Integration`.
:param pulumi.Input[str] integration_response_key: The integration response key.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] response_templates: A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client.
:param pulumi.Input[str] template_selection_expression: The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration response.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IntegrationResponseArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an Amazon API Gateway Version 2 integration response.
More information can be found in the [Amazon API Gateway Developer Guide](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api.html).
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
example = aws.apigatewayv2.IntegrationResponse("example",
api_id=aws_apigatewayv2_api["example"]["id"],
integration_id=aws_apigatewayv2_integration["example"]["id"],
integration_response_key="/200/")
```
## Import
`aws_apigatewayv2_integration_response` can be imported by using the API identifier, integration identifier and integration response identifier, e.g.
```sh
$ pulumi import aws:apigatewayv2/integrationResponse:IntegrationResponse example aabbccddee/1122334/998877
```
:param str resource_name: The name of the resource.
:param IntegrationResponseArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntegrationResponseArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
content_handling_strategy: Optional[pulumi.Input[str]] = None,
integration_id: Optional[pulumi.Input[str]] = None,
integration_response_key: Optional[pulumi.Input[str]] = None,
response_templates: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
__props__['content_handling_strategy'] = content_handling_strategy
if integration_id is None and not opts.urn:
raise TypeError("Missing required property 'integration_id'")
__props__['integration_id'] = integration_id
if integration_response_key is None and not opts.urn:
raise TypeError("Missing required property 'integration_response_key'")
__props__['integration_response_key'] = integration_response_key
__props__['response_templates'] = response_templates
__props__['template_selection_expression'] = template_selection_expression
super(IntegrationResponse, __self__).__init__(
'aws:apigatewayv2/integrationResponse:IntegrationResponse',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
content_handling_strategy: Optional[pulumi.Input[str]] = None,
integration_id: Optional[pulumi.Input[str]] = None,
integration_response_key: Optional[pulumi.Input[str]] = None,
response_templates: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None) -> 'IntegrationResponse':
"""
Get an existing IntegrationResponse resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: The API identifier.
:param pulumi.Input[str] content_handling_strategy: How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`.
:param pulumi.Input[str] integration_id: The identifier of the `apigatewayv2.Integration`.
:param pulumi.Input[str] integration_response_key: The integration response key.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] response_templates: A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client.
:param pulumi.Input[str] template_selection_expression: The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration response.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_id"] = api_id
__props__["content_handling_strategy"] = content_handling_strategy
__props__["integration_id"] = integration_id
__props__["integration_response_key"] = integration_response_key
__props__["response_templates"] = response_templates
__props__["template_selection_expression"] = template_selection_expression
return IntegrationResponse(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Output[str]:
"""
The API identifier.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="contentHandlingStrategy")
def content_handling_strategy(self) -> pulumi.Output[Optional[str]]:
"""
How to handle response payload content type conversions. Valid values: `CONVERT_TO_BINARY`, `CONVERT_TO_TEXT`.
"""
return pulumi.get(self, "content_handling_strategy")
@property
@pulumi.getter(name="integrationId")
def integration_id(self) -> pulumi.Output[str]:
"""
The identifier of the `apigatewayv2.Integration`.
"""
return pulumi.get(self, "integration_id")
@property
@pulumi.getter(name="integrationResponseKey")
def integration_response_key(self) -> pulumi.Output[str]:
"""
The integration response key.
"""
return pulumi.get(self, "integration_response_key")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of Velocity templates that are applied on the request payload based on the value of the Content-Type header sent by the client.
"""
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="templateSelectionExpression")
def template_selection_expression(self) -> pulumi.Output[Optional[str]]:
"""
The [template selection expression](https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-template-selection-expressions) for the integration response.
"""
return pulumi.get(self, "template_selection_expression")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
#### File: pulumi_aws/budgets/outputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'BudgetCostTypes',
'BudgetNotification',
]
@pulumi.output_type
class BudgetCostTypes(dict):
def __init__(__self__, *,
include_credit: Optional[bool] = None,
include_discount: Optional[bool] = None,
include_other_subscription: Optional[bool] = None,
include_recurring: Optional[bool] = None,
include_refund: Optional[bool] = None,
include_subscription: Optional[bool] = None,
include_support: Optional[bool] = None,
include_tax: Optional[bool] = None,
include_upfront: Optional[bool] = None,
use_amortized: Optional[bool] = None,
use_blended: Optional[bool] = None):
"""
:param bool include_credit: A boolean value indicating whether to include credits in the cost budget. Defaults to `true`
:param bool include_discount: Specifies whether a budget includes discounts. Defaults to `true`
:param bool include_other_subscription: A boolean value indicating whether to include other subscription costs in the cost budget. Defaults to `true`
:param bool include_recurring: A boolean value indicating whether to include recurring costs in the cost budget. Defaults to `true`
:param bool include_refund: A boolean value indicating whether to include refunds in the cost budget. Defaults to `true`
:param bool include_subscription: A boolean value indicating whether to include subscriptions in the cost budget. Defaults to `true`
:param bool include_support: A boolean value indicating whether to include support costs in the cost budget. Defaults to `true`
:param bool include_tax: A boolean value indicating whether to include tax in the cost budget. Defaults to `true`
:param bool include_upfront: A boolean value indicating whether to include upfront costs in the cost budget. Defaults to `true`
:param bool use_amortized: Specifies whether a budget uses the amortized rate. Defaults to `false`
:param bool use_blended: A boolean value indicating whether to use blended costs in the cost budget. Defaults to `false`
"""
if include_credit is not None:
pulumi.set(__self__, "include_credit", include_credit)
if include_discount is not None:
pulumi.set(__self__, "include_discount", include_discount)
if include_other_subscription is not None:
pulumi.set(__self__, "include_other_subscription", include_other_subscription)
if include_recurring is not None:
pulumi.set(__self__, "include_recurring", include_recurring)
if include_refund is not None:
pulumi.set(__self__, "include_refund", include_refund)
if include_subscription is not None:
pulumi.set(__self__, "include_subscription", include_subscription)
if include_support is not None:
pulumi.set(__self__, "include_support", include_support)
if include_tax is not None:
pulumi.set(__self__, "include_tax", include_tax)
if include_upfront is not None:
pulumi.set(__self__, "include_upfront", include_upfront)
if use_amortized is not None:
pulumi.set(__self__, "use_amortized", use_amortized)
if use_blended is not None:
pulumi.set(__self__, "use_blended", use_blended)
@property
@pulumi.getter(name="includeCredit")
def include_credit(self) -> Optional[bool]:
"""
A boolean value indicating whether to include credits in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_credit")
@property
@pulumi.getter(name="includeDiscount")
def include_discount(self) -> Optional[bool]:
"""
Specifies whether a budget includes discounts. Defaults to `true`
"""
return pulumi.get(self, "include_discount")
@property
@pulumi.getter(name="includeOtherSubscription")
def include_other_subscription(self) -> Optional[bool]:
"""
A boolean value indicating whether to include other subscription costs in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_other_subscription")
@property
@pulumi.getter(name="includeRecurring")
def include_recurring(self) -> Optional[bool]:
"""
A boolean value indicating whether to include recurring costs in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_recurring")
@property
@pulumi.getter(name="includeRefund")
def include_refund(self) -> Optional[bool]:
"""
A boolean value indicating whether to include refunds in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_refund")
@property
@pulumi.getter(name="includeSubscription")
def include_subscription(self) -> Optional[bool]:
"""
A boolean value indicating whether to include subscriptions in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_subscription")
@property
@pulumi.getter(name="includeSupport")
def include_support(self) -> Optional[bool]:
"""
A boolean value indicating whether to include support costs in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_support")
@property
@pulumi.getter(name="includeTax")
def include_tax(self) -> Optional[bool]:
"""
A boolean value indicating whether to include tax in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_tax")
@property
@pulumi.getter(name="includeUpfront")
def include_upfront(self) -> Optional[bool]:
"""
A boolean value indicating whether to include upfront costs in the cost budget. Defaults to `true`
"""
return pulumi.get(self, "include_upfront")
@property
@pulumi.getter(name="useAmortized")
def use_amortized(self) -> Optional[bool]:
"""
Specifies whether a budget uses the amortized rate. Defaults to `false`
"""
return pulumi.get(self, "use_amortized")
@property
@pulumi.getter(name="useBlended")
def use_blended(self) -> Optional[bool]:
"""
A boolean value indicating whether to use blended costs in the cost budget. Defaults to `false`
"""
return pulumi.get(self, "use_blended")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class BudgetNotification(dict):
def __init__(__self__, *,
comparison_operator: str,
notification_type: str,
threshold: float,
threshold_type: str,
subscriber_email_addresses: Optional[Sequence[str]] = None,
subscriber_sns_topic_arns: Optional[Sequence[str]] = None):
"""
:param str comparison_operator: (Required) Comparison operator to use to evaluate the condition. Can be `LESS_THAN`, `EQUAL_TO` or `GREATER_THAN`.
:param str notification_type: (Required) What kind of budget value to notify on. Can be `ACTUAL` or `FORECASTED`
:param float threshold: (Required) Threshold when the notification should be sent.
:param str threshold_type: (Required) What kind of threshold is defined. Can be `PERCENTAGE` or `ABSOLUTE_VALUE`.
:param Sequence[str] subscriber_email_addresses: (Optional) E-Mail addresses to notify. Either this or `subscriber_sns_topic_arns` is required.
:param Sequence[str] subscriber_sns_topic_arns: (Optional) SNS topics to notify. Either this or `subscriber_email_addresses` is required.
"""
pulumi.set(__self__, "comparison_operator", comparison_operator)
pulumi.set(__self__, "notification_type", notification_type)
pulumi.set(__self__, "threshold", threshold)
pulumi.set(__self__, "threshold_type", threshold_type)
if subscriber_email_addresses is not None:
pulumi.set(__self__, "subscriber_email_addresses", subscriber_email_addresses)
if subscriber_sns_topic_arns is not None:
pulumi.set(__self__, "subscriber_sns_topic_arns", subscriber_sns_topic_arns)
@property
@pulumi.getter(name="comparisonOperator")
def comparison_operator(self) -> str:
"""
(Required) Comparison operator to use to evaluate the condition. Can be `LESS_THAN`, `EQUAL_TO` or `GREATER_THAN`.
"""
return pulumi.get(self, "comparison_operator")
@property
@pulumi.getter(name="notificationType")
def notification_type(self) -> str:
"""
(Required) What kind of budget value to notify on. Can be `ACTUAL` or `FORECASTED`
"""
return pulumi.get(self, "notification_type")
@property
@pulumi.getter
def threshold(self) -> float:
"""
(Required) Threshold when the notification should be sent.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="thresholdType")
def threshold_type(self) -> str:
"""
(Required) What kind of threshold is defined. Can be `PERCENTAGE` or `ABSOLUTE_VALUE`.
"""
return pulumi.get(self, "threshold_type")
@property
@pulumi.getter(name="subscriberEmailAddresses")
def subscriber_email_addresses(self) -> Optional[Sequence[str]]:
"""
(Optional) E-Mail addresses to notify. Either this or `subscriber_sns_topic_arns` is required.
"""
return pulumi.get(self, "subscriber_email_addresses")
@property
@pulumi.getter(name="subscriberSnsTopicArns")
def subscriber_sns_topic_arns(self) -> Optional[Sequence[str]]:
"""
(Optional) SNS topics to notify. Either this or `subscriber_email_addresses` is required.
"""
return pulumi.get(self, "subscriber_sns_topic_arns")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
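# Minimal usage sketch (an editorial illustration, not generated code;
# it assumes these output types mirror the cost_types / notification
# input blocks accepted by aws.budgets.Budget):
#
#   import pulumi_aws as aws
#   budget = aws.budgets.Budget("example",
#       budget_type="COST",
#       limit_amount="100",
#       limit_unit="USD",
#       time_unit="MONTHLY",
#       notifications=[aws.budgets.BudgetNotificationArgs(
#           comparison_operator="GREATER_THAN",
#           notification_type="FORECASTED",
#           threshold=90,
#           threshold_type="PERCENTAGE",
#           subscriber_email_addresses=["ops@example.com"],
#       )])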
```
#### File: pulumi_aws/cloudtrail/outputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
__all__ = [
'TrailEventSelector',
'TrailEventSelectorDataResource',
'TrailInsightSelector',
]
@pulumi.output_type
class TrailEventSelector(dict):
def __init__(__self__, *,
data_resources: Optional[Sequence['outputs.TrailEventSelectorDataResource']] = None,
include_management_events: Optional[bool] = None,
read_write_type: Optional[str] = None):
"""
:param Sequence['TrailEventSelectorDataResourceArgs'] data_resources: Specifies logging data events. Fields documented below.
:param bool include_management_events: Specify if you want your event selector to include management events for your trail.
:param str read_write_type: Specify if you want your trail to log read-only events, write-only events, or all. Valid values: "ReadOnly", "WriteOnly", "All". Defaults to `All`.
"""
if data_resources is not None:
pulumi.set(__self__, "data_resources", data_resources)
if include_management_events is not None:
pulumi.set(__self__, "include_management_events", include_management_events)
if read_write_type is not None:
pulumi.set(__self__, "read_write_type", read_write_type)
@property
@pulumi.getter(name="dataResources")
def data_resources(self) -> Optional[Sequence['outputs.TrailEventSelectorDataResource']]:
"""
Specifies logging data events. Fields documented below.
"""
return pulumi.get(self, "data_resources")
@property
@pulumi.getter(name="includeManagementEvents")
def include_management_events(self) -> Optional[bool]:
"""
Specify if you want your event selector to include management events for your trail.
"""
return pulumi.get(self, "include_management_events")
@property
@pulumi.getter(name="readWriteType")
def read_write_type(self) -> Optional[str]:
"""
Specify if you want your trail to log read-only events, write-only events, or all. Valid values: "ReadOnly", "WriteOnly", "All". Defaults to `All`.
"""
return pulumi.get(self, "read_write_type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class TrailEventSelectorDataResource(dict):
def __init__(__self__, *,
type: str,
values: Sequence[str]):
"""
:param str type: The resource type in which you want to log data events. You can specify only the following values: "AWS::S3::Object", "AWS::Lambda::Function"
:param Sequence[str] values: A list of ARNs for the specified S3 buckets and object prefixes.
"""
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type in which you want to log data events. You can specify only the following values: "AWS::S3::Object", "AWS::Lambda::Function"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
A list of ARNs for the specified S3 buckets and object prefixes.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class TrailInsightSelector(dict):
def __init__(__self__, *,
insight_type: str):
"""
:param str insight_type: The type of insights to log on a trail. In this release, only `ApiCallRateInsight` is supported as an insight type.
"""
pulumi.set(__self__, "insight_type", insight_type)
@property
@pulumi.getter(name="insightType")
def insight_type(self) -> str:
"""
The type of insights to log on a trail. In this release, only `ApiCallRateInsight` is supported as an insight type.
"""
return pulumi.get(self, "insight_type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
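# Minimal usage sketch (an editorial illustration, not generated code;
# it assumes these output types mirror the event_selectors input blocks
# accepted by aws.cloudtrail.Trail):
#
#   import pulumi_aws as aws
#   trail = aws.cloudtrail.Trail("example",
#       s3_bucket_name="my-trail-bucket",   # assumed bucket name
#       event_selectors=[aws.cloudtrail.TrailEventSelectorArgs(
#           read_write_type="All",
#           include_management_events=True,
#           data_resources=[aws.cloudtrail.TrailEventSelectorDataResourceArgs(
#               type="AWS::S3::Object",
#               values=["arn:aws:s3:::my-data-bucket/"],  # assumed bucket ARN prefix
#           )],
#       )])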
```
#### File: pulumi_aws/ec2clientvpn/outputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'EndpointAuthenticationOption',
'EndpointConnectionLogOptions',
]
@pulumi.output_type
class EndpointAuthenticationOption(dict):
def __init__(__self__, *,
type: str,
active_directory_id: Optional[str] = None,
root_certificate_chain_arn: Optional[str] = None,
saml_provider_arn: Optional[str] = None):
"""
:param str type: The type of client authentication to be used. Specify `certificate-authentication` to use certificate-based authentication, `directory-service-authentication` to use Active Directory authentication, or `federated-authentication` to use Federated Authentication via SAML 2.0.
:param str active_directory_id: The ID of the Active Directory to be used for authentication if type is `directory-service-authentication`.
:param str root_certificate_chain_arn: The ARN of the client certificate. The certificate must be signed by a certificate authority (CA) and it must be provisioned in AWS Certificate Manager (ACM). Only necessary when type is set to `certificate-authentication`.
:param str saml_provider_arn: The ARN of the IAM SAML identity provider if type is `federated-authentication`.
"""
pulumi.set(__self__, "type", type)
if active_directory_id is not None:
pulumi.set(__self__, "active_directory_id", active_directory_id)
if root_certificate_chain_arn is not None:
pulumi.set(__self__, "root_certificate_chain_arn", root_certificate_chain_arn)
if saml_provider_arn is not None:
pulumi.set(__self__, "saml_provider_arn", saml_provider_arn)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of client authentication to be used. Specify `certificate-authentication` to use certificate-based authentication, `directory-service-authentication` to use Active Directory authentication, or `federated-authentication` to use Federated Authentication via SAML 2.0.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="activeDirectoryId")
def active_directory_id(self) -> Optional[str]:
"""
The ID of the Active Directory to be used for authentication if type is `directory-service-authentication`.
"""
return pulumi.get(self, "active_directory_id")
@property
@pulumi.getter(name="rootCertificateChainArn")
def root_certificate_chain_arn(self) -> Optional[str]:
"""
The ARN of the client certificate. The certificate must be signed by a certificate authority (CA) and it must be provisioned in AWS Certificate Manager (ACM). Only necessary when type is set to `certificate-authentication`.
"""
return pulumi.get(self, "root_certificate_chain_arn")
@property
@pulumi.getter(name="samlProviderArn")
def saml_provider_arn(self) -> Optional[str]:
"""
The ARN of the IAM SAML identity provider if type is `federated-authentication`.
"""
return pulumi.get(self, "saml_provider_arn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EndpointConnectionLogOptions(dict):
def __init__(__self__, *,
enabled: bool,
cloudwatch_log_group: Optional[str] = None,
cloudwatch_log_stream: Optional[str] = None):
"""
:param bool enabled: Indicates whether connection logging is enabled.
:param str cloudwatch_log_group: The name of the CloudWatch Logs log group.
:param str cloudwatch_log_stream: The name of the CloudWatch Logs log stream to which the connection data is published.
"""
pulumi.set(__self__, "enabled", enabled)
if cloudwatch_log_group is not None:
pulumi.set(__self__, "cloudwatch_log_group", cloudwatch_log_group)
if cloudwatch_log_stream is not None:
pulumi.set(__self__, "cloudwatch_log_stream", cloudwatch_log_stream)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Indicates whether connection logging is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="cloudwatchLogGroup")
def cloudwatch_log_group(self) -> Optional[str]:
"""
The name of the CloudWatch Logs log group.
"""
return pulumi.get(self, "cloudwatch_log_group")
@property
@pulumi.getter(name="cloudwatchLogStream")
def cloudwatch_log_stream(self) -> Optional[str]:
"""
The name of the CloudWatch Logs log stream to which the connection data is published.
"""
return pulumi.get(self, "cloudwatch_log_stream")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
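# Minimal usage sketch (an editorial illustration, not generated code;
# it assumes these output types mirror the authentication_options and
# connection_log_options input blocks accepted by aws.ec2clientvpn.Endpoint):
#
#   import pulumi_aws as aws
#   endpoint = aws.ec2clientvpn.Endpoint("example",
#       description="Example Client VPN endpoint",
#       server_certificate_arn=server_cert_arn,   # assumed ACM certificate ARN variable
#       client_cidr_block="10.0.0.0/16",
#       authentication_options=[aws.ec2clientvpn.EndpointAuthenticationOptionArgs(
#           type="certificate-authentication",
#           root_certificate_chain_arn=root_cert_arn,  # assumed ACM certificate ARN variable
#       )],
#       connection_log_options=aws.ec2clientvpn.EndpointConnectionLogOptionsArgs(
#           enabled=False,
#       ))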
```
#### File: pulumi_aws/ec2/managed_prefix_list.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ManagedPrefixListArgs', 'ManagedPrefixList']
@pulumi.input_type
class ManagedPrefixListArgs:
def __init__(__self__, *,
address_family: pulumi.Input[str],
max_entries: pulumi.Input[int],
entries: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ManagedPrefixList resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
"""
pulumi.set(__self__, "address_family", address_family)
pulumi.set(__self__, "max_entries", max_entries)
if entries is not None:
pulumi.set(__self__, "entries", entries)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="addressFamily")
def address_family(self) -> pulumi.Input[str]:
"""
The address family (`IPv4` or `IPv6`) of
this prefix list.
"""
return pulumi.get(self, "address_family")
@address_family.setter
def address_family(self, value: pulumi.Input[str]):
pulumi.set(self, "address_family", value)
@property
@pulumi.getter(name="maxEntries")
def max_entries(self) -> pulumi.Input[int]:
"""
The maximum number of entries that
this prefix list can contain.
"""
return pulumi.get(self, "max_entries")
@max_entries.setter
def max_entries(self, value: pulumi.Input[int]):
pulumi.set(self, "max_entries", value)
@property
@pulumi.getter
def entries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]]:
"""
Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
"""
return pulumi.get(self, "entries")
@entries.setter
def entries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedPrefixListEntryArgs']]]]):
pulumi.set(self, "entries", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this resource. The name must not start with `com.amazonaws`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ManagedPrefixList(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a managed prefix list resource.
> **NOTE on `max_entries`:** When you reference a Prefix List in a resource,
the maximum number of entries for the prefix lists counts as the same number of rules
or entries for the resource. For example, if you create a prefix list with a maximum
of 20 entries and you reference that prefix list in a security group rule, this counts
as 20 rules for the security group.
## Example Usage
Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.ManagedPrefixList("example",
address_family="IPv4",
max_entries=5,
entries=[
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc["example"]["cidr_block"],
description="Primary",
),
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc_ipv4_cidr_block_association["example"]["cidr_block"],
description="Secondary",
),
],
tags={
"Env": "live",
})
```
## Import
Prefix Lists can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/managedPrefixList:ManagedPrefixList default pl-0570a1d2d725c16be
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagedPrefixListArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a managed prefix list resource.
> **NOTE on `max_entries`:** When you reference a Prefix List in a resource,
the maximum number of entries for the prefix lists counts as the same number of rules
or entries for the resource. For example, if you create a prefix list with a maximum
of 20 entries and you reference that prefix list in a security group rule, this counts
as 20 rules for the security group.
## Example Usage
Basic usage
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2.ManagedPrefixList("example",
address_family="IPv4",
max_entries=5,
entries=[
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc["example"]["cidr_block"],
description="Primary",
),
aws.ec2.ManagedPrefixListEntryArgs(
cidr=aws_vpc_ipv4_cidr_block_association["example"]["cidr_block"],
description="Secondary",
),
],
tags={
"Env": "live",
})
```
## Import
Prefix Lists can be imported using the `id`, e.g.
```sh
$ pulumi import aws:ec2/managedPrefixList:ManagedPrefixList default pl-0570a1d2d725c16be
```
:param str resource_name: The name of the resource.
:param ManagedPrefixListArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagedPrefixListArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if address_family is None and not opts.urn:
raise TypeError("Missing required property 'address_family'")
__props__['address_family'] = address_family
__props__['entries'] = entries
if max_entries is None and not opts.urn:
raise TypeError("Missing required property 'max_entries'")
__props__['max_entries'] = max_entries
__props__['name'] = name
__props__['tags'] = tags
__props__['arn'] = None
__props__['owner_id'] = None
__props__['version'] = None
super(ManagedPrefixList, __self__).__init__(
'aws:ec2/managedPrefixList:ManagedPrefixList',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
address_family: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
entries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]]] = None,
max_entries: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[int]] = None) -> 'ManagedPrefixList':
"""
Get an existing ManagedPrefixList resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_family: The address family (`IPv4` or `IPv6`) of
this prefix list.
:param pulumi.Input[str] arn: The ARN of the prefix list.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ManagedPrefixListEntryArgs']]]] entries: Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
:param pulumi.Input[int] max_entries: The maximum number of entries that
this prefix list can contain.
:param pulumi.Input[str] name: The name of this resource. The name must not start with `com.amazonaws`.
:param pulumi.Input[str] owner_id: The ID of the AWS account that owns this prefix list.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to this resource.
:param pulumi.Input[int] version: The latest version of this prefix list.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address_family"] = address_family
__props__["arn"] = arn
__props__["entries"] = entries
__props__["max_entries"] = max_entries
__props__["name"] = name
__props__["owner_id"] = owner_id
__props__["tags"] = tags
__props__["version"] = version
return ManagedPrefixList(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressFamily")
def address_family(self) -> pulumi.Output[str]:
"""
The address family (`IPv4` or `IPv6`) of
this prefix list.
"""
return pulumi.get(self, "address_family")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the prefix list.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def entries(self) -> pulumi.Output[Optional[Sequence['outputs.ManagedPrefixListEntry']]]:
"""
Can be specified multiple times for each prefix list entry.
Each entry block supports fields documented below. Different entries may have
overlapping CIDR blocks, but a particular CIDR should not be duplicated.
"""
return pulumi.get(self, "entries")
@property
@pulumi.getter(name="maxEntries")
def max_entries(self) -> pulumi.Output[int]:
"""
The maximum number of entries that
this prefix list can contain.
"""
return pulumi.get(self, "max_entries")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of this resource. The name must not start with `com.amazonaws`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
The ID of the AWS account that owns this prefix list.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to this resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
The latest version of this prefix list.
"""
return pulumi.get(self, "version")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
#### File: pulumi_aws/elasticbeanstalk/outputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'ApplicationAppversionLifecycle',
'ConfigurationTemplateSetting',
'EnvironmentAllSetting',
'EnvironmentSetting',
'GetApplicationAppversionLifecycleResult',
]
@pulumi.output_type
class ApplicationAppversionLifecycle(dict):
def __init__(__self__, *,
service_role: str,
delete_source_from_s3: Optional[bool] = None,
max_age_in_days: Optional[int] = None,
max_count: Optional[int] = None):
"""
:param str service_role: The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role.
:param bool delete_source_from_s3: Set to `true` to delete a version's source bundle from S3 when the application version is deleted.
:param int max_age_in_days: The number of days to retain an application version ('max_age_in_days' and 'max_count' cannot be set simultaneously).
:param int max_count: The maximum number of application versions to retain ('max_age_in_days' and 'max_count' cannot be set simultaneously).
"""
pulumi.set(__self__, "service_role", service_role)
if delete_source_from_s3 is not None:
pulumi.set(__self__, "delete_source_from_s3", delete_source_from_s3)
if max_age_in_days is not None:
pulumi.set(__self__, "max_age_in_days", max_age_in_days)
if max_count is not None:
pulumi.set(__self__, "max_count", max_count)
@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> str:
"""
The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role.
"""
return pulumi.get(self, "service_role")
@property
@pulumi.getter(name="deleteSourceFromS3")
def delete_source_from_s3(self) -> Optional[bool]:
"""
Set to `true` to delete a version's source bundle from S3 when the application version is deleted.
"""
return pulumi.get(self, "delete_source_from_s3")
@property
@pulumi.getter(name="maxAgeInDays")
def max_age_in_days(self) -> Optional[int]:
"""
The number of days to retain an application version ('max_age_in_days' and 'max_count' cannot be set simultaneously).
"""
return pulumi.get(self, "max_age_in_days")
@property
@pulumi.getter(name="maxCount")
def max_count(self) -> Optional[int]:
"""
The maximum number of application versions to retain ('max_age_in_days' and 'max_count' cannot be set simultaneously).
"""
return pulumi.get(self, "max_count")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConfigurationTemplateSetting(dict):
def __init__(__self__, *,
name: str,
namespace: str,
value: str,
resource: Optional[str] = None):
"""
:param str name: A unique name for this Template.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "value", value)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def name(self) -> str:
"""
A unique name for this Template.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> str:
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@property
@pulumi.getter
def resource(self) -> Optional[str]:
return pulumi.get(self, "resource")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EnvironmentAllSetting(dict):
def __init__(__self__, *,
name: str,
namespace: str,
value: str,
resource: Optional[str] = None):
"""
:param str name: A unique name for this Environment. This name is used
in the application URL
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "value", value)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def name(self) -> str:
"""
A unique name for this Environment. This name is used
in the application URL
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> str:
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@property
@pulumi.getter
def resource(self) -> Optional[str]:
return pulumi.get(self, "resource")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EnvironmentSetting(dict):
def __init__(__self__, *,
name: str,
namespace: str,
value: str,
resource: Optional[str] = None):
"""
:param str name: A unique name for this Environment. This name is used
in the application URL
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "value", value)
if resource is not None:
pulumi.set(__self__, "resource", resource)
@property
@pulumi.getter
def name(self) -> str:
"""
A unique name for this Environment. This name is used
in the application URL
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> str:
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@property
@pulumi.getter
def resource(self) -> Optional[str]:
return pulumi.get(self, "resource")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GetApplicationAppversionLifecycleResult(dict):
def __init__(__self__, *,
delete_source_from_s3: bool,
max_age_in_days: int,
max_count: int,
service_role: str):
"""
:param bool delete_source_from_s3: Specifies whether to delete a version's source bundle from S3 when the application version is deleted.
:param int max_age_in_days: The number of days to retain an application version.
:param int max_count: The maximum number of application versions to retain.
:param str service_role: The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role.
"""
pulumi.set(__self__, "delete_source_from_s3", delete_source_from_s3)
pulumi.set(__self__, "max_age_in_days", max_age_in_days)
pulumi.set(__self__, "max_count", max_count)
pulumi.set(__self__, "service_role", service_role)
@property
@pulumi.getter(name="deleteSourceFromS3")
def delete_source_from_s3(self) -> bool:
"""
Specifies whether to delete a version's source bundle from S3 when the application version is deleted.
"""
return pulumi.get(self, "delete_source_from_s3")
@property
@pulumi.getter(name="maxAgeInDays")
def max_age_in_days(self) -> int:
"""
The number of days to retain an application version.
"""
return pulumi.get(self, "max_age_in_days")
@property
@pulumi.getter(name="maxCount")
def max_count(self) -> int:
"""
The maximum number of application versions to retain.
"""
return pulumi.get(self, "max_count")
@property
@pulumi.getter(name="serviceRole")
def service_role(self) -> str:
"""
The ARN of an IAM service role under which the application version is deleted. Elastic Beanstalk must have permission to assume this role.
"""
return pulumi.get(self, "service_role")
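# Minimal usage sketch (an editorial illustration, not generated code;
# it assumes ApplicationAppversionLifecycle mirrors the
# appversion_lifecycle input block accepted by aws.elasticbeanstalk.Application):
#
#   import pulumi_aws as aws
#   app = aws.elasticbeanstalk.Application("example",
#       description="example application",
#       appversion_lifecycle=aws.elasticbeanstalk.ApplicationAppversionLifecycleArgs(
#           service_role=role_arn,   # assumed IAM service role ARN variable
#           max_count=128,
#           delete_source_from_s3=True,
#       ))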
```
#### File: python/pulumi_aws/get_autoscaling_groups.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = [
'GetAutoscalingGroupsResult',
'AwaitableGetAutoscalingGroupsResult',
'get_autoscaling_groups',
]
warnings.warn("""aws.getAutoscalingGroups has been deprecated in favor of aws.autoscaling.getAmiIds""", DeprecationWarning)
@pulumi.output_type
class GetAutoscalingGroupsResult:
"""
A collection of values returned by getAutoscalingGroups.
"""
def __init__(__self__, arns=None, filters=None, id=None, names=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
"""
A list of the Autoscaling Group ARNs in the current region.
"""
return pulumi.get(self, "arns")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetAutoscalingGroupsFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
"""
A list of the Autoscaling Group names in the current region.
"""
return pulumi.get(self, "names")
class AwaitableGetAutoscalingGroupsResult(GetAutoscalingGroupsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAutoscalingGroupsResult(
arns=self.arns,
filters=self.filters,
id=self.id,
names=self.names)
def get_autoscaling_groups(filters: Optional[Sequence[pulumi.InputType['GetAutoscalingGroupsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutoscalingGroupsResult:
"""
The Autoscaling Groups data source allows access to the list of AWS
ASGs within a specific region. This will allow you to pass a list of AutoScaling Groups to other resources.
:param Sequence[pulumi.InputType['GetAutoscalingGroupsFilterArgs']] filters: A filter used to scope the list e.g. by tags. See [related docs](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_Filter.html).
"""
pulumi.log.warn("""get_autoscaling_groups is deprecated: aws.getAutoscalingGroups has been deprecated in favor of aws.autoscaling.getAmiIds""")
__args__ = dict()
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAutoscalingGroups:getAutoscalingGroups', __args__, opts=opts, typ=GetAutoscalingGroupsResult).value
return AwaitableGetAutoscalingGroupsResult(
arns=__ret__.arns,
filters=__ret__.filters,
id=__ret__.id,
names=__ret__.names)
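# Minimal call sketch (an editorial illustration, not generated code;
# note the data source is deprecated, as warned above, and the filter
# field names below are assumed to follow the AWS API_Filter shape):
#
#   import pulumi
#   import pulumi_aws as aws
#   groups = aws.get_autoscaling_groups(filters=[
#       aws.GetAutoscalingGroupsFilterArgs(
#           name="key",
#           values=["Team"],
#       )])
#   pulumi.export("names", groups.names)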
```
#### File: pulumi_aws/identitystore/_inputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'GetGroupFilterArgs',
'GetUserFilterArgs',
]
@pulumi.input_type
class GetGroupFilterArgs:
def __init__(__self__, *,
attribute_path: str,
attribute_value: str):
"""
:param str attribute_path: The attribute path that is used to specify which attribute name to search. Currently, `DisplayName` is the only valid attribute path.
:param str attribute_value: The value for an attribute.
"""
pulumi.set(__self__, "attribute_path", attribute_path)
pulumi.set(__self__, "attribute_value", attribute_value)
@property
@pulumi.getter(name="attributePath")
def attribute_path(self) -> str:
"""
The attribute path that is used to specify which attribute name to search. Currently, `DisplayName` is the only valid attribute path.
"""
return pulumi.get(self, "attribute_path")
@attribute_path.setter
def attribute_path(self, value: str):
pulumi.set(self, "attribute_path", value)
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> str:
"""
The value for an attribute.
"""
return pulumi.get(self, "attribute_value")
@attribute_value.setter
def attribute_value(self, value: str):
pulumi.set(self, "attribute_value", value)
@pulumi.input_type
class GetUserFilterArgs:
def __init__(__self__, *,
attribute_path: str,
attribute_value: str):
"""
:param str attribute_path: The attribute path that is used to specify which attribute name to search. Currently, `UserName` is the only valid attribute path.
:param str attribute_value: The value for an attribute.
"""
pulumi.set(__self__, "attribute_path", attribute_path)
pulumi.set(__self__, "attribute_value", attribute_value)
@property
@pulumi.getter(name="attributePath")
def attribute_path(self) -> str:
"""
The attribute path that specifies which attribute to search by. Currently, `UserName` is the only valid attribute path.
"""
return pulumi.get(self, "attribute_path")
@attribute_path.setter
def attribute_path(self, value: str):
pulumi.set(self, "attribute_path", value)
@property
@pulumi.getter(name="attributeValue")
def attribute_value(self) -> str:
"""
The value for an attribute.
"""
return pulumi.get(self, "attribute_value")
@attribute_value.setter
def attribute_value(self, value: str):
pulumi.set(self, "attribute_value", value)
```
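A sketch of how these filter inputs are consumed: the identity store data sources take a list of them alongside the store ID. The call below assumes `aws.identitystore.get_group` exposes this signature and a `group_id` attribute; the store ID is a hypothetical placeholder.
```python
import pulumi
import pulumi_aws as aws

# Look up a group by display name; DisplayName is the only valid
# attribute path for GetGroupFilterArgs.
example = aws.identitystore.get_group(
    identity_store_id="d-1234567890",  # hypothetical identity store ID
    filters=[aws.identitystore.GetGroupFilterArgs(
        attribute_path="DisplayName",
        attribute_value="ExampleGroup",
    )],
)

pulumi.export("group_id", example.group_id)
```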
#### File: pulumi_aws/kinesis/stream.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['StreamArgs', 'Stream']
@pulumi.input_type
class StreamArgs:
def __init__(__self__, *,
shard_count: pulumi.Input[int],
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Stream resource.
:param pulumi.Input[int] shard_count: The number of shards that the stream will use.
Amazon publishes guidelines for sizing a stream; consult them when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
:param pulumi.Input[str] encryption_type: The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
:param pulumi.Input[bool] enforce_consumer_deletion: A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
:param pulumi.Input[str] kms_key_id: The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
:param pulumi.Input[int] retention_period: Length of time (in hours) that data records are accessible after they are added to the stream. The maximum retention period is 8760 hours (365 days); the minimum and default are 24 hours.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_level_metrics: A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value `ALL` should not be used; instead, provide an explicit list of the metrics you wish to enable.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
pulumi.set(__self__, "shard_count", shard_count)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if encryption_type is not None:
pulumi.set(__self__, "encryption_type", encryption_type)
if enforce_consumer_deletion is not None:
pulumi.set(__self__, "enforce_consumer_deletion", enforce_consumer_deletion)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if name is not None:
pulumi.set(__self__, "name", name)
if retention_period is not None:
pulumi.set(__self__, "retention_period", retention_period)
if shard_level_metrics is not None:
pulumi.set(__self__, "shard_level_metrics", shard_level_metrics)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="shardCount")
def shard_count(self) -> pulumi.Input[int]:
"""
The number of shards that the stream will use.
Amazon publishes guidelines for sizing a stream; consult them when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
"""
return pulumi.get(self, "shard_count")
@shard_count.setter
def shard_count(self, value: pulumi.Input[int]):
pulumi.set(self, "shard_count", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="encryptionType")
def encryption_type(self) -> Optional[pulumi.Input[str]]:
"""
The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
"""
return pulumi.get(self, "encryption_type")
@encryption_type.setter
def encryption_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encryption_type", value)
@property
@pulumi.getter(name="enforceConsumerDeletion")
def enforce_consumer_deletion(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
"""
return pulumi.get(self, "enforce_consumer_deletion")
@enforce_consumer_deletion.setter
def enforce_consumer_deletion(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enforce_consumer_deletion", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="retentionPeriod")
def retention_period(self) -> Optional[pulumi.Input[int]]:
"""
Length of time (in hours) that data records are accessible after they are added to the stream. The maximum retention period is 8760 hours (365 days); the minimum and default are 24 hours.
"""
return pulumi.get(self, "retention_period")
@retention_period.setter
def retention_period(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "retention_period", value)
@property
@pulumi.getter(name="shardLevelMetrics")
def shard_level_metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value `ALL` should not be used; instead, provide an explicit list of the metrics you wish to enable.
"""
return pulumi.get(self, "shard_level_metrics")
@shard_level_metrics.setter
def shard_level_metrics(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "shard_level_metrics", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Stream(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
scales elastically for real-time processing of streaming big data.
For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.Stream("testStream",
retention_period=48,
shard_count=1,
shard_level_metrics=[
"IncomingBytes",
"OutgoingBytes",
],
tags={
"Environment": "test",
})
```
## Import
Kinesis Streams can be imported using the `name`, e.g.
```sh
$ pulumi import aws:kinesis/stream:Stream test_stream kinesis-test
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
:param pulumi.Input[str] encryption_type: The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
:param pulumi.Input[bool] enforce_consumer_deletion: A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
:param pulumi.Input[str] kms_key_id: The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
:param pulumi.Input[int] retention_period: Length of time (in hours) that data records are accessible after they are added to the stream. The maximum retention period is 8760 hours (365 days); the minimum and default are 24 hours.
:param pulumi.Input[int] shard_count: The number of shards that the stream will use.
Amazon publishes guidelines for sizing a stream; consult them when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_level_metrics: A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value `ALL` should not be used; instead, provide an explicit list of the metrics you wish to enable.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StreamArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
scales elastically for real-time processing of streaming big data.
For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
test_stream = aws.kinesis.Stream("testStream",
retention_period=48,
shard_count=1,
shard_level_metrics=[
"IncomingBytes",
"OutgoingBytes",
],
tags={
"Environment": "test",
})
```
## Import
Kinesis Streams can be imported using the `name`, e.g.
```sh
$ pulumi import aws:kinesis/stream:Stream test_stream kinesis-test
```
:param str resource_name: The name of the resource.
:param StreamArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StreamArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['arn'] = arn
__props__['encryption_type'] = encryption_type
__props__['enforce_consumer_deletion'] = enforce_consumer_deletion
__props__['kms_key_id'] = kms_key_id
__props__['name'] = name
__props__['retention_period'] = retention_period
if shard_count is None and not opts.urn:
raise TypeError("Missing required property 'shard_count'")
__props__['shard_count'] = shard_count
__props__['shard_level_metrics'] = shard_level_metrics
__props__['tags'] = tags
super(Stream, __self__).__init__(
'aws:kinesis/stream:Stream',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
encryption_type: Optional[pulumi.Input[str]] = None,
enforce_consumer_deletion: Optional[pulumi.Input[bool]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
retention_period: Optional[pulumi.Input[int]] = None,
shard_count: Optional[pulumi.Input[int]] = None,
shard_level_metrics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Stream':
"""
Get an existing Stream resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
:param pulumi.Input[str] encryption_type: The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
:param pulumi.Input[bool] enforce_consumer_deletion: A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
:param pulumi.Input[str] kms_key_id: The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
:param pulumi.Input[str] name: A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
:param pulumi.Input[int] retention_period: Length of time (in hours) that data records are accessible after they are added to the stream. The maximum retention period is 8760 hours (365 days); the minimum and default are 24 hours.
:param pulumi.Input[int] shard_count: The number of shards that the stream will use.
Amazon publishes guidelines for sizing a stream; consult them when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
:param pulumi.Input[Sequence[pulumi.Input[str]]] shard_level_metrics: A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value `ALL` should not be used; instead, provide an explicit list of the metrics you wish to enable.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["encryption_type"] = encryption_type
__props__["enforce_consumer_deletion"] = enforce_consumer_deletion
__props__["kms_key_id"] = kms_key_id
__props__["name"] = name
__props__["retention_period"] = retention_period
__props__["shard_count"] = shard_count
__props__["shard_level_metrics"] = shard_level_metrics
__props__["tags"] = tags
return Stream(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="encryptionType")
def encryption_type(self) -> pulumi.Output[Optional[str]]:
"""
The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
"""
return pulumi.get(self, "encryption_type")
@property
@pulumi.getter(name="enforceConsumerDeletion")
def enforce_consumer_deletion(self) -> pulumi.Output[Optional[bool]]:
"""
A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
"""
return pulumi.get(self, "enforce_consumer_deletion")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> pulumi.Output[Optional[str]]:
"""
The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="retentionPeriod")
def retention_period(self) -> pulumi.Output[Optional[int]]:
"""
Length of time (in hours) that data records are accessible after they are added to the stream. The maximum retention period is 8760 hours (365 days); the minimum and default are 24 hours.
"""
return pulumi.get(self, "retention_period")
@property
@pulumi.getter(name="shardCount")
def shard_count(self) -> pulumi.Output[int]:
"""
The number of shards that the stream will use.
Amazon publishes guidelines for sizing a stream; consult them when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
"""
return pulumi.get(self, "shard_count")
@property
@pulumi.getter(name="shardLevelMetrics")
def shard_level_metrics(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value `ALL` should not be used; instead, provide an explicit list of the metrics you wish to enable.
"""
return pulumi.get(self, "shard_level_metrics")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
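The docstring examples use the keyword-argument overload; the `StreamArgs` overload declared above can be exercised the same way. A minimal sketch, assuming the constructor accepts the argument bag via `args=`:
```python
import pulumi
import pulumi_aws as aws

# Build the argument bag separately; shard_count is the only required property.
stream_args = aws.kinesis.StreamArgs(
    shard_count=1,
    retention_period=48,  # hours
    encryption_type="KMS",
    kms_key_id="alias/aws/kinesis",  # Kinesis-owned master key alias
)

example = aws.kinesis.Stream("exampleStream", args=stream_args)

pulumi.export("stream_arn", example.arn)
```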
#### File: pulumi_aws/lightsail/key_pair.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['KeyPairArgs', 'KeyPair']
@pulumi.input_type
class KeyPairArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a KeyPair resource.
:param pulumi.Input[str] name: The name of the Lightsail Key Pair. If omitted, a unique
name will be generated by this provider
:param pulumi.Input[str] pgp_key: An optional PGP key to encrypt the resulting private
key material. Only used when creating a new key pair
:param pulumi.Input[str] public_key: The public key material. This public key will be
imported into Lightsail
"""
if name is not None:
pulumi.set(__self__, "name", name)
if name_prefix is not None:
pulumi.set(__self__, "name_prefix", name_prefix)
if pgp_key is not None:
pulumi.set(__self__, "pgp_key", pgp_key)
if public_key is not None:
pulumi.set(__self__, "public_key", public_key)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Lightsail Key Pair. If omitted, a unique
name will be generated by this provider
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name_prefix")
@name_prefix.setter
def name_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> Optional[pulumi.Input[str]]:
"""
An optional PGP key to encrypt the resulting private
key material. Only used when creating a new key pair
"""
return pulumi.get(self, "pgp_key")
@pgp_key.setter
def pgp_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pgp_key", value)
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> Optional[pulumi.Input[str]]:
"""
The public key material. This public key will be
imported into Lightsail
"""
return pulumi.get(self, "public_key")
@public_key.setter
def public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_key", value)
class KeyPair(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Lightsail Key Pair, for use with Lightsail Instances. These key pairs
are separate from EC2 Key Pairs, and must be created or imported for use with
Lightsail.
> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
## Example Usage
### Create New Key Pair
```python
import pulumi
import pulumi_aws as aws
# Create a new Lightsail Key Pair
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair")
```
### Create New Key Pair with PGP Encrypted Private Key
```python
import pulumi
import pulumi_aws as aws
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair", pgp_key="keybase:keybaseusername")
```
### Existing Public Key Import
```python
import os
import pulumi
import pulumi_aws as aws
# open() does not expand "~", so resolve the path against the user's home directory first.
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair",
    public_key=open(os.path.expanduser("~/.ssh/id_rsa.pub")).read())
```
## Import
Lightsail Key Pairs cannot be imported, because the private and public key are only available on initial creation.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the Lightsail Key Pair. If omitted, a unique
name will be generated by this provider
:param pulumi.Input[str] pgp_key: An optional PGP key to encrypt the resulting private
key material. Only used when creating a new key pair
:param pulumi.Input[str] public_key: The public key material. This public key will be
imported into Lightsail
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[KeyPairArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Lightsail Key Pair, for use with Lightsail Instances. These key pairs
are separate from EC2 Key Pairs, and must be created or imported for use with
Lightsail.
> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
## Example Usage
### Create New Key Pair
```python
import pulumi
import pulumi_aws as aws
# Create a new Lightsail Key Pair
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair")
```
### Create New Key Pair with PGP Encrypted Private Key
```python
import pulumi
import pulumi_aws as aws
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair", pgp_key="keybase:keybaseusername")
```
### Existing Public Key Import
```python
import os
import pulumi
import pulumi_aws as aws
# open() does not expand "~", so resolve the path against the user's home directory first.
lg_key_pair = aws.lightsail.KeyPair("lgKeyPair",
    public_key=open(os.path.expanduser("~/.ssh/id_rsa.pub")).read())
```
## Import
Lightsail Key Pairs cannot be imported, because the private and public key are only available on initial creation.
:param str resource_name: The name of the resource.
:param KeyPairArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(KeyPairArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['pgp_key'] = pgp_key
__props__['public_key'] = public_key
__props__['arn'] = None
__props__['encrypted_fingerprint'] = None
__props__['encrypted_private_key'] = None
__props__['fingerprint'] = None
__props__['private_key'] = None
super(KeyPair, __self__).__init__(
'aws:lightsail/keyPair:KeyPair',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
encrypted_fingerprint: Optional[pulumi.Input[str]] = None,
encrypted_private_key: Optional[pulumi.Input[str]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
name_prefix: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
public_key: Optional[pulumi.Input[str]] = None) -> 'KeyPair':
"""
Get an existing KeyPair resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the Lightsail key pair
:param pulumi.Input[str] encrypted_fingerprint: The MD5 public key fingerprint for the encrypted
private key
:param pulumi.Input[str] encrypted_private_key: The private key material, base64-encoded and
encrypted with the given `pgp_key`. This is only populated when creating a new
key and `pgp_key` is supplied
:param pulumi.Input[str] fingerprint: The MD5 public key fingerprint as specified in section 4 of RFC 4716.
:param pulumi.Input[str] name: The name of the Lightsail Key Pair. If omitted, a unique
name will be generated by this provider
:param pulumi.Input[str] pgp_key: An optional PGP key to encrypt the resulting private
key material. Only used when creating a new key pair
:param pulumi.Input[str] private_key: The private key, base64-encoded. This is only populated
when creating a new key, and when no `pgp_key` is provided
:param pulumi.Input[str] public_key: The public key material. This public key will be
imported into Lightsail
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["encrypted_fingerprint"] = encrypted_fingerprint
__props__["encrypted_private_key"] = encrypted_private_key
__props__["fingerprint"] = fingerprint
__props__["name"] = name
__props__["name_prefix"] = name_prefix
__props__["pgp_key"] = pgp_key
__props__["private_key"] = private_key
__props__["public_key"] = public_key
return KeyPair(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the Lightsail key pair
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="encryptedFingerprint")
def encrypted_fingerprint(self) -> pulumi.Output[str]:
"""
The MD5 public key fingerprint for the encrypted
private key
"""
return pulumi.get(self, "encrypted_fingerprint")
@property
@pulumi.getter(name="encryptedPrivateKey")
def encrypted_private_key(self) -> pulumi.Output[str]:
"""
The private key material, base64-encoded and
encrypted with the given `pgp_key`. This is only populated when creating a new
key and `pgp_key` is supplied
"""
return pulumi.get(self, "encrypted_private_key")
@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
"""
The MD5 public key fingerprint as specified in section 4 of RFC 4716.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Lightsail Key Pair. If omitted, a unique
name will be generated by this provider
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "name_prefix")
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> pulumi.Output[Optional[str]]:
"""
An optional PGP key to encrypt the resulting private
key material. Only used when creating a new key pair
"""
return pulumi.get(self, "pgp_key")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> pulumi.Output[str]:
"""
The private key, base64-encoded. This is only populated
when creating a new key, and when no `pgp_key` is provided
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> pulumi.Output[str]:
"""
The public key material. This public key will be
imported into Lightsail
"""
return pulumi.get(self, "public_key")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
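Because `private_key` is only populated on initial creation (and only when no `pgp_key` is supplied), a common pattern is to capture it as a secret stack output immediately. A minimal sketch:
```python
import pulumi
import pulumi_aws as aws

key_pair = aws.lightsail.KeyPair("lgKeyPair")

# Wrap the private key in Output.secret so it is encrypted in the
# stack's state and masked in console output.
pulumi.export("public_key", key_pair.public_key)
pulumi.export("private_key", pulumi.Output.secret(key_pair.private_key))
```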
#### File: pulumi_aws/neptune/_inputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = [
'ClusterParameterGroupParameterArgs',
'ParameterGroupParameterArgs',
]
@pulumi.input_type
class ClusterParameterGroupParameterArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
apply_method: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the Neptune parameter.
:param pulumi.Input[str] value: The value of the Neptune parameter.
:param pulumi.Input[str] apply_method: Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if apply_method is not None:
pulumi.set(__self__, "apply_method", apply_method)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the Neptune parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the Neptune parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="applyMethod")
def apply_method(self) -> Optional[pulumi.Input[str]]:
"""
Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
"""
return pulumi.get(self, "apply_method")
@apply_method.setter
def apply_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "apply_method", value)
@pulumi.input_type
class ParameterGroupParameterArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
apply_method: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the Neptune parameter.
:param pulumi.Input[str] value: The value of the Neptune parameter.
:param pulumi.Input[str] apply_method: The apply method of the Neptune parameter. Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if apply_method is not None:
pulumi.set(__self__, "apply_method", apply_method)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the Neptune parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the Neptune parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="applyMethod")
def apply_method(self) -> Optional[pulumi.Input[str]]:
"""
The apply method of the Neptune parameter. Valid values are `immediate` and `pending-reboot`. Defaults to `pending-reboot`.
"""
return pulumi.get(self, "apply_method")
@apply_method.setter
def apply_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "apply_method", value)
```
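These parameter args are supplied to the corresponding parameter group resources. A sketch, assuming `aws.neptune.ClusterParameterGroup` accepts a `parameters` list of `ClusterParameterGroupParameterArgs`; the family and parameter name are illustrative:
```python
import pulumi
import pulumi_aws as aws

example = aws.neptune.ClusterParameterGroup("example",
    family="neptune1",
    description="neptune cluster parameter group",
    parameters=[aws.neptune.ClusterParameterGroupParameterArgs(
        name="neptune_enable_audit_log",  # illustrative parameter name
        value="1",
        apply_method="pending-reboot",  # the default apply method
    )])

pulumi.export("parameter_group_name", example.name)
```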
#### File: pulumi_aws/rds/cluster_endpoint.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
__all__ = ['ClusterEndpointArgs', 'ClusterEndpoint']
@pulumi.input_type
class ClusterEndpointArgs:
def __init__(__self__, *,
cluster_endpoint_identifier: pulumi.Input[str],
cluster_identifier: pulumi.Input[str],
custom_endpoint_type: pulumi.Input[str],
excluded_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
static_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ClusterEndpoint resource.
:param pulumi.Input[str] cluster_endpoint_identifier: The identifier to use for the new endpoint. This parameter is stored as a lowercase string.
:param pulumi.Input[str] cluster_identifier: The cluster identifier.
:param pulumi.Input[str] custom_endpoint_type: The type of the endpoint. One of: `READER`, `ANY`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_members: List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty. Conflicts with `static_members`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] static_members: List of DB instance identifiers that are part of the custom endpoint group. Conflicts with `excluded_members`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
pulumi.set(__self__, "cluster_endpoint_identifier", cluster_endpoint_identifier)
pulumi.set(__self__, "cluster_identifier", cluster_identifier)
pulumi.set(__self__, "custom_endpoint_type", custom_endpoint_type)
if excluded_members is not None:
pulumi.set(__self__, "excluded_members", excluded_members)
if static_members is not None:
pulumi.set(__self__, "static_members", static_members)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="clusterEndpointIdentifier")
def cluster_endpoint_identifier(self) -> pulumi.Input[str]:
"""
The identifier to use for the new endpoint. This parameter is stored as a lowercase string.
"""
return pulumi.get(self, "cluster_endpoint_identifier")
@cluster_endpoint_identifier.setter
def cluster_endpoint_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_endpoint_identifier", value)
@property
@pulumi.getter(name="clusterIdentifier")
def cluster_identifier(self) -> pulumi.Input[str]:
"""
The cluster identifier.
"""
return pulumi.get(self, "cluster_identifier")
@cluster_identifier.setter
def cluster_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_identifier", value)
@property
@pulumi.getter(name="customEndpointType")
def custom_endpoint_type(self) -> pulumi.Input[str]:
"""
The type of the endpoint. One of: `READER`, `ANY`.
"""
return pulumi.get(self, "custom_endpoint_type")
@custom_endpoint_type.setter
def custom_endpoint_type(self, value: pulumi.Input[str]):
pulumi.set(self, "custom_endpoint_type", value)
@property
@pulumi.getter(name="excludedMembers")
def excluded_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty. Conflicts with `static_members`.
"""
return pulumi.get(self, "excluded_members")
@excluded_members.setter
def excluded_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_members", value)
@property
@pulumi.getter(name="staticMembers")
def static_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of DB instance identifiers that are part of the custom endpoint group. Conflicts with `excluded_members`.
"""
return pulumi.get(self, "static_members")
@static_members.setter
def static_members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "static_members", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Key-value map of resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ClusterEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_endpoint_identifier: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
custom_endpoint_type: Optional[pulumi.Input[str]] = None,
excluded_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
static_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages an RDS Aurora Cluster Endpoint.
See the [User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster) for details.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
default = aws.rds.Cluster("default",
cluster_identifier="aurora-cluster-demo",
availability_zones=[
"us-west-2a",
"us-west-2b",
"us-west-2c",
],
database_name="mydb",
master_username="foo",
master_password="<PASSWORD>",
backup_retention_period=5,
preferred_backup_window="07:00-09:00")
test1 = aws.rds.ClusterInstance("test1",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test1",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
test2 = aws.rds.ClusterInstance("test2",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test2",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
test3 = aws.rds.ClusterInstance("test3",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test3",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
eligible = aws.rds.ClusterEndpoint("eligible",
cluster_identifier=default.id,
cluster_endpoint_identifier="reader",
custom_endpoint_type="READER",
excluded_members=[
test1.id,
test2.id,
])
static = aws.rds.ClusterEndpoint("static",
cluster_identifier=default.id,
cluster_endpoint_identifier="static",
custom_endpoint_type="READER",
static_members=[
test1.id,
test3.id,
])
```
## Import
RDS Cluster Endpoints can be imported using the `cluster_endpoint_identifier`, e.g.
```sh
$ pulumi import aws:rds/clusterEndpoint:ClusterEndpoint custom_reader aurora-prod-cluster-custom-reader
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_endpoint_identifier: The identifier to use for the new endpoint. This parameter is stored as a lowercase string.
:param pulumi.Input[str] cluster_identifier: The cluster identifier.
:param pulumi.Input[str] custom_endpoint_type: The type of the endpoint. One of: `READER`, `ANY`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_members: List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty. Conflicts with `static_members`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] static_members: List of DB instance identifiers that are part of the custom endpoint group. Conflicts with `excluded_members`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an RDS Aurora Cluster Endpoint.
See the [User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster) for details.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
default = aws.rds.Cluster("default",
cluster_identifier="aurora-cluster-demo",
availability_zones=[
"us-west-2a",
"us-west-2b",
"us-west-2c",
],
database_name="mydb",
master_username="foo",
master_password="<PASSWORD>",
backup_retention_period=5,
preferred_backup_window="07:00-09:00")
test1 = aws.rds.ClusterInstance("test1",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test1",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
test2 = aws.rds.ClusterInstance("test2",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test2",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
test3 = aws.rds.ClusterInstance("test3",
apply_immediately=True,
cluster_identifier=default.id,
identifier="test3",
instance_class="db.t2.small",
engine=default.engine,
engine_version=default.engine_version)
eligible = aws.rds.ClusterEndpoint("eligible",
cluster_identifier=default.id,
cluster_endpoint_identifier="reader",
custom_endpoint_type="READER",
excluded_members=[
test1.id,
test2.id,
])
static = aws.rds.ClusterEndpoint("static",
cluster_identifier=default.id,
cluster_endpoint_identifier="static",
custom_endpoint_type="READER",
static_members=[
test1.id,
test3.id,
])
```
## Import
RDS Cluster Endpoints can be imported using the `cluster_endpoint_identifier`, e.g.
```sh
$ pulumi import aws:rds/clusterEndpoint:ClusterEndpoint custom_reader aurora-prod-cluster-custom-reader
```
:param str resource_name: The name of the resource.
:param ClusterEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cluster_endpoint_identifier: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
custom_endpoint_type: Optional[pulumi.Input[str]] = None,
excluded_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
static_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if cluster_endpoint_identifier is None and not opts.urn:
raise TypeError("Missing required property 'cluster_endpoint_identifier'")
__props__['cluster_endpoint_identifier'] = cluster_endpoint_identifier
if cluster_identifier is None and not opts.urn:
raise TypeError("Missing required property 'cluster_identifier'")
__props__['cluster_identifier'] = cluster_identifier
if custom_endpoint_type is None and not opts.urn:
raise TypeError("Missing required property 'custom_endpoint_type'")
__props__['custom_endpoint_type'] = custom_endpoint_type
__props__['excluded_members'] = excluded_members
__props__['static_members'] = static_members
__props__['tags'] = tags
__props__['arn'] = None
__props__['endpoint'] = None
super(ClusterEndpoint, __self__).__init__(
'aws:rds/clusterEndpoint:ClusterEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
cluster_endpoint_identifier: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
custom_endpoint_type: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
excluded_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
static_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ClusterEndpoint':
"""
Get an existing ClusterEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of cluster
:param pulumi.Input[str] cluster_endpoint_identifier: The identifier to use for the new endpoint. This parameter is stored as a lowercase string.
:param pulumi.Input[str] cluster_identifier: The cluster identifier.
:param pulumi.Input[str] custom_endpoint_type: The type of the endpoint. One of: `READER`, `ANY`.
:param pulumi.Input[str] endpoint: A custom endpoint for the Aurora cluster
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_members: List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty. Conflicts with `static_members`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] static_members: List of DB instance identifiers that are part of the custom endpoint group. Conflicts with `excluded_members`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["cluster_endpoint_identifier"] = cluster_endpoint_identifier
__props__["cluster_identifier"] = cluster_identifier
__props__["custom_endpoint_type"] = custom_endpoint_type
__props__["endpoint"] = endpoint
__props__["excluded_members"] = excluded_members
__props__["static_members"] = static_members
__props__["tags"] = tags
return ClusterEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of cluster
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="clusterEndpointIdentifier")
def cluster_endpoint_identifier(self) -> pulumi.Output[str]:
"""
The identifier to use for the new endpoint. This parameter is stored as a lowercase string.
"""
return pulumi.get(self, "cluster_endpoint_identifier")
@property
@pulumi.getter(name="clusterIdentifier")
def cluster_identifier(self) -> pulumi.Output[str]:
"""
The cluster identifier.
"""
return pulumi.get(self, "cluster_identifier")
@property
@pulumi.getter(name="customEndpointType")
def custom_endpoint_type(self) -> pulumi.Output[str]:
"""
The type of the endpoint. One of: `READER`, `ANY`.
"""
return pulumi.get(self, "custom_endpoint_type")
@property
@pulumi.getter
def endpoint(self) -> pulumi.Output[str]:
"""
A custom endpoint for the Aurora cluster
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="excludedMembers")
def excluded_members(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of DB instance identifiers that aren't part of the custom endpoint group. All other eligible instances are reachable through the custom endpoint. Only relevant if the list of static members is empty. Conflicts with `static_members`.
"""
return pulumi.get(self, "excluded_members")
@property
@pulumi.getter(name="staticMembers")
def static_members(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of DB instance identifiers that are part of the custom endpoint group. Conflicts with `excluded_members`.
"""
return pulumi.get(self, "static_members")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
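Besides creating endpoints, the static `get` method shown above can adopt an existing custom endpoint by its ID, which is the `cluster_endpoint_identifier` used at creation time. A sketch with a hypothetical identifier:
```python
import pulumi
import pulumi_aws as aws

# Read back an endpoint that already exists in AWS.
existing = aws.rds.ClusterEndpoint.get("custom_reader",
    id="aurora-prod-cluster-custom-reader")  # hypothetical identifier

# The DNS address AWS assigned to the custom endpoint.
pulumi.export("reader_dns", existing.endpoint)
```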
#### File: pulumi_aws/signer/get_signing_profile.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetSigningProfileResult',
'AwaitableGetSigningProfileResult',
'get_signing_profile',
]
@pulumi.output_type
class GetSigningProfileResult:
"""
A collection of values returned by getSigningProfile.
"""
def __init__(__self__, arn=None, id=None, name=None, platform_display_name=None, platform_id=None, revocation_records=None, signature_validity_periods=None, status=None, tags=None, version=None, version_arn=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if platform_display_name and not isinstance(platform_display_name, str):
raise TypeError("Expected argument 'platform_display_name' to be a str")
pulumi.set(__self__, "platform_display_name", platform_display_name)
if platform_id and not isinstance(platform_id, str):
raise TypeError("Expected argument 'platform_id' to be a str")
pulumi.set(__self__, "platform_id", platform_id)
if revocation_records and not isinstance(revocation_records, list):
raise TypeError("Expected argument 'revocation_records' to be a list")
pulumi.set(__self__, "revocation_records", revocation_records)
if signature_validity_periods and not isinstance(signature_validity_periods, list):
raise TypeError("Expected argument 'signature_validity_periods' to be a list")
pulumi.set(__self__, "signature_validity_periods", signature_validity_periods)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
if version_arn and not isinstance(version_arn, str):
raise TypeError("Expected argument 'version_arn' to be a str")
pulumi.set(__self__, "version_arn", version_arn)
@property
@pulumi.getter
def arn(self) -> str:
"""
The Amazon Resource Name (ARN) for the signing profile.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="platformDisplayName")
def platform_display_name(self) -> str:
"""
A human-readable name for the signing platform associated with the signing profile.
"""
return pulumi.get(self, "platform_display_name")
@property
@pulumi.getter(name="platformId")
def platform_id(self) -> str:
"""
The ID of the platform that is used by the target signing profile.
"""
return pulumi.get(self, "platform_id")
@property
@pulumi.getter(name="revocationRecords")
def revocation_records(self) -> Sequence['outputs.GetSigningProfileRevocationRecordResult']:
"""
Revocation information for a signing profile.
"""
return pulumi.get(self, "revocation_records")
@property
@pulumi.getter(name="signatureValidityPeriods")
def signature_validity_periods(self) -> Sequence['outputs.GetSigningProfileSignatureValidityPeriodResult']:
"""
The validity period for a signing job.
"""
return pulumi.get(self, "signature_validity_periods")
@property
@pulumi.getter
def status(self) -> str:
"""
The status of the target signing profile.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
        A map of tags associated with the signing profile.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> str:
"""
The current version of the signing profile.
"""
return pulumi.get(self, "version")
@property
@pulumi.getter(name="versionArn")
def version_arn(self) -> str:
"""
The signing profile ARN, including the profile version.
"""
return pulumi.get(self, "version_arn")
class AwaitableGetSigningProfileResult(GetSigningProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSigningProfileResult(
arn=self.arn,
id=self.id,
name=self.name,
platform_display_name=self.platform_display_name,
platform_id=self.platform_id,
revocation_records=self.revocation_records,
signature_validity_periods=self.signature_validity_periods,
status=self.status,
tags=self.tags,
version=self.version,
version_arn=self.version_arn)
def get_signing_profile(name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSigningProfileResult:
"""
Provides information about a Signer Signing Profile.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
production_signing_profile = aws.signer.get_signing_profile(name="prod_profile_DdW3Mk1foYL88fajut4mTVFGpuwfd4ACO6ANL0D1uIj7lrn8adK")
```
:param str name: The name of the target signing profile.
    :param Mapping[str, str] tags: A map of tags associated with the signing profile.
"""
__args__ = dict()
__args__['name'] = name
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:signer/getSigningProfile:getSigningProfile', __args__, opts=opts, typ=GetSigningProfileResult).value
return AwaitableGetSigningProfileResult(
arn=__ret__.arn,
id=__ret__.id,
name=__ret__.name,
platform_display_name=__ret__.platform_display_name,
platform_id=__ret__.platform_id,
revocation_records=__ret__.revocation_records,
signature_validity_periods=__ret__.signature_validity_periods,
status=__ret__.status,
tags=__ret__.tags,
version=__ret__.version,
version_arn=__ret__.version_arn)
```
#### File: pulumi_aws/signer/outputs.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
__all__ = [
'SigningJobDestination',
'SigningJobDestinationS3',
'SigningJobRevocationRecord',
'SigningJobSignedObject',
'SigningJobSignedObjectS3',
'SigningJobSource',
'SigningJobSourceS3',
'SigningProfileRevocationRecord',
'SigningProfileSignatureValidityPeriod',
'GetSigningJobRevocationRecordResult',
'GetSigningJobSignedObjectResult',
'GetSigningJobSignedObjectS3Result',
'GetSigningJobSourceResult',
'GetSigningJobSourceS3Result',
'GetSigningProfileRevocationRecordResult',
'GetSigningProfileSignatureValidityPeriodResult',
]
@pulumi.output_type
class SigningJobDestination(dict):
def __init__(__self__, *,
s3: 'outputs.SigningJobDestinationS3'):
"""
:param 'SigningJobDestinationS3Args' s3: A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> 'outputs.SigningJobDestinationS3':
"""
A configuration block describing the S3 Destination object: See S3 Destination below for details.
"""
return pulumi.get(self, "s3")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobDestinationS3(dict):
def __init__(__self__, *,
bucket: str,
prefix: Optional[str] = None):
"""
:param str bucket: Name of the S3 bucket.
        :param str prefix: An Amazon S3 object key prefix that you can use to limit signed object keys to those beginning with the specified prefix.
"""
pulumi.set(__self__, "bucket", bucket)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> str:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
"""
        An Amazon S3 object key prefix that you can use to limit signed object keys to those beginning with the specified prefix.
"""
return pulumi.get(self, "prefix")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobRevocationRecord(dict):
def __init__(__self__, *,
reason: Optional[str] = None,
revoked_at: Optional[str] = None,
revoked_by: Optional[str] = None):
if reason is not None:
pulumi.set(__self__, "reason", reason)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> Optional[str]:
return pulumi.get(self, "reason")
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[str]:
return pulumi.get(self, "revoked_at")
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[str]:
return pulumi.get(self, "revoked_by")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobSignedObject(dict):
def __init__(__self__, *,
s3s: Optional[Sequence['outputs.SigningJobSignedObjectS3']] = None):
"""
        :param Sequence['SigningJobSignedObjectS3Args'] s3s: A configuration block describing the S3 Signed Object: See Signed Object below for details.
"""
if s3s is not None:
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Optional[Sequence['outputs.SigningJobSignedObjectS3']]:
"""
        A configuration block describing the S3 Signed Object: See Signed Object below for details.
"""
return pulumi.get(self, "s3s")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobSignedObjectS3(dict):
def __init__(__self__, *,
bucket: Optional[str] = None,
key: Optional[str] = None):
"""
:param str bucket: Name of the S3 bucket.
:param str key: Key name of the bucket object that contains your unsigned code.
"""
if bucket is not None:
pulumi.set(__self__, "bucket", bucket)
if key is not None:
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> Optional[str]:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Key name of the bucket object that contains your unsigned code.
"""
return pulumi.get(self, "key")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobSource(dict):
def __init__(__self__, *,
s3: 'outputs.SigningJobSourceS3'):
"""
        :param 'SigningJobSourceS3Args' s3: A configuration block describing the S3 Source object: See S3 Source below for details.
"""
pulumi.set(__self__, "s3", s3)
@property
@pulumi.getter
def s3(self) -> 'outputs.SigningJobSourceS3':
"""
        A configuration block describing the S3 Source object: See S3 Source below for details.
"""
return pulumi.get(self, "s3")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningJobSourceS3(dict):
def __init__(__self__, *,
bucket: str,
key: str,
version: str):
"""
:param str bucket: Name of the S3 bucket.
:param str key: Key name of the bucket object that contains your unsigned code.
        :param str version: Version of your source image in your version-enabled S3 bucket.
"""
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> str:
"""
Name of the S3 bucket.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def key(self) -> str:
"""
Key name of the bucket object that contains your unsigned code.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def version(self) -> str:
"""
        Version of your source image in your version-enabled S3 bucket.
"""
return pulumi.get(self, "version")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningProfileRevocationRecord(dict):
def __init__(__self__, *,
revocation_effective_from: Optional[str] = None,
revoked_at: Optional[str] = None,
revoked_by: Optional[str] = None):
if revocation_effective_from is not None:
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
if revoked_at is not None:
pulumi.set(__self__, "revoked_at", revoked_at)
if revoked_by is not None:
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> Optional[str]:
return pulumi.get(self, "revocation_effective_from")
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> Optional[str]:
return pulumi.get(self, "revoked_at")
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> Optional[str]:
return pulumi.get(self, "revoked_by")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SigningProfileSignatureValidityPeriod(dict):
def __init__(__self__, *,
type: str,
value: int):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> int:
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GetSigningJobRevocationRecordResult(dict):
def __init__(__self__, *,
reason: str,
revoked_at: str,
revoked_by: str):
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "revoked_at", revoked_at)
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter
def reason(self) -> str:
return pulumi.get(self, "reason")
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> str:
return pulumi.get(self, "revoked_at")
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> str:
return pulumi.get(self, "revoked_by")
@pulumi.output_type
class GetSigningJobSignedObjectResult(dict):
def __init__(__self__, *,
s3s: Sequence['outputs.GetSigningJobSignedObjectS3Result']):
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Sequence['outputs.GetSigningJobSignedObjectS3Result']:
return pulumi.get(self, "s3s")
@pulumi.output_type
class GetSigningJobSignedObjectS3Result(dict):
def __init__(__self__, *,
bucket: str,
key: str):
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
@property
@pulumi.getter
def bucket(self) -> str:
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@pulumi.output_type
class GetSigningJobSourceResult(dict):
def __init__(__self__, *,
s3s: Sequence['outputs.GetSigningJobSourceS3Result']):
pulumi.set(__self__, "s3s", s3s)
@property
@pulumi.getter
def s3s(self) -> Sequence['outputs.GetSigningJobSourceS3Result']:
return pulumi.get(self, "s3s")
@pulumi.output_type
class GetSigningJobSourceS3Result(dict):
def __init__(__self__, *,
bucket: str,
key: str,
version: str):
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def bucket(self) -> str:
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def version(self) -> str:
return pulumi.get(self, "version")
@pulumi.output_type
class GetSigningProfileRevocationRecordResult(dict):
def __init__(__self__, *,
revocation_effective_from: str,
revoked_at: str,
revoked_by: str):
pulumi.set(__self__, "revocation_effective_from", revocation_effective_from)
pulumi.set(__self__, "revoked_at", revoked_at)
pulumi.set(__self__, "revoked_by", revoked_by)
@property
@pulumi.getter(name="revocationEffectiveFrom")
def revocation_effective_from(self) -> str:
return pulumi.get(self, "revocation_effective_from")
@property
@pulumi.getter(name="revokedAt")
def revoked_at(self) -> str:
return pulumi.get(self, "revoked_at")
@property
@pulumi.getter(name="revokedBy")
def revoked_by(self) -> str:
return pulumi.get(self, "revoked_by")
@pulumi.output_type
class GetSigningProfileSignatureValidityPeriodResult(dict):
def __init__(__self__, *,
type: str,
value: int):
pulumi.set(__self__, "type", type)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> int:
return pulumi.get(self, "value")
```
#### File: pulumi_aws/workspaces/get_workspace.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
]
@pulumi.output_type
class GetWorkspaceResult:
"""
A collection of values returned by getWorkspace.
"""
def __init__(__self__, bundle_id=None, computer_name=None, directory_id=None, id=None, ip_address=None, root_volume_encryption_enabled=None, state=None, tags=None, user_name=None, user_volume_encryption_enabled=None, volume_encryption_key=None, workspace_id=None, workspace_properties=None):
if bundle_id and not isinstance(bundle_id, str):
raise TypeError("Expected argument 'bundle_id' to be a str")
pulumi.set(__self__, "bundle_id", bundle_id)
if computer_name and not isinstance(computer_name, str):
raise TypeError("Expected argument 'computer_name' to be a str")
pulumi.set(__self__, "computer_name", computer_name)
if directory_id and not isinstance(directory_id, str):
raise TypeError("Expected argument 'directory_id' to be a str")
pulumi.set(__self__, "directory_id", directory_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if root_volume_encryption_enabled and not isinstance(root_volume_encryption_enabled, bool):
raise TypeError("Expected argument 'root_volume_encryption_enabled' to be a bool")
pulumi.set(__self__, "root_volume_encryption_enabled", root_volume_encryption_enabled)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if user_name and not isinstance(user_name, str):
raise TypeError("Expected argument 'user_name' to be a str")
pulumi.set(__self__, "user_name", user_name)
if user_volume_encryption_enabled and not isinstance(user_volume_encryption_enabled, bool):
raise TypeError("Expected argument 'user_volume_encryption_enabled' to be a bool")
pulumi.set(__self__, "user_volume_encryption_enabled", user_volume_encryption_enabled)
if volume_encryption_key and not isinstance(volume_encryption_key, str):
raise TypeError("Expected argument 'volume_encryption_key' to be a str")
pulumi.set(__self__, "volume_encryption_key", volume_encryption_key)
if workspace_id and not isinstance(workspace_id, str):
raise TypeError("Expected argument 'workspace_id' to be a str")
pulumi.set(__self__, "workspace_id", workspace_id)
if workspace_properties and not isinstance(workspace_properties, list):
raise TypeError("Expected argument 'workspace_properties' to be a list")
pulumi.set(__self__, "workspace_properties", workspace_properties)
@property
@pulumi.getter(name="bundleId")
def bundle_id(self) -> str:
return pulumi.get(self, "bundle_id")
@property
@pulumi.getter(name="computerName")
def computer_name(self) -> str:
"""
The name of the WorkSpace, as seen by the operating system.
"""
return pulumi.get(self, "computer_name")
@property
@pulumi.getter(name="directoryId")
def directory_id(self) -> str:
return pulumi.get(self, "directory_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> str:
"""
The IP address of the WorkSpace.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="rootVolumeEncryptionEnabled")
def root_volume_encryption_enabled(self) -> bool:
return pulumi.get(self, "root_volume_encryption_enabled")
@property
@pulumi.getter
def state(self) -> str:
"""
The operational state of the WorkSpace.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
return pulumi.get(self, "user_name")
@property
@pulumi.getter(name="userVolumeEncryptionEnabled")
def user_volume_encryption_enabled(self) -> bool:
return pulumi.get(self, "user_volume_encryption_enabled")
@property
@pulumi.getter(name="volumeEncryptionKey")
def volume_encryption_key(self) -> str:
return pulumi.get(self, "volume_encryption_key")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
return pulumi.get(self, "workspace_id")
@property
@pulumi.getter(name="workspaceProperties")
def workspace_properties(self) -> Sequence['outputs.GetWorkspaceWorkspacePropertyResult']:
return pulumi.get(self, "workspace_properties")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWorkspaceResult(
bundle_id=self.bundle_id,
computer_name=self.computer_name,
directory_id=self.directory_id,
id=self.id,
ip_address=self.ip_address,
root_volume_encryption_enabled=self.root_volume_encryption_enabled,
state=self.state,
tags=self.tags,
user_name=self.user_name,
user_volume_encryption_enabled=self.user_volume_encryption_enabled,
volume_encryption_key=self.volume_encryption_key,
workspace_id=self.workspace_id,
workspace_properties=self.workspace_properties)
def get_workspace(directory_id: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
user_name: Optional[str] = None,
workspace_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
"""
Use this data source to get information about a workspace in [AWS Workspaces](https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces.html) Service.
## Example Usage
### Filter By Workspace ID
```python
import pulumi
import pulumi_aws as aws
example = aws.workspaces.get_workspace(workspace_id="ws-cj5xcxsz5")
```
### Filter By Directory ID & User Name
```python
import pulumi
import pulumi_aws as aws
example = aws.workspaces.get_workspace(directory_id="d-9967252f57",
user_name="Example")
```
:param str directory_id: The ID of the directory for the WorkSpace. You have to specify `user_name` along with `directory_id`. You cannot combine this parameter with `workspace_id`.
:param Mapping[str, str] tags: The tags for the WorkSpace.
:param str user_name: The user name of the user for the WorkSpace. This user name must exist in the directory for the WorkSpace. You cannot combine this parameter with `workspace_id`.
:param str workspace_id: The ID of the WorkSpace. You cannot combine this parameter with `directory_id`.
"""
__args__ = dict()
__args__['directoryId'] = directory_id
__args__['tags'] = tags
__args__['userName'] = user_name
__args__['workspaceId'] = workspace_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:workspaces/getWorkspace:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value
return AwaitableGetWorkspaceResult(
bundle_id=__ret__.bundle_id,
computer_name=__ret__.computer_name,
directory_id=__ret__.directory_id,
id=__ret__.id,
ip_address=__ret__.ip_address,
root_volume_encryption_enabled=__ret__.root_volume_encryption_enabled,
state=__ret__.state,
tags=__ret__.tags,
user_name=__ret__.user_name,
user_volume_encryption_enabled=__ret__.user_volume_encryption_enabled,
volume_encryption_key=__ret__.volume_encryption_key,
workspace_id=__ret__.workspace_id,
workspace_properties=__ret__.workspace_properties)
```
|
{
"source": "jen6/consoleme",
"score": 2
}
|
#### File: cdk/consoleme_ecs_service/consoleme_spoke_accounts_stack.py
```python
from aws_cdk import aws_iam as iam
from aws_cdk import core as cdk
from constants import MAIN_ACCOUNT_ID, SPOKE_BASE_NAME
class ConsolemeSpokeAccountsStack(cdk.Stack):
"""
    Spoke-account stack for running ConsoleMe on ECS,
    granting the necessary permissions to the ConsoleMe main-account role.
"""
def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
trusted_role_arn = "arn:aws:iam::" + MAIN_ACCOUNT_ID + ":role/ConsoleMeTaskRole"
spoke_role = iam.Role(
self,
f"{SPOKE_BASE_NAME}TrustRole",
role_name="ConsoleMeTrustRole",
assumed_by=iam.ArnPrincipal(arn=trusted_role_arn),
)
spoke_role.add_to_policy(
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
                actions=[
                    "autoscaling:Describe*",
                    "cloudwatch:Get*",
                    "cloudwatch:List*",
                    "config:BatchGet*",
                    "config:List*",
                    "config:Select*",
                    "ec2:DescribeRegions",
                    "ec2:DescribeSubnets",
                    "ec2:DescribeVpcEndpoints",
                    "ec2:DescribeVpcs",
                    "iam:*",
                    "s3:GetBucketPolicy",
                    "s3:GetBucketTagging",
                    "s3:ListAllMyBuckets",
                    "s3:ListBucket",
                    "s3:PutBucketPolicy",
                    "s3:PutBucketTagging",
                    "sns:GetTopicAttributes",
                    "sns:ListTagsForResource",
                    "sns:ListTopics",
                    "sns:SetTopicAttributes",
                    "sns:TagResource",
                    "sns:UntagResource",
                    "sqs:GetQueueAttributes",
                    "sqs:GetQueueUrl",
                    "sqs:ListQueues",
                    "sqs:ListQueueTags",
                    "sqs:SetQueueAttributes",
                    "sqs:TagQueue",
                    "sqs:UntagQueue",
                ],
resources=["*"],
)
)
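# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical CDK entry point showing how this spoke-account stack
# could be deployed; the account id and region below are placeholders.
if __name__ == "__main__":
    app = cdk.App()
    ConsolemeSpokeAccountsStack(
        app,
        f"{SPOKE_BASE_NAME}Stack",  # hypothetical stack id reusing the repo's constant
        env=cdk.Environment(account="123456789012", region="us-east-1"),
    )
    app.synth()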
```
|
{
"source": "Jenaer/FeatDepth",
"score": 2
}
|
#### File: core/evaluation/eval_hooks.py
```python
import os
import os.path as osp
import cv2
import matplotlib.pyplot as plt
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import Hook
from mmcv.parallel import scatter, collate
from torch.utils.data import Dataset
import numpy as np  # np is used directly below; don't rely on the star import to provide it
from .pixel_error import *
MIN_DEPTH = 1e-3
MAX_DEPTH = 80
def change_input_variable(data):
for k, v in data.items():
data[k] = torch.as_tensor(v).float()
return data
def unsqueeze_input_variable(data):
for k, v in data.items():
data[k] = torch.unsqueeze(v, dim=0)
return data
class NonDistEvalHook(Hook):
def __init__(self, dataset, cfg):
assert isinstance(dataset, Dataset)
self.dataset = dataset
self.interval = cfg.get('interval', 1)
self.out_path = cfg.get('work_dir', './')
self.cfg = cfg
def after_train_epoch(self, runner):
print('evaluation..............................................')
abs_rel = AverageMeter()
sq_rel = AverageMeter()
rmse = AverageMeter()
rmse_log = AverageMeter()
a1 = AverageMeter()
a2 = AverageMeter()
a3 = AverageMeter()
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
        for idx in range(len(self.dataset)):
data = self.dataset[idx]
data = change_input_variable(data)
data = unsqueeze_input_variable(data)
with torch.no_grad():
result = runner.model(data)
disp = result[("disp", 0, 0)]
pred_disp, _ = disp_to_depth(disp)
pred_disp = pred_disp.cpu()[0, 0].numpy()
gt_depth = data['gt_depth'].cpu()[0].numpy()
gt_height, gt_width = gt_depth.shape[:2]
pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))
pred_depth = 1 / pred_disp
mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)
crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,
0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)
crop_mask = np.zeros(mask.shape)
crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
mask = np.logical_and(mask, crop_mask)
pred_depth = pred_depth[mask]
gt_depth = gt_depth[mask]
ratio = np.median(gt_depth) / np.median(pred_depth)
pred_depth *= ratio
pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH
abs_rel_, sq_rel_, rmse_, rmse_log_, a1_, a2_, a3_ = compute_errors(gt_depth, pred_depth)
abs_rel.update(abs_rel_)
sq_rel.update(sq_rel_)
rmse.update(rmse_)
rmse_log.update(rmse_log_)
a1.update(a1_)
a2.update(a2_)
a3.update(a3_)
print('a1_ is ', a1_)
print('a1 is ', a1.avg)
class DistEvalHook(Hook):
def __init__(self, dataset, interval=1, cfg=None):
assert isinstance(dataset, Dataset)
self.dataset = dataset
self.interval = interval
self.cfg = cfg
def after_train_epoch(self, runner):
print('evaluation..............................................')
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
t = 0
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data = change_input_variable(data)
data_gpu = scatter(collate([data], samples_per_gpu=1), [torch.cuda.current_device()])[0]
# compute output
with torch.no_grad():
t1 = cv2.getTickCount()
result = runner.model(data_gpu)
t2 = cv2.getTickCount()
t += cv2.getTickFrequency() / (t2-t1)
disp = result[("disp", 0, 0)]
pred_disp, _ = disp_to_depth(disp)
pred_disp = pred_disp.cpu()[0, 0].numpy()
gt_depth = data['gt_depth'].cpu().numpy()
gt_height, gt_width = gt_depth.shape[:2]
pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))
pred_depth = 1 / pred_disp
mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)
crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,
0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)
crop_mask = np.zeros(mask.shape)
crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
mask = np.logical_and(mask, crop_mask)
pred_depth = pred_depth[mask]
gt_depth = gt_depth[mask]
ratio = np.median(gt_depth) / np.median(pred_depth)
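            # With a known stereo baseline the repo applies a fixed scale factor;
            # otherwise the per-image median ratio computed above is used, since
            # monocular depth is only defined up to scale.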
if self.cfg.data['stereo_scale']:
pred_depth *= 36
else:
pred_depth *= ratio
pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH
abs_rel_, sq_rel_, rmse_, rmse_log_, a1_, a2_, a3_ = compute_errors(gt_depth, pred_depth)
# if runner.rank == 0:
# if idx % 5 == 0:
# img_path = os.path.join(self.cfg.work_dir, 'visual_{:0>4d}.png'.format(idx))
# vmax = np.percentile(pred_disp, 95)
# plt.imsave(img_path, pred_disp, cmap='magma', vmax=vmax)
result = {}
result['abs_rel'] = abs_rel_
result['sq_rel'] = sq_rel_
result['rmse'] = rmse_
result['rmse_log'] = rmse_log_
result['a1'] = a1_
result['a2'] = a2_
result['a3'] = a3_
result['scale'] = ratio
results[idx] = result
batch_size = runner.world_size
if runner.rank == 0:
for _ in range(batch_size):
prog_bar.update()
if runner.rank == 0:
print('\n')
print('FPS:', t/len(self.dataset))
print('\n')
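            # Rank 0 waits for every worker to dump its shard of results to a
            # temp pickle, then merges the per-rank files back into one list,
            # ordered by dataset index, before evaluating.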
dist.barrier()
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
self.evaluate(runner, results)
else:
tmp_file = osp.join(runner.work_dir,
'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
dist.barrier()
dist.barrier()
def evaluate(self, runner, results):
raise NotImplementedError
class DistEvalMonoHook(DistEvalHook):
def evaluate(self, runner, results):
if mmcv.is_str(results):
assert results.endswith('.pkl')
results = mmcv.load(results)
elif not isinstance(results, list):
raise TypeError('results must be a list of numpy arrays or a filename, not {}'.format(type(results)))
abs_rel = AverageMeter()
sq_rel = AverageMeter()
rmse = AverageMeter()
rmse_log = AverageMeter()
a1 = AverageMeter()
a2 = AverageMeter()
a3 = AverageMeter()
scale = AverageMeter()
        print('results len is ', len(results))
ratio = []
for result in results:
abs_rel.update(result['abs_rel'])
sq_rel.update(result['sq_rel'])
rmse.update(result['rmse'])
rmse_log.update(result['rmse_log'])
a1.update(result['a1'])
a2.update(result['a2'])
a3.update(result['a3'])
scale.update(result['scale'])
ratio.append(result['scale'])
runner.log_buffer.output['abs_rel'] = abs_rel.avg
runner.log_buffer.output['sq_rel'] = sq_rel.avg
runner.log_buffer.output['rmse'] = rmse.avg
runner.log_buffer.output['rmse_log'] = rmse_log.avg
runner.log_buffer.output['a1'] = a1.avg
runner.log_buffer.output['a2'] = a2.avg
runner.log_buffer.output['a3'] = a3.avg
runner.log_buffer.output['scale mean'] = scale.avg
runner.log_buffer.output['scale std'] = np.std(ratio)
runner.log_buffer.ready = True
```
#### File: model/mono_autoencoder/layers.py
```python
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
def __init__(self):
super(SSIM, self).__init__()
self.mu_x_pool = nn.AvgPool2d(3, 1)
self.mu_y_pool = nn.AvgPool2d(3, 1)
self.sig_x_pool = nn.AvgPool2d(3, 1)
self.sig_y_pool = nn.AvgPool2d(3, 1)
self.sig_xy_pool = nn.AvgPool2d(3, 1)
self.refl = nn.ReflectionPad2d(1)
self.C1 = 0.01 ** 2
self.C2 = 0.03 ** 2
def forward(self, x, y):
x = self.refl(x)
y = self.refl(y)
mu_x = self.mu_x_pool(x)
mu_y = self.mu_y_pool(y)
sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def upsample(x):
return F.interpolate(x, scale_factor=2, mode="nearest")
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv = Conv3x3(in_channels, out_channels)
self.nonlin = nn.ELU(inplace=True)
def forward(self, x):
out = self.conv(x)
out = self.nonlin(out)
return out
class Conv1x1(nn.Module):
def __init__(self, in_channels, out_channels, bias=False):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(int(in_channels), int(out_channels), kernel_size=1, stride=1, bias=bias)
def forward(self, x):
out = self.conv(x)
return out
class Conv3x3(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv3x3, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(1)
else:
self.pad = nn.ZeroPad2d(1)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class Conv5x5(nn.Module):
def __init__(self, in_channels, out_channels, use_refl=True):
super(Conv5x5, self).__init__()
if use_refl:
self.pad = nn.ReflectionPad2d(2)
else:
self.pad = nn.ZeroPad2d(2)
self.conv = nn.Conv2d(int(in_channels), int(out_channels), 5)
def forward(self, x):
out = self.pad(x)
out = self.conv(out)
return out
class CRPBlock(nn.Module):
def __init__(self, in_planes, out_planes, n_stages):
super(CRPBlock, self).__init__()
for i in range(n_stages):
setattr(self, '{}_{}'.format(i + 1, 'pointwise'), Conv1x1(in_planes if (i == 0) else out_planes, out_planes, False))
self.stride = 1
self.n_stages = n_stages
self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
def forward(self, x):
top = x
for i in range(self.n_stages):
top = self.maxpool(top)
top = getattr(self, '{}_{}'.format(i + 1, 'pointwise'))(top)
x = top + x
return x
def compute_depth_errors(gt, pred):
thresh = torch.max((gt / pred), (pred / gt))
a1 = (thresh < 1.25 ).float().mean()
a2 = (thresh < 1.25 ** 2).float().mean()
a3 = (thresh < 1.25 ** 3).float().mean()
rmse = (gt - pred) ** 2
rmse = torch.sqrt(rmse.mean())
rmse_log = (torch.log(gt) - torch.log(pred)) ** 2
rmse_log = torch.sqrt(rmse_log.mean())
abs_rel = torch.mean(torch.abs(gt - pred) / gt)
sq_rel = torch.mean((gt - pred) ** 2 / gt)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
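# --- Usage sketch (not part of the original module) ---
# SSIM returns a per-pixel dissimilarity map, (1 - SSIM) / 2, clamped to
# [0, 1], for NCHW image tensors; the random inputs below are placeholders.
if __name__ == "__main__":
    x = torch.rand(2, 3, 64, 64)
    y = torch.rand(2, 3, 64, 64)
    print(SSIM()(x, y).mean().item())  # scalar photometric dissimilarity
    print(upsample(x).shape)           # nearest-neighbour 2x upsampling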
```
#### File: model/mono_fm_joint/encoder.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
from .resnet import resnet18, resnet34, resnet50, resnet101
class Encoder(nn.Module):
def __init__(self, num_layers, pretrained_path=None):
super(Encoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {18: resnet18,
34: resnet34,
50: resnet50,
101: resnet101,}
if num_layers not in resnets:
raise ValueError("{} is not a valid number of resnet layers".format(num_layers))
self.encoder = resnets[num_layers]()
if pretrained_path is not None:
checkpoint = torch.load(pretrained_path)
self.encoder.load_state_dict(checkpoint)
if num_layers > 34:
self.num_ch_enc[1:] *= 4
# for name, param in self.encoder.named_parameters():
# if 'bn' in name:
# param.requires_grad = False
def forward(self, input_image):
self.features = []
self.features.append(self.encoder.relu(self.encoder.bn1(self.encoder.conv1(input_image))))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
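# --- Usage sketch (not part of the original module) ---
# The encoder returns five feature maps at strides 2, 4, 8, 16 and 32 whose
# channel widths follow num_ch_enc; the input resolution below is an arbitrary
# KITTI-style placeholder.
if __name__ == "__main__":
    enc = Encoder(num_layers=18)
    feats = enc(torch.rand(1, 3, 192, 640))
    print([tuple(f.shape) for f in feats])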
```
|
{
"source": "Jenaf37/CMSim",
"score": 3
}
|
#### File: Jenaf37/CMSim/sim.py
```python
from cmDistribution import *
from timeit import default_timer as timer
class TestTimeout(Exception):
pass
class SimMigrate:
def __init__(self,addSelect,deSelect,startDist):
self.addSelect = addSelect
self.deSelect = deSelect
self.startDist = startDist
def run(self,time):#returns when nothing to delevel
startTime = timer()
results=[]
while (timer() - startTime < time):
x=self.startDist()
costs = 0
while True :
if not x.delevelWithSelector(self.deSelect):
break
costs +=1
while not x.addCMwithSelector(self.addSelect):
costs +=1
results.append(costs)
return results
def test(self,timeout):
startTime = timer()
x=self.startDist()
costs = 0
while True :
if timer()-startTime > timeout:
raise TestTimeout("Timeout in outer loop")
if not x.delevelWithSelector(self.deSelect):
break
costs +=1
while not x.addCMwithSelector(self.addSelect):
if timer()-startTime > timeout:
raise TestTimeout("timeout in inner loop")
costs +=1
return [costs,x]
class SimRespec:
def __init__(self,addSelect,totalCM):
self.addSelect = addSelect
self.totalCM = totalCM
def run(self,time):#returns when totalCM have been spent
startTime=timer()
results=[]
while(timer()-startTime < time):
x= cmDistribution()
costs = 5
CMspent = 0
while (CMspent < self.totalCM):
if x.addCMwithSelector(self.addSelect):
CMspent +=1
else :
costs +=1
results.append(costs)
return results
def test(self,timeout):
startTime = timer()
x= cmDistribution()
costs = 5
CMspent = 0
while (CMspent < self.totalCM):
if timer()-startTime > timeout:
raise TestTimeout("timeout")
if x.addCMwithSelector(self.addSelect):
CMspent +=1
else :
costs +=1
return [costs,x]
class SimReMig:
def __init__(self,addSelectLoose,deSelect,addSelectStrict,totalCM):
self.addSelectLoose = addSelectLoose
self.deSelect = deSelect
self.addSelectStrict = addSelectStrict
self.totalCM = totalCM
def run (self,time):
startTime=timer()
results=[]
while(timer()-startTime < time):
x= cmDistribution()
costs = 5
# respec without reroll
CMspent=0
while (CMspent < self.totalCM):
if not x.addCMwithSelector(self.addSelectLoose):
raise RuntimeError('Loose selector must always choose an option')
CMspent += 1
            # migrate the result to the desired output
while True :
if not x.delevelWithSelector(self.deSelect):
break
costs +=1
while not x.addCMwithSelector(self.addSelectStrict):
costs +=1
results.append(costs)
return results
def test(self,timeout):
startTime=timer()
x= cmDistribution()
costs = 5
# respec without reroll
CMspent=0
while (CMspent < self.totalCM):
if timer()-startTime > timeout:
raise TestTimeout("timeout in respec phase")
if not x.addCMwithSelector(self.addSelectLoose):
raise RuntimeError('Loose selector must always choose an option')
CMspent += 1
        # migrate the result to the desired output
while True :
if timer()-startTime > timeout:
raise TestTimeout("timeout in outer loop of migrate")
if not x.delevelWithSelector(self.deSelect):
break
costs +=1
while not x.addCMwithSelector(self.addSelectStrict):
if timer()-startTime > timeout:
raise TestTimeout("timeout in inner loop of migrate")
costs +=1
return [costs,x]
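# --- Usage sketch (not part of the original file) ---
# The selector callables and the starting-distribution factory come from
# cmDistribution; the names below are hypothetical placeholders for whatever
# that module actually exports.
# sim = SimRespec(addSelect=some_add_selector, totalCM=100)
# costs = sim.run(time=10)          # run trials for roughly 10 seconds
# print(sum(costs) / len(costs))    # mean respec cost across trials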
```
|
{
"source": "jenapidev/DjangoREST",
"score": 3
}
|
#### File: circles/models/invitations.py
```python
from django.db import models
#Utilities
from cride.utils.models import CRideModel
#Managers
from cride.circles.managers import InvitationManager
class Invitation(CRideModel):
    """Unique circle invitations are created using this model."""
code = models.CharField(max_length=50, unique=True)
issued_by = models.ForeignKey(
'users.User',
on_delete=models.CASCADE,
help_text='Circle member that is providing the invitation',
related_name='issued_by'
)
used_by = models.ForeignKey(
'users.User',
on_delete=models.CASCADE,
null=True,
help_text='User that used the code to enter the circle'
)
circle = models.ForeignKey('circles.Circle', on_delete=models.CASCADE)
used = models.BooleanField(default=False)
used_at = models.DateTimeField(blank=True, null=True)
#Managers
objects = InvitationManager()
    def __str__(self):
        """Return circle slug and code."""
return '#{}: #{}'.format(self.circle.slug_name, self.code)
```
#### File: rides/permissions/rides.py
```python
from rest_framework.permissions import BasePermission
class IsRideOwner(BasePermission):
    """Verify that the requesting user owns the ride."""
    def has_object_permission(self, request, view, obj):
        """Verify that the requesting user is the creator of the object to modify."""
        return request.user == obj.offered_by
class IsNotRideOwner(BasePermission):
    """Verify that the requesting user is not the ride owner."""
    def has_object_permission(self, request, view, obj):
        """Verify that the requesting user is not the creator of the object."""
        return request.user != obj.offered_by
```
#### File: cride/users/permissions.py
```python
from rest_framework.permissions import BasePermission
class IsAccountOwner(BasePermission):
    """Allow access to objects only to their owners."""
def has_object_permission(self, request, view, obj):
"""Check object and user"""
return request.user == obj
```
|
{
"source": "JenardKin/django-react-boilerplate",
"score": 2
}
|
#### File: wopr/models/energy.py
```python
from django.db import models
class TEnergydata(models.Model):
siteid = models.ForeignKey('TSites', models.DO_NOTHING, db_column='siteID') # Field name made lowercase.
id = models.ForeignKey('TSiteconfig', models.DO_NOTHING, db_column='id')
ts = models.DateTimeField()
periodid = models.BigIntegerField(db_column='periodID', primary_key=True, db_index=True) # Field name made lowercase.
nws = models.FloatField(blank=True, null=True)
kw_net = models.FloatField(blank=True, null=True)
kw_exp = models.FloatField(blank=True, null=True)
validfrom = models.DateTimeField(db_column='validFrom') # Field name made lowercase.
validto = models.DateTimeField(db_column='validTo') # Field name made lowercase.
kw_min_exp = models.FloatField(db_column='kW_min_exp', blank=True, null=True) # Field name made lowercase.
curtailed = models.SmallIntegerField()
edited = models.SmallIntegerField(blank=True, null=True)
class Meta:
db_table = 't_EnergyData'
unique_together = (('siteid', 'id', 'periodid'),)
def __str__(self):
return str(self.siteid) + ' ' + str(self.id) + ', ' + str(self.periodid)
```
#### File: wopr/models/site.py
```python
from django.db import models
class TSites(models.Model):
siteid = models.IntegerField(db_column='SiteID', primary_key=True) # Field name made lowercase.
description = models.CharField(db_column='Description', max_length=100) # Field name made lowercase.
doimportflow = models.IntegerField(db_column='doImportFlow') # Field name made lowercase.
dsnid = models.IntegerField(db_column='DSNID', blank=True, null=True) # Field name made lowercase.
strwstagname = models.CharField(db_column='strWSTagName', max_length=100, blank=True, null=True) # Field name made lowercase.
strkwtagname = models.CharField(db_column='strkWTagName', max_length=100, blank=True, null=True) # Field name made lowercase.
streventtagname = models.CharField(db_column='strEventTagName', max_length=100, blank=True, null=True) # Field name made lowercase.
strdsn = models.CharField(db_column='strDSN', max_length=100, blank=True, null=True) # Field name made lowercase.
tz_offsetfromhistorian_h = models.IntegerField(db_column='tz_offsetFromHistorian_h') # Field name made lowercase.
eventmod1000 = models.SmallIntegerField(db_column='EventMod1000') # Field name made lowercase.
strstatustagname = models.CharField(db_column='strStatusTagName', max_length=100, blank=True, null=True) # Field name made lowercase.
nnsite1 = models.IntegerField(db_column='nnSite1', blank=True, null=True) # Field name made lowercase.
nnsite2 = models.IntegerField(db_column='nnSite2', blank=True, null=True) # Field name made lowercase.
albertasmp = models.SmallIntegerField(db_column='AlbertaSMP', blank=True, null=True) # Field name made lowercase.
greencreditstart = models.DateTimeField(db_column='GreenCreditStart', blank=True, null=True) # Field name made lowercase.
greencreditend = models.DateTimeField(db_column='GreenCreditEnd', blank=True, null=True) # Field name made lowercase.
greencredit_cd = models.FloatField(db_column='GreenCredit_cd', blank=True, null=True) # Field name made lowercase.
ppaescalation = models.CharField(db_column='PPAEscalation', max_length=200, blank=True, null=True) # Field name made lowercase.
greencreditstartperiod = models.BigIntegerField(db_column='GreenCreditStartPeriod', blank=True, null=True) # Field name made lowercase.
greencreditendperiod = models.BigIntegerField(db_column='GreenCreditEndPeriod', blank=True, null=True) # Field name made lowercase.
power_in_mw = models.SmallIntegerField(db_column='Power_in_MW', blank=True, null=True) # Field name made lowercase.
importtimeoffset_h = models.IntegerField(db_column='importTimeOffset_h', blank=True, null=True) # Field name made lowercase.
capacity_mw = models.FloatField(db_column='Capacity_MW', blank=True, null=True) # Field name made lowercase.
jvrate = models.FloatField(db_column='JVRate', blank=True, null=True) # Field name made lowercase.
financereportordering = models.IntegerField(db_column='FinanceReportOrdering', blank=True, null=True) # Field name made lowercase.
class Meta:
db_table = 't_sites'
def getSiteDescription(self):
return self.description
def __str__(self):
return str(self.siteid) + " - " + self.description
class TSiteconfig(models.Model):
siteid = models.ForeignKey('TSites', models.DO_NOTHING, db_column='siteID') # Field name made lowercase.
id = models.IntegerField(db_column='ID', primary_key=True) # Field name made lowercase.
turbine = models.CharField(db_column='Turbine', max_length=255) # Field name made lowercase.
kksname = models.CharField(db_column='KKSName', max_length=255) # Field name made lowercase.
turbtypeid = models.IntegerField(db_column='turbTypeID') # Field name made lowercase.
pad = models.IntegerField(db_column='Pad', blank=True, null=True) # Field name made lowercase.
gearboxfrom = models.DateTimeField(db_column='GearboxFrom', blank=True, null=True) # Field name made lowercase.
gearbox_make = models.CharField(db_column='Gearbox Make', max_length=255, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
gearbox_model = models.CharField(db_column='Gearbox Model', max_length=255, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
generatorfrom = models.DateTimeField(db_column='GeneratorFrom', blank=True, null=True) # Field name made lowercase.
generator_make = models.CharField(db_column='Generator Make', max_length=255, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
generator_model = models.CharField(db_column='Generator Model', max_length=255, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
nn1 = models.IntegerField(blank=True, null=True)
nn2 = models.IntegerField(blank=True, null=True)
includeinsitetotals = models.SmallIntegerField(db_column='IncludeInSiteTotals') # Field name made lowercase.
mw = models.DecimalField(db_column='MW', max_digits=18, decimal_places=3, blank=True, null=True) # Field name made lowercase.
class Meta:
db_table = 't_SiteConfig'
unique_together = (('siteid', 'id'),)
```
#### File: server/wopr/utils.py
```python
import json
from datetime import timedelta, datetime, date
import pytz
from django.db import connection
from .models import *
def isNum(data):
try:
int(data)
return True
except ValueError:
return False
# Converter for json.dumps() that turns a datetime object into a string like "2012-03-09 01:50:00" (dropping the "T" separator)
def dateConverter(o):
if isinstance(o, datetime):
return o.__str__()
return o
# code that makes a dict from a raw sql query
def dictfetchall(cursor):
"Return all rows from a cursor as a dict"
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
# used for quality report to make the dictionary into the format:
# {editID:{ts_edit,description,Turbine,period_from,period_to,ts_editstart,ts_editend,username,comment}}
def idPairDict(d):
retdict = {}
for each in d:
        subdict = {'ts_edit': each['ts_edit'], 'description': each['description'], 'turbine': each['Turbine'], 'period_from': each['period_from'], 'period_to': each['period_to'], 'ts_editstart': each['ts_EditStart'], 'ts_editend': each['ts_EditEnd'], 'username': each['username'], 'comment': each['comment']}
retdict[each['editID']] = subdict
return retdict
# Return an array that contains the colors for each state
def getStateColors():
return [
{
'state': 'CTH',
'description': 'Contact Turbine-Hours',
'code': 0,
'color': '#FFFFFF'
},
{
'state': 'RSTH',
'description': 'Reserve Shutdown Turbine-Hours',
'code': 1,
'color': '#cedcff'
},
{
'state': 'FTH',
'description': 'Forced Turbine-Hours',
'code': 2,
'color': '#ffa500'
},
{
'state': 'MTH',
'description': 'Maintenance Turbine-Hours',
'code': 3,
'color': '#ffff00'
},
{
'state': 'PTH',
'description': 'Planned Turbine-Hours',
'code': 4,
'color': '#3eb503'
},
{
'state': 'oFTH',
'description': 'Out of Management Control Forced Turbine-Hours',
'code': 5,
'color': '#089ad9'
},
{
'state': 'oMTH',
'description': 'Out of Management Control Maintenance Turbine-Hours',
'code': 6,
'color': '#05668f'
},
{
'state': 'oPTH',
'description': 'Out of Management Control Planned Turbine-Hours',
'code': 7,
'color': '#808080'
},
{
'state': 'RUTH',
'description': 'Resource Unavailable Turbine-Hours',
'code': 8,
'color': '#FFFFFF'
},
{
'state': 'IRTH',
'description': 'Inactive Reserve Turbine-Hours',
'code': 9,
'color': '#800080'
},
{
'state': 'MBTH',
'description': 'Mothballed Turbine-Hours',
'code': 10,
'color': '#800000'
},
{
'state': 'RTH',
'description': 'Retired Unit Turbine-Hours',
'code': 11,
'color': '#008080'
},
{
'state': 'DTH',
'description': 'Derated Turbine-Hours',
'code': 12,
'color': '#45f29f'
},
{
'state': 'oDTH',
'description': 'Out of Management Control Derated Turbine-Hours',
'code': 13,
'color': '#FF69B4'
},
{
'state': 'FDXTH - Env',
'description': 'Forced Delay Turbine Hours - Environment',
'code': 14,
'color': '#f59d67'
},
        # Skips code no. 15 here for some reason, according to WOPRLegend
{
'state': 'FDXTH - Eq',
'description': 'Forced Delay Turbine Hours - Equipment',
'code': 16,
'color': '#f59d67'
},
{
'state': 'FDXTH - Lab',
'description': 'Forced Delay Turbine Hours - Labour',
'code': 17,
'color': '#f59d67'
},
{
'state': 'FDXTH - Mat',
'description': 'Forced Delay Turbine Hours - Material',
'code': 18,
'color': '#f59d67'
},
{
'state': 'MDXTH - Env',
'description': 'Maintenance Delay Turbine Hours - Environment',
'code': 19,
'color': '#f7d023'
},
        # Skips code no. 20 here according to WOPRLegend
{
'state': 'MDXTH - Eq',
'description': 'Maintenance Delay Turbine Hours - Equipment',
'code': 21,
'color': '#f7d023'
},
{
'state': 'MDXTH - Lab',
'description': 'Maintenance Delay Turbine Hours - Labour',
'code': 22,
'color': '#f7d023'
},
{
'state': 'MDXTH - Mat',
'description': 'Maintenance Delay Turbine Hours - Material',
'code': 23,
'color': '#f7d023'
},
{
'state': 'PDXTH - Env',
'description': 'Planned Delay Turbine Hours - Environment',
'code': 24,
'color': '#808000'
},
        # Skips code no. 25 here according to WOPRLegend
{
'state': 'PDXTH - Eq',
            'description': 'Planned Delay Turbine Hours - Equipment',
'code': 26,
'color': '#808000'
},
{
'state': 'PDXTH - Lab',
            'description': 'Planned Delay Turbine Hours - Labour',
'code': 27,
'color': '#808000'
},
{
'state': 'PDXTH - Mat',
            'description': 'Planned Delay Turbine Hours - Material',
'code': 28,
'color': '#808000'
},
{
'state': 'No Data',
'description': 'No Data available from SCADA',
'code': '',
'color': '#FFFFFF'
}
]
# Return an array that contains the colors for each system
def getSystemColors():
return [
{
'system': 'BOP',
'color': '#f47742'
},
{
'system': 'Brake',
'color': '#f4b241'
},
{
'system': 'CS',
'color': '#f1f441'
},
{
'system': 'DT',
'color': '#92d330'
},
{
'system': 'Elec',
'color': '#3ba5e2'
},
{
'system': 'Ext',
'color': '#a8b6bf'
},
{
'system': 'GB',
'color': '#a8b6bf'
},
{
'system': 'Gen',
'color': '#d1c0d8'
},
{
'system': 'Hyd',
'color': '#53196b'
},
{
'system': 'Pitch',
'color': '#db9d76'
},
{
'system': 'Rotor',
'color': '#7bb5c4'
},
{
'system': 'Struct',
'color': '#97e5bd'
},
{
'system': 'Yaw',
'color': '#d17fcd'
},
{
'system': 'Wind Turbine',
'color': '#b5a97c'
},
{
'system': 'CTH',
'color': '#FFFFFF'
},
{
'system': 'PM',
'color': '#f1f441'
},
{
'system': 'Underperformance',
'color': '#FFFFFF',
'border': '#f73333'
},
{
'system': 'No Production',
'color': '#FFFFFF',
'border': '#4d4bea'
},
{
'system': 'Ext - Ice (OMC)',
'color': '#7abbcc'
},
{
'system': 'None',
'color': '#FFFFFF'
},
]
def makeChoicesList_EditsQualityCheck():
# could have got this with the orm
siteNames = TSites.objects.distinct().values_list( 'siteid', 'description' )
siteList = []
for siteid, description in siteNames:
        tup = (siteid, description)
siteList.append(tup)
return siteList
def makeSiteList():
# could have got this with the orm
siteNames = TSites.objects.distinct().values_list( 'siteid', 'description' )
siteList = [ ( ' ', ' ' ) ]
print( siteNames )
for siteid, description in siteNames:
tup = (siteid, description + ', ' + str(siteid))
siteList.append(tup)
return siteList
def makeTurbineList(site, id_from=0, id_till=500):
query_set = TSiteconfig.objects.filter(siteid=site, id__range=[id_from, id_till]).order_by('id').values('id', 'kksname')
if(len(query_set) > 0):
return [(q['id'], 'Turbine ' + str(q['id']) + ', ' + q['kksname']) for q in query_set]
    else:  # no turbines configured for this site
return [(-1,'There are no turbines available for this site.')]
# date objects between dateStart and dateEnd (inclusive), returned as a list.
# Note: inputs must be date objects.
def getDateDeltaList(dateStart, dateEnd):
delta = dateEnd - dateStart
#print('getDateDeltaList #days:',delta.days)
dateObjectDeltaList = []
for i in range(delta.days +1):
dateObjectDeltaList.append(dateStart + timedelta(i))
return dateObjectDeltaList
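# Worked example: getDateDeltaList(date(2020, 1, 1), date(2020, 1, 3))
# returns [date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)].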
# gets the occurrence data for the table in quality checks report
def getOccurrenceTableData(site_id, start_time, end_time):
# first get the date list. first convert datetimes to date.
dateObjectList = getDateDeltaList(start_time.date(), end_time.date())
#print('getOccurrenceTableData dateObjectList:',dateObjectList)
data = []
# then use those dates to get data from the database to sum up the values.
for day in dateObjectList:
numberOfCthEdits_column = occurrenceTable_getEditedCTH_columnValue(site_id, day)
noData_column = occurrenceTable_getNoData_columnValue(site_id, day)
stateChangesMidEvent = occurrenceTable_getStateChangesMidEvent(site_id, day)
systemChangesMidEvent = occurrenceTable_getSystemChangesMidEvent(site_id, day)
        # Note: site red/blue boxes and site boxes are just 'not input' for now, until it is known what they are and how to calculate them.
BlueRedBoxesNotMarked = 'not input'
SiteBoxesNotMarked = 'not input'
listToAppend = [day, numberOfCthEdits_column, noData_column, stateChangesMidEvent, systemChangesMidEvent, BlueRedBoxesNotMarked, SiteBoxesNotMarked]
data.append(listToAppend)
#print("PRINTING THIS DATA", data)
return data
# this method checks if a state change was made while the event did not change.
# A potential problem with this method is if an event starts at ~24:50 on the previous day and state changes at ~00:00 the next day, it may not be detected.
def occurrenceTable_getStateChangesMidEvent(site_id, day):
start_date = day
end_date = day + timedelta(days=1)
sum = 0
# get the events, stateid, from the t_eventdata table
events = TEventdata.objects.values('eventid','stateid').filter(siteid__exact=str(site_id)).filter(ts_start__range=(start_date,end_date)).order_by('ts_start')
if not events:
        return sum  # if nothing in the queryset, just return 0.
else:
# if the STATE changes during an event sum it
currentEventID = events.values('eventid')[0]['eventid']
currentStateID = events.values('stateid')[0]['stateid']
#print(day,'FIRSTEVENTID',currentEventID, 'FIRSTSTATEID',currentStateID)
for row in events:
eventID = row['eventid']
stateID = row['stateid']
if eventID != currentEventID:
currentEventID = eventID
currentStateID = stateID
elif stateID != currentStateID:
sum = sum +1
currentStateID = stateID
return sum
# this method checks if a system change was made while the event did not change.
# A potential problem: if an event starts at ~23:50 on the previous day and the system changes at ~00:00 the next day, the change may not be detected.
def occurrenceTable_getSystemChangesMidEvent(site_id, day):
start_date = day
end_date = day + timedelta(days=1)
sum = 0
# get the events, systemid, from the t_eventdata table
events = TEventdata.objects.values('eventid','systemid').filter(siteid__exact=str(site_id)).filter(ts_start__range=(start_date,end_date)).order_by('ts_start')
if not events:
        return sum  # if nothing in the queryset, just return 0.
else:
# if the STATE changes during an event sum it
currentEventID = events.values('eventid')[0]['eventid']
currentSystemID = events.values('systemid')[0]['systemid']
#print(day,'FIRSTEVENTID',currentEventID, 'FIRSTSYSTEMID',currentSystemID)
for row in events:
eventID = row['eventid']
systemID = row['systemid']
if eventID != currentEventID:
currentEventID = eventID
currentSystemID = systemID
elif systemID != currentSystemID:
sum = sum +1
currentSystemID = systemID
return sum
# this gets the number of times the system was set to CTH by an edit from the t_edits table
def occurrenceTable_getEditedCTH_columnValue(site_id, day):
start_date = day
#print(site_id,day,'START_DATE',start_date)
end_date = day + timedelta(days=1)
#end_date = dayEnd(day)
#print(site_id,day,'END_DATE',end_date)
# ORM stuff
events = TEdits.objects.values('comment', 'newval').filter(siteid__exact=str(site_id)).filter(ts_edit__range=(start_date,end_date)).order_by('editid')
#print(site_id,day,events.values('comment', 'newval'))
# get the sum of the system changes to CTH from this day.
sum = 0
for row in events:
#if the newval == 0 and the comment is: "Set System = CTH", then sum it up
#print("THIS IS ROW NEWVAL:",row['newval'])
if row['newval'] == 0:
if checkSystemIsCth(row) == True:
sum = sum +1
return sum
# this function is for the occurrences table in the edits quality check report.
# this function checks if the system comment is set to CTH: "Set System = CTH"
def checkSystemIsCth(rowFromEvents):
ret = False
commentString = rowFromEvents['comment']
sp = commentString.split('=')
if 'Set System' in sp[0] and 'CTH' in sp[1]:
#print("FOUND A SYSTEM SET")
ret = True
return ret
# this is a stub for now; its intended meaning still needs to be determined.
# It might correspond to rows where the state code and/or system code is null.
def occurrenceTable_getNoData_columnValue(site_id, day):
return 0
def dayStart(day):
return day.replace(hour=0, minute=0, second=0)
def dayEnd(day):
return day.replace(hour=23, minute=59, second=59)
```
|
{
"source": "jenaroaaugusto/RedesClienteServidor",
"score": 3
}
|
#### File: jenaroaaugusto/RedesClienteServidor/navegador.py
```python
import requisicao as ende
import re
import copy
# http://127.0.0.1:5000
def main():
url=input("Website URL: ")
if re.search(r'^https?://', url, re.IGNORECASE): # match an http:// or https:// prefix
busca(url)
else:
print("Endereço Invalido")
main()
def busca(url):
padrao,link=url.split("//")
print(padrao,"E",link)
if ":"in link:
linkcompleto,porta=link.split(":")
print("Aqui",linkcompleto,"e",porta)
else:
linkcompleto=copy.copy(link)
porta='80'
# validacao=linkcompleto[0:4]
# print(linkcompleto[0:4])
if "127."in linkcompleto[0:4]:
print(linkcompleto)
if '/' in linkcompleto:
local=linkcompleto.index('/')
host=copy.copy(linkcompleto[0:local])
path=copy.copy(linkcompleto[local:len(linkcompleto)])
else:
host=copy.copy(linkcompleto)
path=''
print("Funciona",host,"e",path)
ende.servidorconect(host,path,porta)
elif "/" in linkcompleto:
local=linkcompleto.index('/')
host=copy.copy(linkcompleto[0:local])
if "www."not in host: host="www."+host
path=copy.copy(linkcompleto[local:len(linkcompleto)])
ende.requisicaohost(host,path,porta)
# print(host ,"e",path)
elif "/" not in linkcompleto:
path=""
host=copy.copy(linkcompleto)
if "www."not in host: host="www."+host
ende.requisicaohost(host,path,porta)
if __name__ == "__main__":
while(1):
main()
```
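The manual `split("//")`/`split(":")` handling in `busca` reimplements what the standard library's `urllib.parse` already provides; the same decomposition as a sketch for comparison:

```python
from urllib.parse import urlparse

# Same decomposition as busca(), using the standard library instead of
# manual split("//") / split(":") handling.
parsed = urlparse('http://127.0.0.1:5000/index.html')
host = parsed.hostname          # '127.0.0.1'
port = parsed.port or 80        # 5000, falling back to 80 when omitted
path = parsed.path or ''        # '/index.html'
print(host, port, path)
```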
|
{
"source": "JenathanHoo/DoF-Hands",
"score": 2
}
|
#### File: JenathanHoo/DoF-Hands/dof_gpu.py
```python
import numpy as np
import tensorflow as tf
import os
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.misc
def Bone_Transformation_Matrix(df,length,const_joint):
"""
df : N 1 25
length : N 1 15
const_joint : N 1 15
return : RT N 16 4 4
"""
RT_all = tf.constant([1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1],shape = [1,1,4,4],dtype=tf.float32)
RT_all = tf.tile(RT_all,[df.shape[0],1,1,1])
sin_df = tf.sin(df)
cos_df = tf.cos(df)
#N 1 77
sc_df = tf.concat([tf.constant([0],shape=[df.shape[0],df.shape[1],1],dtype=tf.float32),
tf.constant([1],shape=[df.shape[0],df.shape[1],1],dtype=tf.float32),
sin_df,cos_df,-sin_df,
],-1)
sc_df =tf.transpose(sc_df,[2,1,0]) #77 1 N
sc_t = tf.concat([tf.constant([0],shape=[df.shape[0],df.shape[1],1],dtype=tf.float32),
tf.constant([1],shape=[df.shape[0],df.shape[1],1],dtype=tf.float32),
const_joint,length,
],-1)
sc_t = tf.transpose(sc_t,[2,1,0]) # 32 1 N
for i in range(5):
#coord = tf.constant([0,0,0,1],shape=[1,1,4,1])
#coord = tf.tile(coord,[df.shape[0],1,1,1])
J_1_mat_x_ind = tf.constant([[1],[0],[0],[0], [0],[2+i*5+25],[2+i*5+50],[0], [0],[2+i*5],[2+i*5+25],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_1_mat_y_ind = tf.constant([[2+i*5+1+25],[0],[2+i*5+1+0],[0], [0],[1],[0],[0], [2+i*5+1+50],[0],[2+i*5+1+25],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_1_mat_z_ind = tf.constant([[2+i*5+2+25],[2+i*5+2+50],[0],[0], [2+i*5+2],[2+i*5+2+25],[0],[0], [0],[0],[1],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_2_mat_x_ind = tf.constant([[1],[0],[0],[0], [0],[2+i*5+3+25],[2+i*5+3+50],[0], [0],[2+i*5+3],[2+i*5+3+25],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_3_mat_x_ind = tf.constant([[1],[0],[0],[0], [0],[2+i*5+4+25],[2+i*5+4+50],[0], [0],[2+i*5+4],[2+i*5+4+25],[0], [0],[0],[0],[1]],dtype=tf.int32)
R_1_mat_x = tf.transpose(tf.gather_nd(sc_df,J_1_mat_x_ind),[2,1,0]) #N 1 16
R_1_mat_y = tf.transpose(tf.gather_nd(sc_df,J_1_mat_y_ind),[2,1,0])
R_1_mat_z = tf.transpose(tf.gather_nd(sc_df,J_1_mat_z_ind),[2,1,0])
R_2_mat = tf.transpose(tf.gather_nd(sc_df,J_2_mat_x_ind),[2,1,0])
R_3_mat = tf.transpose(tf.gather_nd(sc_df,J_3_mat_x_ind),[2,1,0])
R_1_mat_x = tf.reshape(R_1_mat_x,[-1,1,4,4]) #N 1 4 4
R_1_mat_y = tf.reshape(R_1_mat_y,[-1,1,4,4])
R_1_mat_z = tf.reshape(R_1_mat_z,[-1,1,4,4])
R_2_mat = tf.reshape(R_2_mat,[-1,1,4,4])
R_3_mat = tf.reshape(R_3_mat,[-1,1,4,4])
R_1_mat = tf.matmul(R_1_mat_z,tf.matmul(R_1_mat_y,R_1_mat_x))
#mask = tf.constant([1,-1,1,1, -1,1,-1,1, 1,-1,1,1, 1,1,1,1],shape=[1,1,4,4],dtype=tf.float32)
#mask = tf.tile(mask, [df.shape[0],1,1,1]) #N 1 4 4
#transpose
#R_1_mat = tf.multiply(R_1_mat,mask) # N 1 4 4
#R_2_mat = tf.multiply(R_2_mat,mask)
#R_3_mat = tf.multiply(R_3_mat,mask)
# R_1_mat = tf.transpose(R_1_mat,[0,1,3,2])
# R_2_mat = tf.transpose(R_2_mat,[0,1,3,2])
# R_3_mat = tf.transpose(R_3_mat,[0,1,3,2])
J_0_mat_ind = tf.constant([[1],[0],[0],[2+i*3], [0],[1],[0],[2+i*3+1], [0],[0],[1],[2+i*3+2], [0],[0],[0],[1]],dtype=tf.int32)
J_1_mat_ind = tf.constant([[1],[0],[0],[0], [0],[1],[0],[2+i*3+15], [0],[0],[1],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_2_mat_ind = tf.constant([[1],[0],[0],[0], [0],[1],[0],[2+i*3+16], [0],[0],[1],[0], [0],[0],[0],[1]],dtype=tf.int32)
J_3_mat_ind = tf.constant([[1],[0],[0],[0], [0],[1],[0],[2+i*3+17], [0],[0],[1],[0], [0],[0],[0],[1]],dtype=tf.int32)
T_0_mat = tf.transpose(tf.gather_nd(sc_t,J_0_mat_ind),[2,1,0]) # N 1 16
T_1_mat = tf.transpose(tf.gather_nd(sc_t,J_1_mat_ind),[2,1,0])
T_2_mat = tf.transpose(tf.gather_nd(sc_t,J_2_mat_ind),[2,1,0])
T_3_mat = tf.transpose(tf.gather_nd(sc_t,J_3_mat_ind),[2,1,0])
T_0_mat = tf.reshape(T_0_mat,[-1,1,4,4])
T_1_mat = tf.reshape(T_1_mat,[-1,1,4,4])
T_2_mat = tf.reshape(T_2_mat,[-1,1,4,4])
T_3_mat = tf.reshape(T_3_mat,[-1,1,4,4])
#RT_all = tf.concat([RT_all,T_0_mat],1) #push back the first joint of finger
RT_1 = tf.matmul(T_0_mat,R_1_mat)
RT_all = tf.concat([RT_all,RT_1],1)
RT_2 = tf.matmul(T_0_mat,tf.matmul(R_1_mat,tf.matmul(T_1_mat,R_2_mat)))
RT_all = tf.concat([RT_all,RT_2],1)
RT_3 = tf.matmul(T_0_mat,tf.matmul(R_1_mat,tf.matmul(T_1_mat,tf.matmul(R_2_mat,tf.matmul(T_2_mat,R_3_mat)))))
RT_all = tf.concat([RT_all,RT_3],1)
#transfer to blender format (x,-z,y)
# mask_blender = tf.constant([1,0,0,0, 0,0,-1,0, 0,1,0,0, 0,0,0,1],shape=[1,1,4,4],dtype=tf.float32)
# mask_blender = tf.tile(mask_blender,[df.shape[0],16,1,1])
#RT_all = tf.matmul(mask_blender,RT_all)
return RT_all
def load_weight():
weights_dict = json.load(open("skin_weight.json"))
weights =np.zeros((4406,16))
for vId in weights_dict.keys():
for infID in weights_dict[vId].keys():
i=int(vId)
j=int(infID)
weights[i][j]=weights_dict[vId][infID]
return weights
if __name__ == '__main__':
W_input = tf.placeholder(tf.float32,shape=(1,4406,16))
restpose_input = tf.placeholder(tf.float32,shape=(1,4406,4))
dof_input = tf.placeholder(tf.float32,shape=(1,1,25))
R_input = tf.placeholder(tf.float32,shape=(1,3,4))
#
init_dof = tf.constant([-1.688597, 2.153835, -0.753169, 0.252418, 0.000000,
0.061404, -0.000000, 0.164297, 0.000000, 0.000000,
0.039628, 0.000000, 0.014808, 0.000000, 0.000000,
0.060280, 0.000000, -0.194745, 0.000000, 0.000000,
0.080853, -0.000000, -0.522267, 0.000000, 0.000000],shape=[1,1,25])
f_length = tf.constant([-2.707528, -2.002970, -2.583603,
-2.784939, -2.037804, -2.252724,
-3.580668, -2.254245, -2.568150,
-3.141463, -2.228459, -2.254165,
-2.698370, -1.875399, -1.893391],shape=[1,1,15])
#
c_joint = tf.constant([2.415248, -0.771598, -0.232259,
2.848633, -5.351968, -0.052151,
0.844809, -5.531802, 0.253453,
-1.070637, -5.270669, 0.270341,
-2.574195, -4.721943, 0.158358],shape=[1,1,15])
K = tf.constant([280.000000, 0.000000, 128.000000,
0.000000, 280.000000, 128.000000,
0.000000, 0.000000, 1.000000 ],shape=[1,3,3])
T_restpose = Bone_Transformation_Matrix(init_dof,f_length,c_joint)
T_ = Bone_Transformation_Matrix(dof_input,f_length,c_joint)
T = tf.matmul(T_,tf.matrix_inverse(T_restpose)) # N 16 4 4
#print(T)
W = tf.reshape(W_input,(W_input.shape[0],W_input.shape[1],W_input.shape[2],1,1))
W = tf.tile(W,(1,1,1,4,4)) # N 4406 16 4 4
T_all = tf.tile(tf.expand_dims(T,axis=1),(1,4406,1,1,1)) # N 4406 16 4 4
weighted_T = tf.reduce_sum(tf.multiply(W,T_all),axis=2) # N 4406 4 4
restpose = tf.expand_dims(restpose_input,axis=-1) # N 4406 4 1
curpose = tf.matmul(weighted_T,restpose) # N 4406 4 1
#mask_blender = tf.constant([1,0,0,0, 0,0,-1,0, 0,1,0,0, 0,0,0,1],shape=[1,1,4,4],dtype=tf.float32)
#curpose = tf.matmul(tf.tile(mask_blender,(1,4406,1,1)),curpose)
#curpose = tf.matmul(tf.tile(tf.expand_dims(R_input,axis=1),(1,4406,1,1)),curpose) # N 4406 3 1
proj_img = curpose
#proj_img = tf.matmul(tf.tile(tf.expand_dims(K,axis=1),(1,4406,1,1)),curpose) # N 4406 3 1 uv
pcl = []
with open('PCL.txt','r') as f:
lines = f.readlines()
for line in lines:
pcl.append(np.asarray([line.split(" ")[1],line.split(" ")[2],line.split(" ")[3],1.0],dtype=np.float32))
pcl = np.asarray(pcl) # 4406 4
mesh_data = {}
vertices = []
normals = []
faces = []
with open('000091.txt','r') as f:
lines = f.readlines()
for line in lines:
content = line.split(" ")
if content[0] == 'v':
vertices.append(np.asarray([content[1],-1*float(content[3]),content[2]],dtype=np.float32))
elif content[0] == 'vn':
normals.append(np.asarray([content[1],-1*float(content[3]),content[2]],dtype=np.float32))
elif content[0] =='f':
faces.append(np.asarray([content[1].split("/")[0],content[2].split("/")[0],content[3].split("/")[0],
content[1].split("/")[2],content[2].split("/")[2],content[3].split("/")[2]],dtype=int))
vertices = np.asarray(vertices)
normals = np.asarray(normals)
faces = np.asarray(faces)
print(vertices.shape)
faces -= 1
triangles = faces[:,:3]
vertex_faces_norm = {}
for idx in range(triangles.shape[0]):
v_key = str(triangles[idx][0])
if v_key in vertex_faces_norm:
prev_ = vertex_faces_norm[v_key]
prev_.append(faces[idx][3])
vertex_faces_norm[v_key] = prev_
else:
vertex_faces_norm[v_key] = [faces[idx][3]]
v_key = str(triangles[idx][1])
if v_key in vertex_faces_norm:
prev_ = vertex_faces_norm[v_key]
prev_.append(faces[idx][4])
vertex_faces_norm[v_key] = prev_
else:
vertex_faces_norm[v_key] =[faces[idx][4]]
v_key = str(triangles[idx][2])
if v_key in vertex_faces_norm:
prev_ = vertex_faces_norm[v_key]
prev_.append(faces[idx][5])
vertex_faces_norm[v_key] = prev_
else:
vertex_faces_norm[v_key] =[faces[idx][5]]
vertex_norms = np.zeros_like(vertices)
for idx in range(vertex_norms.shape[0]):
vfns = vertex_faces_norm[str(idx)]
norm_sum=np.array([0.,0.,0.])
for vfn in vfns:
norm_sum +=normals[vfn]
vertex_norms[idx] = norm_sum/len(vfns)
color_ =np.asarray([[0.,0.,0.],[0.15,0.15,0.15],[0.15,0.15,0.15],[0.15,0.15,0.15],[0.3,0.3,0.3],[0.3,0.3,0.3],
[0.3,0.3,0.3],[0.45,0.45,0.45],[0.45,0.45,0.45],[0.45,0.45,0.45],[0.6,0.6,0.6],
[0.6,0.6,0.6],[0.6,0.6,0.6],[0.75,0.75,0.75],[0.75,0.75,0.75],[0.75,0.75,0.75]])
weights_dict = json.load(open("skin_weight.json"))
weights =np.zeros((4406,16),dtype=np.float32)
for vId in weights_dict.keys():
for infID in weights_dict[vId].keys():
i=int(vId)
j=int(infID)
weights[i][j]=weights_dict[vId][infID]
weights_max = np.argmax(weights,axis=1)
vertex_diffuse_colors = np.zeros((4406,3))
for idx in range(weights_max.shape[0]):
vertex_diffuse_colors[idx]=color_[weights_max[idx]]
mesh_data = {}
mesh_data['vertices'] = vertices
mesh_data['normals'] = vertex_norms
mesh_data['triangles'] = triangles
mesh_data['diffuse_colors'] = vertex_diffuse_colors
np.save("hand_mesh/vertices_90.npy",vertices)
np.save("hand_mesh/normals_90.npy",vertex_norms)
np.save("hand_mesh/triangles_90.npy",triangles)
np.save("hand_mesh/diffuse_clors_90.npy",vertex_diffuse_colors)
np.save("hand_mesh/weights_90.npy",weights)
mesh_data = np.asarray(mesh_data)
np.save("restpose.npy",mesh_data)
weights_dict = json.load(open("skin_weight.json"))
weights =np.zeros((4406,16))
for vId in weights_dict.keys():
for infID in weights_dict[vId].keys():
i=int(vId)
j=int(infID)
weights[i][j]=weights_dict[vId][infID]
np.savetxt("weight.txt",weights, delimiter=" ", fmt="%.9f")
pcl = np.expand_dims(pcl,axis=0)
weights = np.expand_dims(weights,axis=0)
cur_dof = np.array([1.680591, 1.327397, 2.464877, 0.549834, 0.000000,
1.353887, -1.021418, 0.520186, 0.708544, 0.000000,
0.772618, 0.000000, 0.014808, 0.720134, 0.000000,
1.118698, 0.000000, -0.194745, 0.794283, 0.409671,
0.299145, -0.000000, -0.522267, 1.579156, 0.658056 ])
cur_dof = cur_dof.reshape((1,1,25))
R = np.array([0.420242, 0.871705, -0.252046, -2.955272,
-0.434390, 0.437127, 0.787544, 1.915788,
0.796682, -0.221473, 0.562359, 19.687462])
R = R.reshape((1,3,4))
with tf.Session() as sess:
pred_pcl = sess.run([proj_img],feed_dict={W_input:weights,restpose_input:pcl,dof_input:cur_dof,R_input:R})
pred_pcl = np.asarray(pred_pcl)
pred_pcl = pred_pcl.reshape((-1,3))
ax = plt.gca(projection='3d')
for point in pred_pcl:
ax.scatter(point[0],point[1],point[2],color='green')
flat_obj = open("flat.obj","r")
lines = flat_obj.readlines() # v:4---4406
f = open('tmppp.obj','w')
for i in range(len(lines)):
if i<=2 or i>=2+4407:
f.write(lines[i])
else:
f.write('v %.6f %.6f %.6f\n'%(pred_pcl[i-3][0], pred_pcl[i-3][1], pred_pcl[i-3][2]))
# for point in pred_pcl:
# f.write('v %.6f %.6f %.6f\n'%(point[0], point[1], point[2]))
f.close()
#plt.show()
#plt.savefig('df1.png')
```
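The `weighted_T`/`curpose` computation in `__main__` is linear blend skinning: every rest-pose vertex is moved by a weight-blended combination of the 16 bone transforms, each expressed relative to its rest pose. In symbols (my reading of the code, not the author's notation):

```latex
% v'_i: skinned vertex, w_ij: skinning weight, T_j: current bone transform,
% T_rest,j: rest-pose bone transform, \bar{v}_i: homogeneous rest-pose vertex
\[
\mathbf{v}'_i = \Big(\sum_{j=1}^{16} w_{ij}\, T_j\, T_{\mathrm{rest},j}^{-1}\Big)\, \bar{\mathbf{v}}_i,
\qquad \sum_{j=1}^{16} w_{ij} = 1 .
\]
```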
|
{
"source": "Jenazads/goAlg",
"score": 3
}
|
#### File: sort/toCode/librarysort.py
```python
def library_sort(l):
# Initialization
d = len(l)
k = [None]*(d<<1)
m = d.bit_length() # floor(log2(n) + 1)
for i in range(d): k[2*i+1] = l[i]
# main loop
a,b = 1,2
for i in range(m):
# Because multiplication by 2 occurs at the beginning of the loop,
# the first element will not be sorted at first pass, wich is wanted
# (because a single element does not need to be sorted)
a <<= 1
b <<= 1
for j in range(a,min(b,d+1)):
p = 2*j-1
s = k[p]
# Binary search
x, y = 0, p
while y-x > 1:
c = (x+y)>>1
if k[c] != None:
if k[c] < s: x = c
else: y = c
else:
e,f = c-1,c+1
while k[e] == None: e -= 1
while k[f] == None: f += 1
if k[e] > s: y = e
elif k[f] < s: x = f
else:
x, y = e, f
break
if y-x > 1: k[ (x+y)>>1 ] = s
else:
if k[x] != None:
if k[x] > s: y = x # case may occur for [2,1,0]
while s != None:
k[y], s = s, k[y]
y += 1
else: k[x] = s
k[p] = None
if b > d: break
if i < m-1:
s = p
while s >= 0:
if k[s] != None:
# In the following line, the order is very important
# because s and p may be equal in which case we want
# k[s] (which is k[p]) to remain unchanged
k[s], k[p] = None, k[s]
p -= 2
s -= 1
return [ x for x in k if x != None ]
arr = [10, 6, 8, 8, 3, 2, 7, 4,8, 8, 3, 2, 7, 4, 6, 8, 8, 3, 2, 7, 4,9, 11, 10, 9, 6, 10, 3, 5, 8, 8, 3, 2, 7, 4,8, 8, 3, 2, 7, 4, 6, 8, 8, 3, 2, 7, 4,9, 11, 10, 9, 6, 8, 8, 3, 5, 5, 7, 8, 15, 49, 65, 2]
mm = library_sort(arr)
print(arr)
print(mm)
```
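Library sort (gapped insertion sort) keeps a spare gap slot per element, hence the `d<<1` working array, to keep insertions cheap. A quick check against the built-in sort:

```python
import random

values = [random.randint(0, 100) for _ in range(200)]
assert library_sort(values) == sorted(values)
print("library_sort agrees with sorted() on 200 random ints")
```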
|
{
"source": "JenBanks8585/cityspire-ds-h",
"score": 3
}
|
#### File: cityspire-ds-h/app/external.py
```python
import os
import requests
import json
import sqlalchemy
import pandas as pd
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
from pydantic import BaseModel, SecretStr
from walkscore import WalkScoreAPI
from app import config
from app.helper import *
load_dotenv()
router = APIRouter()
headers={'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host')}
@router.get('/streamlined_rent_list')
async def streamlined_rent_list(api_key=config.settings.api_key,
city: str="New York City",
state: str="NY",
prop_type: str="condo",
limit: int=4):
"""
Parameters:
api_key
city: str
state: str Two-letter abbreviation
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns: dict
Chosen information of the requested parameters such
as addresses, state, city, lat, lon, photos, walk score, pollution info
"""
url=os.getenv('url_list_for_rent')
querystring={"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent=requests.request("GET", url, params=querystring, headers=headers,)
response = response_for_rent.json()['properties']
pollution_res = pollution(city)
rental_list=[]
for i in range(min(limit, len(response))):
line=response[i]['address']['line']
city=response[i]['address']['city']
state=response[i]['address']['state']
lat=response[i]['address']['lat']
lon=response[i]['address']['lon']
photos=response[i]['photos']
element={ 'lat': lat,
'lon': lon,
'city':city,
'state':state,
'photos': photos,
'pollution': pollution_res}
rental_list.append(element)
return rental_list
@router.get('/for_rent_list')
async def for_rent_list(api_key=config.settings.api_key,
city: str="New York City",
state: str="NY",
prop_type: str="condo",
limit: int=4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns: dict
Expanded information about the city
"""
url=os.getenv('url_list_for_rent')
querystring={"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent=requests.request("GET", url,
params=querystring,
headers=headers)
return response_for_rent.json()['properties']
@router.get('/for_rent_list/{property_id}')
async def property_detail(property_id: str="O3599084026"):
"""
Parameters:
property_id
Returns: dict
detailed information about the property
"""
url=os.getenv('url_property_detail')
querystring={"property_id":property_id}
response_prop_detail=requests.request("GET", url,
headers=headers,
params=querystring)
return response_prop_detail.json()['properties']
@router.get('/for_sale_list')
async def for_sale_list(api_key=config.settings.api_key,
city="New York City",
state="NY",
limit=4):
"""
Parameters:
city: str
state: str
limit: int number of results to populate
Returns: dict
detailed information about the property
"""
url=os.getenv('url_list_for_sale')
querystring={"city": city ,
"limit": limit,
"offset":"0",
"state_code": state,
"sort":"relevance"}
response_for_sale=requests.request("GET", url, headers=headers, params=querystring)
return response_for_sale.json()['properties']
@router.get('/walk_score')
async def get_walk_score(city:str, state: str):
"""
Parameters:
city: string
state: 2-letter state Abbreviation
Returns: dict
Returns walkscore, description, transit score and bike score
"""
response = just_walk_score(city, state)
return response
@router.get('/pollution')
async def get_pollution(city:str, state: str):
""" Input: City, 2 letter abbreviation for state
Returns a list containing WalkScore, BusScore, and BikeScore in that order
"""
response = pollution(city)
return response
```
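The router only declares endpoints; to exercise it you need an app to mount it on. A minimal sketch using FastAPI's test client, assuming the package's environment variables (`url_list_for_rent`, the RapidAPI key, etc.) are set; the import path follows the file header:

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

from app.external import router  # path per the file header above

app = FastAPI()
app.include_router(router)

client = TestClient(app)
# Hits the live RapidAPI backend, so it needs valid credentials in .env
resp = client.get('/streamlined_rent_list',
                  params={'city': 'Austin', 'state': 'TX', 'limit': 2})
print(resp.status_code, resp.json())
```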
|
{
"source": "JenBanks8585/Labs_CitySpireDS",
"score": 3
}
|
#### File: Labs_CitySpireDS/app/realtybasemodel.py
```python
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
class RentalList(BaseModel):
api_key: SecretStr = config.settings.api_key
city: str = "New York"
state: str = "NY"
prop_type: str = "condo"
limit: int = 5
@router.get('/for_rent_list_base')
async def for_rent_list_base(rentallist: RentalList):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": rentallist.city,
"state_code": rentallist.state,
"limit": rentallist.limit,
"offset": "0",
"sort":"relevance",
"prop_type": rentallist.prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()
```
#### File: Labs_CitySpireDS/app/realty.py
```python
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
from app.walk_score import *
load_dotenv()
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
@router.get('/streamlined_rent_list')
async def streamlined_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
response = response_for_rent.json()['properties']
rental_list = []
for i in range(min(limit, len(response))):
line = response[i]['address']['line']
city = response[i]['address']['city']
state = response[i]['address']['state']
lat = response[i]['address']['lat']
lon = response[i]['address']['lon']
photos = response[i]['photos']
address = line +" "+ city + " "+ state
walk_score = just_walk_score(address, lat, lon)
element = {'address': address,
'lat': lat,
'lon': lon,
'city':city,
'state':state,
'photos': photos,
'walk_score': walk_score}
rental_list.append(element)
return rental_list
@router.get('/for_rent_list')
async def for_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()['properties']
@router.get('/for_rent_list/{property_id}')
async def property_detail(property_id: str = "O3599084026"):
"""
Parameters:
property_id
Returns:
detailed information about the property
"""
url = os.getenv('url_property_detail')
querystring = {"property_id":property_id}
response_prop_detail = requests.request("GET", url, headers=headers, params=querystring)
return response_prop_detail.json()['properties']
@router.get('/for_sale_list')
async def for_sale_list(api_key = config.settings.api_key,
city = "New York City",
state= "NY",
limit = 4):
url = os.getenv('url_list_for_sale')
querystring = {"city": city ,"limit": limit,"offset":"0","state_code": state,"sort":"relevance"}
response_for_sale = requests.request("GET", url, headers=headers, params=querystring)
return response_for_sale.json()['properties']
```
|
{
"source": "JenBanks8585/lambdata-JenBanks",
"score": 3
}
|
#### File: lambdata-JenBanks/lambdata_jbanks/mod.py
```python
def enlarge(n):
return n*100
x = int(input("Enter an integer: "))
print(enlarge(x))
```
|
{
"source": "JenBanks8585/Spotmefy",
"score": 2
}
|
#### File: Spotmefy/app/__init__.py
```python
import os
from flask import Flask
from app.model import model
from app.appli import appli
def create_app():
app = Flask(__name__)
app.register_blueprint(model)
app.register_blueprint(appli)
return app
if __name__ == '__main__':
my_app = create_app()
my_app.run(debug=True)
```
|
{
"source": "JenBanks8585/twitoff_Banks",
"score": 3
}
|
#### File: flask_app/routes/home_routes.py
```python
from flask import Blueprint, render_template
home_routes = Blueprint("home_routes", __name__)
# add def index():
@home_routes.route('/')
def hello_world():
print("You visited the homepage")
#return 'Hello, World!'
return render_template("prediction_form.html")
@home_routes.route('/about')
def about():
print("You visited the About-Me page")
return "<h2 style = 'color:red'> About Me (TODO)</h2>"
```
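Blueprints do nothing until registered on an application; a minimal harness for the routes above (import path follows the file header, and `/` additionally needs the `prediction_form.html` template on disk):

```python
from flask import Flask
from flask_app.routes.home_routes import home_routes  # path per file header

app = Flask(__name__)
app.register_blueprint(home_routes)

if __name__ == '__main__':
    app.run(debug=True)  # then visit / and /about
```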
|
{
"source": "JenBanks8585/weather_wqu",
"score": 3
}
|
#### File: weather_wqu/wqu_app/app.py
```python
import os
from flask import Flask, request, render_template
from message import *
import requests  # used directly in the /myweather handler
#import pandas as pd
import json
app = Flask(__name__)
DEPLOY = os.getenv('DEPLOY')
@app.route('/')
def main():
if DEPLOY == 'heroku':
ip_address = request.headers['X-Forwarded-For']
else:
ip_address = retrieve_local_ip_address()
return render_template('index.html', message= str(greet(ip_address)))
@app.route('/weather')
def weather():
if DEPLOY == 'heroku':
ip_address = request.headers['X-Forwarded-For']
else:
ip_address = retrieve_local_ip_address()
return render_template('weather.html', message = str(weather_data(ip_address)))
@app.route('/myweather', methods = ["GET","POST"])
def myweather():
if request.method =="POST":
latitude = request.form["lat"]
longitude = request.form["lon"]
url = 'https://api.met.no/weatherapi/locationforecast/2.0/compact'
headers = {
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Referer": "https://www.google.com/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9"}
params = {'lat': latitude, 'lon': longitude}
response = requests.get(url, params=params, headers = headers)
data = response.json()
dat = data['properties']['timeseries'][0]['data']['instant']['details']
#data_df= pd.DataFrame.from_dict(dat, orient = 'index')
#data_df
dat_pretty = json.dumps(dat, indent = 4)
return render_template('myweather.html', message = str(dat))
else:
return render_template('myweather.html')
if __name__ == '__main__':
app.run(debug=True)
```
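The `/myweather` handler is a thin wrapper around met.no's locationforecast API, and the same call works standalone. A sketch with illustrative coordinates (met.no asks for an identifying User-Agent; field names follow the response shape parsed above):

```python
import requests

url = 'https://api.met.no/weatherapi/locationforecast/2.0/compact'
params = {'lat': 59.91, 'lon': 10.75}  # Oslo, for illustration
headers = {'User-Agent': 'weather-wqu-example/0.1'}  # identify yourself to met.no

data = requests.get(url, params=params, headers=headers).json()
details = data['properties']['timeseries'][0]['data']['instant']['details']
print(details['air_temperature'], details['relative_humidity'])
```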
|
{
"source": "JenBanks8585/wqu_project1",
"score": 3
}
|
#### File: wqu_project1/prescription_drug_analysis/drug_data_analysis.py
```python
import pandas as pd
def load_and_clean_data():
''' Returns the cleaned scripts, practices and chem data sets'''
scripts = pd.read_csv('~/datacourse/data-wrangling/miniprojects/dw-data/201701scripts_sample.csv.gz')
col_names = ['code', 'name', 'addr_1', 'addr_2', 'borough', 'village', 'post_code']
practices = pd.read_csv('~/datacourse/data-wrangling/miniprojects/dw-data/practices.csv.gz', names = col_names)
# Need to drop duplicate CHEM SUB rows
chem = pd.read_csv('~/datacourse/data-wrangling/miniprojects/dw-data/chem.csv.gz')
chem = chem.sort_values('CHEM SUB').drop_duplicates(subset = 'CHEM SUB', keep='first')
return scripts, practices, chem
def flag_opioids(chem):
'''Add column to dataframe flagging prescription if it is an opioid'''
cheme= chem.copy()
opioids = ['morphine',
'oxycodone',
'methadone',
'fentanyl',
'pethidine',
'buprenorphine',
'propoxyphene',
'codeine']
chem['is_opioids']= chem['NAME'].str.lower().str.contains(r'|'.join(opioids))
return chem
def calculate_z_score(scripts, chem):
'''Returns a Series of Z-scores of each practice'''
scripts_with_chem = (scripts
.merge(chem[['CHEM SUB', 'is_opioids']],
left_on = 'bnf_code',
right_on = 'CHEM SUB',
how = 'left')
.fillna(False))
# Calculate z-score for each practice
opioids_per_practice = scripts_with_chem.groupby('practice')['is_opioids'].mean()
relative_opioids_per_practice = opioids_per_practice-scripts_with_chem['is_opioids'].mean()
std_error_per_practice = scripts_with_chem['is_opioids'].std()/(scripts_with_chem['practice'].value_counts())**.5
opioid_scores = relative_opioids_per_practice/std_error_per_practice
return opioid_scores
def dump_data(results):
'''Dumps pandas dataframe of the results to disk'''
results.to_csv('practices_flagged.csv', index = False)
def flag_anomalous_practices(practices, scripts, opioid_scores, z_score_cutoff = 2, raw_count_cutoff = 50):
'''Returns practices that have z-score and raw count greater than cutoff'''
unique_practices = practices.sort_values('name').drop_duplicates(subset = 'code', keep = 'first')
unique_practices = unique_practices.set_index('code')
unique_practices['z_scores'] = opioid_scores
unique_practices['count']= scripts['practice'].value_counts()
results = unique_practices.sort_values('z_scores', ascending = False).head(100)
return results.query('z_scores > @z_score_cutoff and count > @raw_count_cutoff')
if __name__ == '__main__':
#import sys
#print(f"Running {sys.argv[0]}")
#z_score_cutoff = int(sys.argv[1])
#raw_count_cutoff = int(sys.argv[2])
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--z_score_cutoff',
default = 3,
type = int,
help = 'The Z- score cutoff for flagging practices')
parser.add_argument('--raw_count_cutoff',
default = 50,
type = int,
help = 'The raw count cutoff for flagging practices')
args = parser.parse_args()
print(args)
scripts, practices, chem = load_and_clean_data()
chem = flag_opioids(chem)
opioid_scores = calculate_z_score(scripts, chem)
anomalous_practices = flag_anomalous_practices(practices,
scripts,
opioid_scores,
z_score_cutoff = args.z_score_cutoff,
raw_count_cutoff = args.raw_count_cutoff)
dump_data(anomalous_practices)
```
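`calculate_z_score` is a per-practice one-sample z-test: the practice's opioid prescribing rate is compared against the overall rate, scaled by the standard error implied by that practice's prescription count:

```latex
% \bar{x}_p: mean of is_opioids within practice p; \bar{x}, \sigma: overall
% mean and standard deviation; n_p: number of scripts at practice p
\[
z_p = \frac{\bar{x}_p - \bar{x}}{\sigma / \sqrt{n_p}}
\]
```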
|
{
"source": "jenboyleibm/qpylib",
"score": 2
}
|
#### File: qpylib/test/test_util.py
```python
import os
import pytest
from qpylib import util_qpylib
@pytest.fixture(scope='function')
def set_env_vars():
os.environ['QRADAR_APPFW_SDK'] = 'true'
yield
del os.environ['QRADAR_APPFW_SDK']
def test_is_sdk_with_env_not_set():
assert not util_qpylib.is_sdk()
def test_is_sdk_with_env_set(set_env_vars):
assert util_qpylib.is_sdk()
```
|
{
"source": "jenca-adam/ratcave",
"score": 2
}
|
#### File: ratcave/examples/collision.py
```python
import pyglet
import ratcave as rc
import numpy as np
np.set_printoptions(suppress=True, precision=4)
window = pyglet.window.Window()
cube = rc.Mesh.from_primitive('Cube')
cube.position.z = -3
cube.scale.xyz = .3, .3, .3
# cube.visible = False
cube.collider = rc.ColliderCylinder(visible=True, ignore_axis=1)
sphere = rc.Mesh.from_primitive('Sphere', position=(0.99, 0, 0))
sphere.scale.xyz = 1.00, 1, 4
sphere.parent = cube
sphere.collider = rc.ColliderSphere(visible=True, position=(0, 0, 0))
tt = 0.
def update(dt):
global tt
tt += dt
sphere.position.x = np.sin(1.5 * tt) * 5
sphere.rotation.y += 210 * dt
cube.rotation.y += 60 * dt
sphere.uniforms['diffuse'] = (0., 1., 0.) if sphere.collider.collides_with(cube) else (0., 0., 1.)
cube.uniforms['diffuse'] = (0., 1., 0.) if cube.collider.collides_with(sphere) else (0., 0., 1.)
print(sphere.position.xyz)
# print(sphere.collider.collides_with(cube), cube.collider.collides_with(sphere))
pyglet.clock.schedule(update)
@window.event
def on_draw():
window.clear()
with rc.default_shader, rc.default_states, rc.default_camera:
for mesh in cube:
mesh.draw()
pyglet.app.run()
```
#### File: ratcave/examples/gen_texture_from_array.py
```python
import numpy as np
import pyglet
import ratcave as rc
window = pyglet.window.Window()
cube = rc.Mesh.from_primitive('Cube', position=(0, 0, -3), rotation=(45, 45, 0))
arr = np.random.randint(0, 255, size=(128, 128, 4))
arr = np.zeros_like(arr)# + 255
arr[:, :, 0] = 255
tex2 = rc.Texture(values=arr)
tex2.values = np.random.randint(0, 255, size=(128, 128, 4))
cube.textures.append(tex2)
@window.event
def on_draw():
window.clear()
with rc.default_shader, rc.default_camera, rc.default_states:
cube.draw()
def randomize_texture(dt):
tex2.values = np.random.randint(0, 255, size=(128, 128, 4))
pyglet.clock.schedule(randomize_texture)
pyglet.app.run()
```
#### File: ratcave/ratcave/materials.py
```python
class Material:
def __init__(self, diffuse=[.8, .8, .8], spec_weight=0., specular=[0., 0., 0.],
ambient=[0., 0., 0.], opacity=1., flat_shading=False, texture_file=None):
self.diffuse = diffuse
self.spec_weight = spec_weight
self.specular = specular
self.ambient = ambient
self.opacity = opacity
self.flat_shading = flat_shading
self.texture_file = texture_file
```
#### File: ratcave/ratcave/wavefront.py
```python
from .mesh import Mesh
from wavefront_reader import read_wavefront
from . import Texture
class WavefrontReader:
material_property_map = {'Kd': 'diffuse',
'Ka': 'ambient',
'Ke': 'emission',
'Ks': 'specular',
'Ni': 'Ni',
'Ns': 'spec_weight',
'd': 'd',
'illum': 'illum',
'map_Kd': 'map_Kd',
}
def __init__(self, file_name):
"""
Reads Wavefront (.obj) files created in Blender to build ratcave.graphics Mesh objects.
:param file_name: .obj file to read (assumes an accompanying .mtl file has the same base file name.)
:type file_name: str
:return:
:rtype: WavefrontReader
"""
self.file_name = file_name
self.bodies = read_wavefront(file_name)
self.textures = {}
def get_mesh(self, body_name, **kwargs):
"""Builds Mesh from geom name in the wavefront file. Takes all keyword arguments that Mesh takes."""
body = self.bodies[body_name]
vertices = body['v']
normals = body['vn'] if 'vn' in body else None
texcoords = body['vt'] if 'vt' in body else None
mesh = Mesh.from_incomplete_data(vertices=vertices, normals=normals, texcoords=texcoords, **kwargs)
uniforms = kwargs['uniforms'] if 'uniforms' in kwargs else {}
if 'material' in body:
material_props = {self.material_property_map[key]: value for key, value in body['material'].items()}
for key, value in material_props.items():
if isinstance(value, str):
if key == 'map_Kd':
if not value in self.textures:
self.textures[value] = Texture.from_image(value)
mesh.textures.append(self.textures[value])
else:
setattr(mesh, key, value)
elif hasattr(value, '__len__'): # iterable materials
mesh.uniforms[key] = value
elif key in ['d', 'illum']: # integer materials
mesh.uniforms[key] = value
elif key in ['spec_weight', 'Ni']: # float materials: should be specially converted to float if not already done.
mesh.uniforms[key] = float(value)
else:
print('Warning: Not applying uniform {}: {}'.format(key, value))
return mesh
```
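Typical use of the reader, assuming a Blender-exported `.obj` with a matching `.mtl` next to it (the file and geometry names here are illustrative):

```python
reader = WavefrontReader('scene.obj')  # hypothetical file name
print(list(reader.bodies))             # geometry names found in the file
mesh = reader.get_mesh('Cube')         # extra Mesh kwargs pass through **kwargs
```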
|
{
"source": "jenca-adam/timeago",
"score": 3
}
|
#### File: timeago/locales/sk.py
```python
base = [
["práve teraz", "pred chvíľou"],
["pred %s sekundami", "o %s sekúnd","o %s sekundy"],
["pred minútou", "o minútu"],
["pred %s minútami", "o %s minút","o %s minúty"],
["pred hodinou", "o hodinu"],
["pred %s hodinami", "o %s hodín","o %s hodiny"],
["pred dňom", "o deň"],
["pred %s dňami", "o %s dni"],
["pred týždňom", "o týždeň"],
["pred %s týždňami", "o %s týždňov","o %s týždne"],
["pred mesiacom", "o mesiac"],
["pred %s mesiacmi", "o %s mesiacov","o %s mesiace"],
["pred rokom", "o rok"],
["pred %s rokmi", "o %s rokov","o %s roky"],
]
def generate(row,y):
def formatting(time):
if y==1 and time<5:
try:
return base[row][y+1]
except IndexError:
pass
return base[row][y]
return formatting
LOCALE=generate
```
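The locale module is looked up by name when formatting, so with the 2-to-4 plural branch above, usage looks like this (assuming the standard `timeago.format` entry point):

```python
import datetime
import timeago

now = datetime.datetime.now()
print(timeago.format(now - datetime.timedelta(minutes=3), now, 'sk'))
# expected: 'pred 3 minútami'
```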
|
{
"source": "jenca-adam/weather",
"score": 3
}
|
#### File: src/old/day.py
```python
import json
class Day:
def __init__(self,dayname,highest,lowest,desc):
self.dayname=dayname
self.highest=highest
self.lowest=lowest
self.desc=desc
def __repr__(self):
return repr(self.__dict__)
```
#### File: src/old/parse.py
```python
from bs4 import BeautifulSoup as bs
from .forecast import Forecast
from .day import Day
def parsetemp(t):
return int(t.find(class_="wob_t").text)
def parseday(d):
s=bs(str(d),'html.parser')
dayname=s.find(class_="QrNVmd Z1VzSb")['aria-label']
desc=s.find(class_="DxhUm").img['alt']
tmps=bs(str(s.find(class_="wNE31c")),'html.parser')
highest=parsetemp(tmps.find(class_="vk_gy gNCp2e"))
lowest=parsetemp(tmps.find(class_="QrNVmd ZXCv8e"))
return Day(dayname,highest,lowest,desc)
def parsefcast(d,temp):
soup=bs(d,'html.parser')
g=soup.find_all(class_="wob_df")
g=[parseday(i) for i in g]
first=g[0]
nxt=g[1:]
return Forecast(temp,first,nxt)
```
#### File: weather/src/weather.py
```python
if __name__=='__main__':
print('Importing module, please wait...\r',end='')
import json
import urllib3
from bs4 import BeautifulSoup as bs
from httplib2 import Http
import httplib2
import random
import time
from geopy.geocoders import Nominatim
from selenium import webdriver
from selenium.webdriver import *
import warnings
import os
import re
import platform
import subprocess
from pathlib import Path
import selenium
import colorama
import inspect
import sys
import importlib
import difflib
from unitconvert.temperatureunits import TemperatureUnit as tu
from lxml import etree
import locale
import langdetect
import argparse
import termcolor
import tabulate
import calendar
import datetime
import termutils
import getchlib
'''Python library for getting weather from different sources.
Example:
>>> import weather
#Get temperature in Chicago tomorrow at midnight (returns temperature in Fahrenheit)
>>> weather.forecast("Chicago").tomorrow["0:00"].temp
#Get temperature in current location today at noon
>>> weather.forecast().today["12:00"].temp
#Get precipitation amount after two days in Seoul at 4 o'clock
>>> weather.forecast('Seoul').day(2)["16:00"].precip
'''
termcolor.COLORS={key:value+60 for key,value in termcolor.COLORS.items()}
#__OWM_TOKEN="<PASSWORD>"
DEBUG=False
locator=Nominatim(user_agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36")
_h=Http('.cache')
CELSIUS=0
UNIT=CELSIUS
FAHRENHEIT=1
_DONTCHECK=-1
'''Google chrome headers, used in BetaChrome'''
_HDR={"User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36"}
_URL="https://translate.googleapis.com/translate_a/single?client=gtx&ie={pe}&oe={pe}dt=bd&dt=ex&dt=ld&dt=md&dt=rw&dt=rm&dt=ss&dt=t&dt=at&dt=qc&sl={sl}&tl={tl}&hl={tl}&q={string}"
def dateadd1(date):
date=[date.year,date.month,date.day]
if date[-1]==calendar.monthrange(date[0],date[1])[1]:
date[-1]=1
if date[1]==12:
date[1]=1
date[0]+=1
else:
date[1]+=1
else:
date[-1]+=1
return datetime.date(*date)
def foc2t(foc):
u=makeunit(foc.today.weather_as_list[0].unit)
date=datetime.date.today()
dayc=0
table=[]
while True:
begintime=datetime.datetime.now().hour if not dayc else 1
try:
day=foc.day(dayc)
except IndexError:
break
for h in range(begintime,24):
weah=day[f'{h}:00']
table.append([str(date),f'{h}:00',str(weah.temp)+"°"+u,weah.precip,weah.humid,weah.wind.direction.direction,weah.wind.speed])
dayc+=1
date=dateadd1(date)
return tabulate.tabulate(table,['Date','Time',"Temperature",'Precipitation','Humidity','Wind direction','Wind speed'],tablefmt='fancy_grid')
class TooManyRequestsError(ValueError):pass
class Translator:
def __init__(self,sl,tl):
self._pe=locale.getpreferredencoding()
self._h=httplib2.Http('.cache')
self.sl=sl
self.tl=tl
def translate(self,string):
if self.sl is None:
self.sl=langdetect.detect(string)
u=_URL.format(pe=self._pe,sl=self.sl,tl=self.tl,string=string)
resp,c=self._h.request(u)
if resp.status==429:
raise TooManyRequestsError('server replied 429 (Too Many Requests). Try again later')
return json.loads(c.decode(self._pe))[0][0][0]
def translate(string,sl=None,tl='en'):
t=Translator(sl,tl)
return t.translate(string)
class _debugger:
class BadStatus(Exception):pass
def debug(self,text,status="INFO"):
if not DEBUG:
return
colors={"INFO":colorama.Fore.LIGHTBLUE_EX,"ERROR":colorama.Fore.LIGHTRED_EX,"DEBUG":colorama.Fore.LIGHTMAGENTA_EX,"WARNING":colorama.Fore.LIGHTYELLOW_EX}
if status not in colors:
raise self.BadStatus("BAD STATUS")
previous_frame = inspect.currentframe().f_back
(filename, lineno,
function_name, lines, index) = inspect.getframeinfo(previous_frame)
sys.stdout.write( f'{colorama.Fore.CYAN}{filename}:{lineno}:{function_name} {colorama.Fore.RESET}-> {colors[status]}{status}{colorama.Style.RESET_ALL}:{colors[status]}{text}{colorama.Style.RESET_ALL}\n')
debugger=_debugger()
class Browser:
'''Metaclass of BetaChrome used to bypass Google bot detection'''
def request(self,url,headers={}):
'''Request url and set USER-AGENT headers'''
debugger.debug('requested url')
hdr=self.HEADERS
hdr.update(headers)
return _h.request(url,headers=hdr)
class BetaChrome(Browser):
'''Used to bypass Google bot detection'''
HEADERS=_HDR
class DriverWarning(Warning):
'''Metaclass of NoHeadlessWarning'''
class WeatherWarning(Warning):
'''Metaclass of SVGWarning'''
class SVGWarning(WeatherWarning):
'''User is warned when SVG passed to analyzesvg() is already analyzed.
For debugging'''
class NoHeadlessWarning(DriverWarning):
'''User is warned when no headless driver is available'''
class WeatherError(BaseException):
'''Metaclass of most errors defined here'''
class NoSuchCityError(WeatherError):
'''Raised when city was not found using Nominatim'''
class DriverError(Exception):
'''Raised when no driver is available'''
h=httplib2.Http('.cache')
class IpError(ValueError):
'''Metaclass of Fail'''
class Fail(IpError):
'''Metaclass of InvalidQuery and ReservedRange'''
class InvalidQuery(Fail):
'''Raised when user entered non-existing IP to track()'''
class ReservedRange(Fail):
'''Raised when user entered
localhost or 127.0.0.1 or 0.0.0.0 to track()'''
class SetupError(WeatherError):
'''
Raised when setup.setup() was called but neither Chrome nor Firefox is installed
'''
class ReadonlyError(Exception):
'''
Raised when user wants to change Forecast, Day or SearchResult's attributes'''
class UnknownServiceError(WeatherError):
'''
Raised when user tried to use a service that does not exist.
Should be raised along with some "Did you mean <>?" suggestions
'''
class PropertyError(WeatherError):
'''
Metaclass of WindError
'''
class WindError(PropertyError):
'''
Raised when invalid wind properties are given
'''
class DirectionError(WindError):
'''
Raised when direction is not "N","NE","E","SE","S","SW","W" or "NW"(if it is a string) or higher than 360(if it is a integer)
'''
class _fastener:
def __init__(self):
os.makedirs('.cache/weather/',exist_ok=True)
if not os.path.exists('.cache/weather/citycountry.json'):
self.file=open('.cache/weather/citycountry.json','w')
self.file.write('{}')
self.file.close()
self.file=open('.cache/weather/citycountry.json')
self.data=json.load(self.file)
def dump(self):
c=open('.cache/weather/citycountry.json','w')
json.dump(self.data,c)
c.close()
def isrg(self,city):
return city in self.data
def register(self,city,country):
self.data[city]=country
self.dump()
def getcountry(self,city):
if self.isrg(city):
return self.data[city]
else:
country=_getcountry(city)
self.register(city,country)
return country
def makeunit(u):
if u==CELSIUS:
return 'C'
elif u==FAHRENHEIT:
return "F"
return u
def searchurl(query):
'''Returns Google Search URL according to query'''
return f'https://google.com/search?q={query}'
def _getcountry(city):
'''Return country that city is located in'''
a=locator.geocode(city).address.split(',')[-1].strip()
return translate(a)
def _rq(h,u):
'''Handles IpApi ConnectionResetError'''
try:
return h.request(u)
except ConnectionResetError:
return _rq(h,u)
def _track(ip):
'''Requests IpApi and returns result as dict'''
r,c=_rq(h,f'http://ip-api.com/json/{ip}')
return json.loads(c)
def _checkresp(msg,stat):
'''Raises error according to message and status returned by track()'''
if stat=='success':return
if stat=='fail':
if msg=='reserved range':
raise ReservedRange(msg)
elif msg=='invalid query':
raise InvalidQuery(msg)
raise Fail(msg)
raise IpError(msg)
def checkresp(resp,ip=''):
'''Calls _checkresp with resp'''
if 'message' not in resp:return
_checkresp(resp['message'],resp['status'])
def track(ip=''):
'''Tracks according to IP. Used to detect location (when is called with no arguments, returns data for current IP)'''
resp=_track(ip)
checkresp(resp,ip)
return resp
class Direction:
__ANGLEDICT={0:'N',45:'NE',90:'E',135:'SE',180:'S',225:'SW',270:'W',315:'NW',360:'N'}
__rvang={value:key for key,value in __ANGLEDICT.items()}
def __init__(self,ang):
if isinstance(ang,bool):
raise TypeError(
f"angle must be 'str' or 'int', not {ang.__class__.__name__!r}"
)
if ang is None:
self.direction='UNKNOWN'
self.angle=None
return
if isinstance(ang,(int,float)):
ang=int(ang)
if ang>360:
raise DirectionError("wind angle is more than 360")
self.direction=self.__ANGLEDICT[min(self.__ANGLEDICT, key=lambda x:abs(x-ang))]
self.angle=ang
elif isinstance(ang,str):
if ang.upper() not in self.__rvang:
raise DirectionError(
f"invalid wind direction : {ang!r} ( valid are {','.join(self.__ANGLEDICT.values())} )"
)
self.direction=ang.upper()
self.angle=self.__rvang[ang.upper()]
else:
raise TypeError(
f"angle must be 'str' or 'int', not {ang.__class__.__name__!r}"
)
def __repr__(self):
return self.direction
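# Illustrative behaviour of Direction (comments only, not part of the API):
#   Direction(93).direction  -> 'E'        (93 snaps to the nearest key, 90)
#   Direction('sw').angle    -> 225        (compass strings are accepted too)
#   Direction(None)          -> direction 'UNKNOWN', angle None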
class Location:
'''Implementation of location'''
def __init__(self,lat,lon,country,country_code,region_code,region,city,zipcode,tzone):
self.lat=lat
self.lon=lon
self.country=country
self.country_code=country_code
self.region=region
self.region_code=region_code
self.city=city
self.zipcode=zipcode
self.tzone=tzone
class Wind():
def __init__(self,speed=None,direction=None):
self.direction=Direction(direction)
self.speed=speed
def _parse_loc(resp):
'''Returns Location object from data returned by track'''
return Location(
resp['lat'],
resp['lon'],
resp['country'],
resp['countryCode'],
resp['region'],
resp['regionName'],
resp['city'],
resp['zip'],
resp['timezone'],
)
def iploc(ip):
'''Gets location according to IP'''
return _parse_loc(track(ip))
def curloc():
'''Gets current location'''
return iploc('')
_br=BetaChrome()
request=_br.request
def search(query):
'''Return search results from query'''
return request(searchurl(query))[1]
### Set global variables according to current location
LOCATION=curloc()
CITY=LOCATION.city
COUNTRY=LOCATION.country
### Set temperature unit to Fahrenheit if user is in US
if COUNTRY=='United States':
UNIT=FAHRENHEIT
def refresh(ip=''):
'''Gets city and country from current location(or IP location)'''
if not ip:
city=curloc().city
country=curloc().country
else:
city=iploc(ip).city
country=iploc(ip).country
return city,country
class _fcdumper:
def dump(self,forecast,file):
debugger.debug(f'Dumping data into {file!r}')
if isinstance(forecast,_WeatherChannel.Forecast):
source='google'
elif isinstance(forecast,_YR_NORI.Forecast):
source='yrno'
elif isinstance(forecast,_7Timer.Forecast):
source='7timer'
else:
raise TypeError("bad forecast type")
froot=etree.Element('forecast')
debugger.debug('Created root element')
forelem=etree.SubElement(froot,'location')
cityelem=etree.SubElement(forelem,'city')
countryelem=etree.SubElement(forelem,'country')
countryelem.text=forecast.country
unt=forecast.today.weather_as_list[0].unit
untel=etree.SubElement(froot,'unit')
untel.text='C' if unt==CELSIUS else 'F'
cityelem.text=forecast.city
srcel=etree.SubElement(froot,'source')
srcel.text=source
root=etree.SubElement(froot,'days')
dind=0
debugger.debug('iterating days')
for d in forecast.days:
delem=etree.SubElement(root,'day',index=str(dind))
debugger.debug(f'iterating weather for day {dind}')
for w in d.weather_as_dict:
recelem=etree.SubElement(delem,'record')
wa=d.weather_as_dict[w]
timelem=etree.SubElement(recelem,'time')
timelem.text=w
weather=etree.SubElement(recelem,'weather')
telem=etree.SubElement(weather,'temp')
telem.text=str(wa.temp)
pelem=etree.SubElement(weather,'precip')
pelem.text='0.0' if not wa.precip else str(float(wa.precip))
welem=etree.SubElement(weather,'wind')
wspeed=etree.SubElement(welem,'speed')
wdir=etree.SubElement(welem,'direction')
wang=etree.SubElement(wdir,'angle')
wcomp=etree.SubElement(wdir,'compass')
wspeed.text=str(wa.wind.speed)
wang.text=str(wa.wind.direction.angle)
wcomp.text=wa.wind.direction.direction
helem=etree.SubElement(weather,'humid')
helem.text="0" if not wa.humid else str(wa.humid)
dind+=1
with open(file,'w') as f:
debugger.debug('saving file')
f.write(etree.tounicode(froot,pretty_print=True))
def load(self,xmlfile):
debugger.debug('loading weather from cache')
t=etree.parse(open(xmlfile))
unit=t.find('.//unit').text
city=t.find('.//city').text
src=t.find('.//source').text
srcsvc=SERVICES[src]
country=t.find('.//country').text
times=t.findall('.//time')
temps=t.findall('.//temp')
precps=t.findall('.//precip')
humids=t.findall('.//humid')
wspeeds=t.findall('.//speed')
wangs=t.findall('.//angle')
wcomps=t.findall('.//compass')
records=len(times)
cdix=0
weather_as_dict={}
forecast=[]
debugger.debug(f'{records} records found')
for wi in range(records):
tm=times[wi].text
temp=float(temps[wi].text)
precip=float(precps[wi].text)
humid=float(humids[wi].text)
wind=float(wspeeds[wi].text)
wdirct=float(wangs[wi].text)
dix=int(temps[wi].getparent().getparent().getparent().attrib['index'])
if dix>cdix:
forecast.append(srcsvc.__class__.Day(list(weather_as_dict.values()),weather_as_dict))
weather_as_dict={}
weather_as_dict[tm]=srcsvc.__class__.Weather(Wind(wind,wdirct),precip,temp,humid,unit)
cdix=dix
return srcsvc.__class__.Forecast(forecast,city,country)
class _cacher:
def __init__(self):
os.makedirs('.cache/weather/fc/xml',exist_ok=True)
def iscached(self,cityname,ctr):
pt=f'.cache/weather/fc/xml/{ctr}/{cityname}/'
return os.path.exists(pt) and os.listdir(pt)
def getcached(self,city,ctr):
if not self.iscached(city,ctr):
return []
pt=f'.cache/weather/fc/xml/{ctr}/{city}/'
t=time.time()
ress=[]
for file in os.listdir(pt):
rt=int(file)
if -1<t-rt<3600:
ress.append(os.path.join(pt,file))
return ress
def cache(self,forecast):
pt=f'.cache/weather/fc/xml/{forecast.country}/{forecast.city}/'
os.makedirs(pt,exist_ok=True)
dumper.dump(forecast,os.path.join(pt,str(int(time.time()))))
class _WeatherChannel:
'''weather.google type'''
def __init__(self):
'''Set driver to None so future forecast requests will be faster'''
self.driver=None
class Weather:
'''Implementation of weather'''
def __init__(self,wind,precip,temp,humid=None,unit=CELSIUS):
self.temp=temp
self.precip=precip/10
self.wind=wind
self.humid=humid
self.unit=unit
class Day:
'''Implementation of day'''
def __init__(self,weatherlist,wdict):
'''
self.highest=max(
max(i.temp for i in weatherlist)
)
self.lowest=min(
min(i.temp for i in weatherlist)
)'''
self.weather_as_list=weatherlist
self.weather_as_dict=wdict
def __getitem__(self,i):
'''Return weather at time'''
return self.gettemp(i)
def splittime(self,time,after=int):
'''Splits time to hours and minutes and calling to result after'''
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def fillin(self,time):
'''Fills the time in. E.g.:fillin("8:00")="08:00"'''
return self.jointime(self.formatsec(i) for i in self.splittime(time,after=str))
def formatsec(self,time):
'''Formats seconds E.g:formatsec("8")="08"'''
if len(str(time))>2:
raise ValueError('not 2-digit or 1-digit time')
if not time:
return "00"
if len(str(time))<2:
return "0"+time
return time
def jointime(self,time):
'''Gets analog time according to hours and minutes'''
return ':'.join(str(i) for i in time)
def gettemp(self,time):
'''Returns weather at time'''
if isinstance(time,str):
time=self.splittime(time)
return self.weather_as_dict[self.fillin(self.repairtime(self.jointime(time)))]
def timetoint(self,time):
'''Converts time to integer'''
if isinstance(time,str):
return self.splittime(time)[1]+self.splittime(time)[0]*60
return time[1]+time[0]*60
def inttotime(self,i):
'''Converts integer to time. Reverse function to timetoint'''
return (i//60,i%60)
def repairtime(self,time):
'''Gets closest time that is in weather list'''
closest=lambda num,collection:min(collection,key=lambda x:abs((x-num)+1))
dy=self.weather_as_dict
dy=[self.timetoint(time) for time in dy]
qr=self.timetoint(self.roundtime(time))
return self.jointime(self.inttotime(closest(qr,dy)))
def roundtime(self,time):
'''Rounds time; e.g. roundtime("8:10")="08:00"'''
mins=int(time.split(':')[-1])
if mins==0:
return time
hrs=int(time.split(':')[0])
if mins<30:
return f'{self.formatsec(hrs)}:00'
return f'{self.formatsec(hrs+1)}:00'
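# Illustrative lookup path (comments only): gettemp('8:10') first rounds the
# time to '08:00', then repairtime() snaps it to the nearest key actually in
# weather_as_dict, so with keys {'07:00', '10:00'} the query resolves to '07:00'.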
def convert(self,d):
''' Converts dictionary to JSON serializable form'''
l={}
for a in d:
i=d[a]
m=i
if isinstance(i,self.Day):
m=i.__dict__
if isinstance(i,list):
m=self.convlist(i)
l[a]=m
return l
def convlist(self,d):
'''Converts list to JSON serializable form'''
l=[]
for i in d:
m=i
if isinstance(i,self.Day):
m=i.__dict__
l.append(m)
return l
class Forecast:
'''Implementation of weather forecast'''
def __init__(self,days,city,ctr):
self.city=city
self.country=ctr
debugger.debug("created forecast")
e=None
self.temp=days[0].weather_as_list[0].temp
self.today=days[0]
self.days=days
self.tomorrow=days[1]
'''if not isinstance(self.today,_WeatherChannel.Day):
raise TypeError(
f"'today' argument must be weather._WeatherChannel.Day, not {today.__class__.__name__}'")
try:
iter(nxt)
except:
e=TypeError("'nxt' argument is not iterable")
if e is not None:
raise e
for i in nxt:
if not isinstance(i,_WeatherChannel.Day):
raise TypeError(
f"all members of 'nxt' argument must be \"_WeatherChannel.Day\", not {i.__class__.__name__}'")'''
def day(self,num):
'''Returns weather at day n'''
return self.days[num]
def splittime(self,time,after=int):
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def fillin(self,time):
return self.jointime(self.formatsec(i) for i in self.splittime(time,after=str))
def formatsec(self,time):
if len(time)>2:
raise ValueError('not 2-digit or 1-digit time')
if len(time)<2:
return "0"+time
return time
def jointime(self,time):
return ':'.join(str(i) for i in time)
def gettemp(self,daynum,time):
if isinstance(time,str):
time=self.splittime(time)
return self.days[daynum][self.fillin(self.repairtime(self.jointime(time),daynum))]
def timetoint(self,time):
if isinstance(time,str):
return self.splittime(time)[1]+self.splittime(time)[0]*60
return time[1]+time[0]*60
def inttotime(self,i):
return (i//60,i%60)
def repairtime(self,time,day):
closest=lambda num,collection:min(collection,key=lambda x:abs((x-num)+1))
dy=self.days[day]
dy=[self.timetoint(time) for time in dy]
qr=self.timetoint(self.roundtime(time))
return self.jointime(self.inttotime(closest(qr,dy)))
def roundtime(self,time):
mins=int(time.split(':')[-1])
if mins==0:
return time
hrs=int(time.split(':')[0])
if mins<50:
return f'{self.formatsec(hrs)}:00'
return f'{self.formatsec(hrs+1)}:00'
def splittime(self,time,after=int):
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def parsetemp(self,t,unit=UNIT):
'''Parses temperature HTML'''
return int(t.find_all(class_="wob_t")[unit].text)
def parseday(self,d,unit=UNIT):
'''Parses one day'''
s=bs(str(d),'html.parser')
dayname=s.find(class_="QrNVmd Z1VzSb")['aria-label']
desc=s.find(class_="DxhUm").img['alt']
tmps=bs(str(s.find(class_="wNE31c")),'html.parser')
highest=self.parsetemp(tmps.find(class_="vk_gy gNCp2e"),unit=unit)
lowest=self.parsetemp(tmps.find(class_="QrNVmd ZXCv8e"),unit=unit)
return self.Day(dayname,highest,lowest,desc)
def getsvg(self,ch,unit):
debugger.debug("Getting temperature")
'''Gets SVG with temperature'''
try:
ch.find_elements_by_class_name("jyfHyd")[1].click()
except Exception: pass  # the element may be absent; ignore
#wait until forecast loads
time.sleep(0.7)
svg=ch.find_element_by_id('wob_gsvg')
svg=svg.get_attribute('outerHTML')
return self.analyzesvg(svg,unit)
def getprecip(self,ch):
'''Gets precipitation data'''
debugger.debug("Analyzing precipitation")
precip_html_element=ch.find_element_by_id('wob_pg')
precip_html=precip_html_element.get_attribute('outerHTML')
precip_soup=bs(precip_html,
'html.parser')
columns=precip_soup.findAll(class_="wob_hw")
days=[]
graph={}
lastTime=0
for col in columns:
time=col.div['aria-label'].split(' ')[-1]
perc=int(
col.div['aria-label'].
split(' ')[0].
replace('%',''))
if self.splittime(time)[0]<lastTime:
days.append(graph)
graph={}
graph[time]=perc
lastTime=self.splittime(time)[0]
return days
def getwind(self,ch):
'''Gets wind data'''
debugger.debug("Analyzing wind")
wind_html_element=ch.find_element_by_id('wob_wg')
wind_html=wind_html_element.get_attribute('outerHTML')
wind_soup=bs(wind_html,
'html.parser')
spds=wind_soup.findAll(class_="wob_hw")
days=[]
graph={}
lastTime=0
for selem in spds:
time=selem.div.span['aria-label'].split(' ')[-1]
spd=selem.div.span.text.split(' ')[0]
if self.splittime(time)[0]<lastTime:
days.append(graph)
graph={}
graph[time]=int(spd)
lastTime=self.splittime(time)[0]
return days
def getgraph(self,ch,unit):
'''Gets full data and formats them into Forecast object'''
debugger.debug("Parser has started!")
svg=self.getsvg(ch,unit)
precip=self.getprecip(ch)
wind=self.getwind(ch)
svglist=[list(a.keys()) for a in svg]
preciplist=[list(a.keys()) for a in precip]
windlist=[list(a.keys()) for a in wind]
wthrs=[]
wthrs_inner=[]
ind=0
debugger.debug("Formating weather data into forecast")
for a in wind:
inner=0
wthrs_inner=[]
for j in a:
t=a[j]
wthrs_inner.append(
self.Weather(
Wind(t*3.6666667,0),
precip[ind][j],
svg[ind][j],
unit=unit
)
)
inner+=1
ind+=1
wthrs.append(wthrs_inner)
wtdct=[]
wtdc_inner={}
ind=0
for s in wind:
inner=0
wtdc_inner={}
for j in s:
t=s[j]
wtdc_inner[j]=self.Weather(
Wind(t,0),
precip[ind][j],
svg[ind][j]
)
inner+=1
ind+=1
wtdct.append(wtdc_inner)
days=[]
yndex=0
for day in wtdct:
days.append(
self.Day(
wthrs[yndex],
wtdct[yndex],
)
)
yndex+=1
return days
def analyzesvg(self,svg,unit):
'''Changes svg to dict of temperatures'''
debugger.debug("Analyzing temperature")
if isinstance(svg,list) :
warnings.warn(
SVGWarning(
'''Looks like the temperature SVG is already analyzed,
but double-check it!'''
)
)
return svg
soup=bs(svg,'html.parser')
labels=soup.findAll('text')
days=[]
graph={}
curcels=not unit
lastTime=0
for l in labels:
if curcels:
time=l['aria-label'].split(' ')[-1]
tu=self._prstmpstr(l['aria-label'])
if self.splittime(time)[0]<lastTime:
days.append(graph)
graph={}
graph[time]=tu
lastTime=self.splittime(time)[0]
curcels=not curcels
return days
def parsefcast(self,days,temp,unit,city,ctr):
'''Parses forecast'''
return self.Forecast(days,city,ctr)
def forecast(self,cityname=CITY,countryname='',unit=None,driver=None):
'''Gets forecast'''
err=None
self.driver=driver
if self.driver is None:
driver=_driverSearch()
self.driver=driver.best()
wd=self.driver
if not countryname:
try:
countryname=getcountry(cityname)
except AttributeError:
err=NoSuchCityError(f"no such city: '{cityname}'")
if err:
raise err
if cityname==CITY and not countryname:
countryname=COUNTRY
if unit is None:
if countryname.lower()=='united states':
unit=FAHRENHEIT
else:
unit=CELSIUS
ca=cacher.getcached(cityname,countryname)
for caf in ca:
foc=dumper.load(caf)
if isinstance(foc,self.__class__.Forecast):
return foc
if countryname==_DONTCHECK:
query=f'weather {cityname}'
else:
query=f'weather {cityname} {countryname}'
c=search(query)
soup=bs(c,'html.parser')
wd.get(searchurl(query))
try:
svg=self.getgraph(wd,unit)
tempnow=int(soup.body.find_all('span',class_="wob_t")[unit].text)
fli=soup.body.find('div',id="wob_dp")
foc=self.parsefcast(svg,tempnow,unit,cityname,countryname)
cacher.cache(foc)
return foc
except Exception as e:
debugger.debug(f"could not load forecast for {cityname}, trying without country; ({str(e)} throwed)","ERROR")
err=WeatherError(f"could not get forecast for city {cityname}({str(e)} throwed)")
if countryname==_DONTCHECK:
raise err
return self.forecast(cityname,_DONTCHECK,unit)
def ipforecast(self,ip):
return self.forecast(*refresh(ip))
def _prstmpstr(self,string):
pattern=re.compile(r'^([0-9\-]+)°')
match=pattern.search(string)
if not match:
raise ValueError(
'Could not parse temperature string')
        return int(match.group(1))
class _YR_NORI:
'''yr.no source'''
def __init__(self):
self.driver=None
self.ForecastParser=self._ForecastParser()
class SearchResults:
'''Implementation of search results'''
def __init__(self,l):
self.res=l
self.first=l[0]
def __getitem__(self,item):
return self.result(item)
def __setitem__(self,item,what):
raise ReadonlyError('read-only')
def result(self,i):
return self.res[i]
def __repr__(self):
return repr(self.res)
class Day:
'''Implementation of one day'''
def __init__(self,wlst,wdict):
self.weather_as_list=wlst
self.weather_as_dict=wdict
def __getitem__(self,i):
return self.gettemp(i.split(':')[0])
def splittime(self,time,after=int):
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def fillin(self,time):
return self.jointime(self.formatsec(i) for i in self.splittime(time,after=str))
def formatsec(self,time):
if len(str(time))>2:
raise ValueError('not 2-digit or 1-digit time')
if not time:
return ""
if len(str(time))<2:
return "0"+str(time)
return time
def jointime(self,time):
return ':'.join(str(i) for i in time)
def gettemp(self,time):
if isinstance(time,str):
time=self.splittime(time)
return self.weather_as_dict[self.fillin(self.repairtime(self.jointime(time)))]
def timetoint(self,time):
if isinstance(time,str):
return self.splittime(time)[1]+self.splittime(time)[0]*60
return time[1]+time[0]*60
def inttotime(self,i):
return (i//60,i%60)
def repairtime(self,time):
closest=lambda num,collection:min(collection,key=lambda x:abs((x-num)+1))
dy=self.weather_as_dict
dy=[self.timetoint(time) for time in dy]
qr=self.timetoint(self.roundtime(time))
return self.jointime(self.inttotime(closest(qr,dy)))
def roundtime(self,time):
mins=int(time.split(':')[-1])
if mins==0:
return time
hrs=int(time.split(':')[0])
if mins<50:
return f'{self.formatsec(hrs)}:00'
return f'{self.formatsec(hrs+1)}:00'
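        # Illustrative lookup (hypothetical data, not from the source): with
        # weather_as_dict == {"07:00": w1, "10:00": w2}, gettemp("7:23") first
        # rounds the query to "07:00" via roundtime(), then repairtime() snaps
        # it to the closest stored key, so w1 is returned.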
class Forecast:
'''implementation of weather forecast'''
def __init__(self,days,city,country,*args,**kwargs):
self.today=days[0]
self.tomorrow=days[1]
self.days=days
self.city=city
self.country=country
def __getitem__(self,i):
return self.day(i)
def __setitem__(self,item,what):
raise ReadonlyError('read-only')
def day(self,daynum):
return self.days[daynum]
class Weather:
'''Implementation of weather'''
def __init__(self,wind,precip,temp,humid=None,unit=CELSIUS):
self.temp=temp
self.precip=precip
self.wind=wind
self.humid=humid
self.unit=unit
class _ForecastParser:
'''Parses forecast'''
def parse(self,content,unit,ci,co):
content=json.loads(content)
timeseries=content["properties"]["timeseries"]
weather_as_dict={}
fcast=[]
lastday=None
for wr in timeseries:
war=wr["data"]
tm=self.parsetime(wr["time"])
dy=self.parseday(wr["time"])
instant=war["instant"]["details"]
temp=instant["air_temperature"]
wind=instant["wind_speed"]
wdirct=instant["wind_from_direction"]
precip=war["next_1_hours"]["details"]["precipitation_amount"] if "next_1_hours" in war else 0.0
humid=instant["relative_humidity"]
weather_as_dict[tm]=_YR_NORI.Weather(Wind(wind,wdirct),precip,temp,humid,unit)
if lastday is not None:
if dy!=lastday:
fcast.append(_YR_NORI.Day(list(weather_as_dict.values()),weather_as_dict))
weather_as_dict={}
lastday=dy
foc= _YR_NORI.Forecast(fcast,ci,co)
cacher.cache(foc)
return foc
def close_ad(self,driver):
'''Closes "We have a new graph" pop-up'''
            try:
                driver.find_elements_by_class_name("feature-promo-modal-meteogram__link")[0].click()
            except Exception:
                pass
def parsetime(self,time):
            pattern=re.compile(r'(\d{2}:\d{2}):\d{2}')
return pattern.search(time).group(1)
def parseday(self,time):
            pattern=re.compile(r'\d{4}-\d{2}-(\d{2})')
return pattern.search(time).group(1)
def searchurl(self,q):
'''Returns yr.no search URL'''
return f'https://www.yr.no/en/search?q={q}'
def expandhref(self,href):
'''Expands href'''
return f'https://www.yr.no{href}'
def search(self,q):
'''Searches yr.no'''
self.driver=_driverSearch().best()
self.driver.get(
self.searchurl(q)
)
results=bs(self.driver.find_elements_by_class_name('search-results-list')[0].get_attribute('outerHTML'),
'html.parser')
results=results.findAll('li')
results=[self.expandhref(result.a['href']) for result in results]
return self.SearchResults(results)
def forecast(self,cityname=CITY,countryname=None,unit=None):
'''Gets forecast'''
err=None
        if not countryname:
            if cityname==CITY:
                countryname=COUNTRY
            else:
                try:
                    countryname=getcountry(cityname)
                except AttributeError:
                    err=NoSuchCityError(f"no such city: '{cityname}'")
if err:
raise err
if cityname==CITY and not countryname:
countryname=COUNTRY
ca=cacher.getcached(cityname,countryname)
for caf in ca:
foc=dumper.load(caf)
if isinstance(foc,self.__class__.Forecast):
return foc
if cityname==CITY and countryname==COUNTRY:
lat,lon=LOCATION.lat,LOCATION.lon
else:
loct=locator.geocode(f'{cityname},{countryname}')
lat,lon=loct.latitude,loct.longitude
if unit is None:
if countryname.lower()=='united states':
unit=FAHRENHEIT
else:
unit=CELSIUS
apiurl=f'https://api.met.no/weatherapi/locationforecast/2.0/compact?lat={lat}&lon={lon}'
return self.ForecastParser.parse(BetaChrome().request(apiurl)[1],unit,cityname,countryname)
class _7Timer:
'''7timer.info source'''
def __init__(self):
self.driver=None
self.ForecastParser=self._ForecastParser()
class Day:
'''Implementation of one day'''
def __init__(self,wlst,wdict):
self.weather_as_list=wlst
self.weather_as_dict=wdict
def __getitem__(self,i):
return self.gettemp(i.split(':')[0])
def splittime(self,time,after=int):
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def fillin(self,time):
return self.jointime(self.formatsec(i) for i in self.splittime(time,after=str))
def formatsec(self,time):
if len(str(time))>2:
raise ValueError('not 2-digit or 1-digit time')
if not time:
return ""
if len(str(time))<2:
return "0"+str(time)
return time
def jointime(self,time):
return ':'.join(str(i) for i in time)
def gettemp(self,time):
if isinstance(time,str):
time=self.splittime(time)
return self.weather_as_dict[self.fillin(self.repairtime(self.jointime(time)))]
def timetoint(self,time):
if isinstance(time,str):
return (self.splittime(time)[1]+self.splittime(time)[0]*60)%1440
return (time[1]+time[0]*60)%1440
def inttotime(self,i):
return (i//60,i%60)
def repairtime(self,time):
closest=lambda num,collection:min(collection,key=lambda x:abs((x-num)+1))
dy=self.weather_as_dict
dy=[self.timetoint(time) for time in dy]
qr=self.timetoint(self.roundtime(time))
return self.jointime(self.inttotime(closest(qr,dy)))
def roundtime(self,time):
mins=int(time.split(':')[-1])
if mins==0:
return time
hrs=int(time.split(':')[0])
if mins<50:
return f'{self.formatsec(hrs)}:00'
return f'{self.formatsec(hrs+1)}:00'
class Forecast:
'''implementation of weather forecast'''
def __init__(self,days,ci,co):
self.days=days
self.today=days[0]
self.tomorrow=days[1]
self.city=ci
self.country=co
def __getitem__(self,i):
return self.day(i)
def __setitem__(self,item,what):
raise ReadonlyError('read-only')
def day(self,daynum):
return self.days[daynum]
class Weather:
'''Implementation of weather'''
def __init__(self,wind,precip,temp,humid=None,unit=CELSIUS):
self.temp=temp
self.precip=precip
self.wind=wind
self.humid=humid
self.unit=unit
class _ForecastParser:
'''Parses forecast'''
def parse(self,content,unit,ci,co):
content=json.loads(content)
lit=content['dataseries']
weather_as_dict={}
fcast=[]
dnum=1
for qeqeq in lit:
tm=self.mktime(
self.inttotime(
self.timetoint(
str(
qeqeq['timepoint']
)+':00'
)+self.timetoint(
self.roundtime(
self.parsetime(
time.ctime(
time.time()
)
)
)
)
)
)
tmp=self.c2f(qeqeq['temp2m'],unit)
humd=None
                precip=qeqeq['prec_type']!='none'
wind=qeqeq['wind10m']['speed']
windir=qeqeq['wind10m']['direction']
weather_as_dict[tm]=_7Timer.Weather(Wind(wind,windir),precip,tmp,humd,unit)
if qeqeq['timepoint']>=dnum*24:
fcast.append(_7Timer.Day(list(weather_as_dict.values()),weather_as_dict))
weather_as_dict={}
dnum+=1
ltime=self.timetoint(tm)
foc= _7Timer.Forecast(fcast,ci,co)
cacher.cache(foc)
return foc
def splittime(self,time,after=int):
try:
return tuple(after(i) for i in time.split(':'))
except ValueError:
raise ValueError(
f"invalid value for 'splittime':{time}"
)
def fillin(self,time):
return self.jointime(self.formatsec(i) for i in self.splittime(time,after=str))
def formatsec(self,time):
if len(str(time))>2:
raise ValueError('not 2-digit or 1-digit time')
if not time:
return ""
if len(str(time))<2:
return "0"+str(time)
return time
def jointime(self,time):
return ':'.join(str(i) for i in time)
def gettemp(self,time):
if isinstance(time,str):
time=self.splittime(time)
return self.weather_as_dict[self.fillin(self.repairtime(self.jointime(time)))]
def timetoint(self,time):
if isinstance(time,str):
return (self.splittime(time)[1]+self.splittime(time)[0]*60)%1440
return (time[1]+time[0]*60)%1440
def inttotime(self,i):
return (i//60,i%60)
def repairtime(self,time):
closest=lambda num,collection:min(collection,key=lambda x:abs((x-num)+1))
dy=self.weather_as_dict
dy=[self.timetoint(time) for time in dy]
qr=self.timetoint(self.roundtime(time))
return self.jointime(self.inttotime(closest(qr,dy)))
def roundtime(self,time):
mins=int(time.split(':')[-1])
if mins==0:
return time
hrs=int(time.split(':')[0])
if mins<50:
return f'{self.formatsec(hrs)}:00'
return f'{self.formatsec(hrs+1)}:00'
def mktime(self,time):
return self.fillin(self.jointime(self.inttotime(self.timetoint(time))))
def parsetime(self,time):
            pattern=re.compile(r'(\d{2}:\d{2}):\d{2}')
return pattern.search(time).group(1)
def parseday(self,time):
            pattern=re.compile(r'\d{4}-\d{2}-(\d{2})')
return pattern.search(time).group(1)
        def c2f(self,temp,unit=CELSIUS):
            # 7timer reports Celsius; convert only when Fahrenheit is requested.
            if unit==FAHRENHEIT:
                return tu(temp,'C','F').doconnvert()
            return temp
def forecast(self,cityname=CITY,countryname=COUNTRY,unit=None):
'''Gets forecast'''
err=None
if not countryname:
try:
countryname=getcountry(cityname)
except AttributeError:
err=NoSuchCityError(f"no such city: '{cityname}'")
if err:
raise err
if cityname==CITY and not countryname:
countryname=COUNTRY
if unit is None:
if countryname.lower()=='united states':
unit=FAHRENHEIT
else:
unit=CELSIUS
if cityname==CITY and countryname==COUNTRY:
lat,lon=LOCATION.lat,LOCATION.lon
else:
loct=locator.geocode(f'{cityname},{countryname}')
lat,lon=loct.latitude,loct.longitude
ca=cacher.getcached(cityname,countryname)
for caf in ca:
foc=dumper.load(caf)
if isinstance(foc,self.__class__.Forecast):
return foc
apiurl=f'https://www.7timer.info/bin/astro.php?lon={lon}&lat={lat}&ac=0&lang=en&unit=metric&output=json&tzshift=0'
return self.ForecastParser.parse(BetaChrome().request(apiurl)[1],unit,cityname,countryname)
ForecastParser=_ForecastParser()
class _driverSearch:
'''Search drivers'''
def __init__(self,throw=False):
if throw:
debugger.debug("Lost connection to the driver,attempting reconnect...","ERROR")
debugger.debug("initialized driver search")
self.browsers=[Chrome,Firefox,Safari,Ie,Edge]
self.reprs={repr(i):i for i in self.browsers}
        # If possible, initialise the drivers in headless mode.
        _CHOPT=chrome.options
        _FFXOPT=firefox.options
        _IEXOPT=ie.options
        opt=[_CHOPT.Options(),
             _FFXOPT.Options(),
             _IEXOPT.Options()]
        for o in opt:
            o.headless=True
        self.headlessopt=[[Chrome,opt[0]],[Firefox,opt[1]],[Ie,opt[2]]]
        os.makedirs('.cache/weather',exist_ok=True)
debugger.debug("Getting browser avaliability data")
if ('aval') not in os.listdir('.cache/weather'):
debugger.debug("Avaliability data not in cache!","WARNING")
            chrome_aval=False
            firefox_aval=False
            safari_aval=False
            ie_aval=False
            try:
                c=Chrome()
                c.quit()
                chrome_aval=True
            except Exception:
                try:
                    f=Firefox()
                    f.quit()
                    firefox_aval=True
                except Exception:
                    try:
                        s=Safari()
                        s.quit()
                        safari_aval=True
                    except Exception:
                        try:
                            i=Ie()
                            i.quit()
                            ie_aval=True
                        except Exception:
                            pass
            self.aval=[[Chrome,chrome_aval],[Firefox,firefox_aval],[Safari,safari_aval],[Ie,ie_aval]]
with open('.cache/weather/aval','w')as f:
res=[]
for i,j in self.aval:
res.append([repr(i),j])
debugger.debug("Json dumping data")
json.dump(res,f)
else:
debugger.debug("Loading data from cache")
with open('.cache/weather/aval')as f:
try:
self.aval=json.load(f)
                except ValueError:
                    raise WeatherError(
                        'Could not get browser availability data because the file .cache/weather/aval is malformed; try deleting it.'
)
result=[]
for i in self.aval:
if i[0] in self.reprs:
result.append([self.reprs[i[0]],i[1]])
else:
result.append(i)
self.aval=result
        if not any(a[1] for a in self.aval):
raise DriverError(
'''None of web drivers installed.
Check https://jenca-adam.github.io/projects/weather/docs.html#special_requirements .
Alternatively, you can use weather.yrno instead of weather.google .
''')
def _checkin(self,a,b,index=0):
for i in a:
if b==i[index]:
return True
return False
def _isaval(self,dr):
for i in self.aval:
if i[0]==dr and i[1]:
return True
return False
def _gethopt(self,dr):
for i in self.headlessopt:
if i[0]==dr:
return i[1]
    def best(self,reload=False):
        '''Get best driver'''
        debugger.debug("Getting best driver")
        if not os.path.isdir('.cache/weather') or reload:
            if not os.path.isdir('.cache/weather'):
debugger.debug("Could not load data from cache, parsing manually","WARNING")
os.makedirs('.cache/weather',exist_ok=True)
hdlsxst=False
for b in self.browsers:
if self._checkin(self.headlessopt,b):
hdlsxst=True
if not hdlsxst:
warnings.warn(
NoHeadlessWarning(
'''
No headless web driver, browser will open while searching for forecast.
Headless web drivers are: chromedriver (Chrome),geckodriver (Mozilla Firefox),IEDriverServer.exe(Internet Explorer).
Check https://jenca-adam.github.io/projects/weather/docs.html#headless_drivers'''
)
)
for b in self.browsers:
if self._isaval(b):
with open('.cache/weather/browser','w') as f:
f.write(repr(b))
if self._checkin(self.headlessopt,b):
return b(options=self._gethopt(b))
return b()
elif 'browser' not in os.listdir('.cache/weather'):
debugger.debug("Could not load data from cache, parsing manually","WARNING")
hdlsxst=False
for b in self.browsers:
if self._checkin(self.headlessopt,b):
hdlsxst=True
if not hdlsxst:
warnings.warn(
NoHeadlessWarning(
'''
No headless web driver, browser will open while searching for forecast.
Headless web drivers are: chromedriver (Chrome),geckodriver (Mozilla Firefox),IEDriverServer.exe(Internet Explorer).
Check https://jenca-adam.github.io/projects/weather/docs.html#headless_drivers'''
)
)
for b in self.browsers:
if self._isaval(b):
                    with open('.cache/weather/browser','w') as f:
f.write(repr(b))
if self._checkin(self.headlessopt,b):
return b(options=self._gethopt(b))
return b()
else:
debugger.debug("loading data from cache")
            cont=open('.cache/weather/browser').read()
b=self.reprs[cont]
if self._checkin(self.headlessopt,b):
return b(options=self._gethopt(b))
return b()
class _SetterUp:
'''Sets up chromedriver and geckodriver'''
SYSTEM=platform.system()
if SYSTEM=='Windows':
CHROMEDRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/win/chromedriver.exe"
GECKODRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/win/geckodriver.exe"
elif SYSTEM == 'Linux':
CHROMEDRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/linux/chromedriver"
GECKODRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/linux/geckodriver"
else:
CHROMEDRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/mac/chromedriver"
GECKODRIVER_URL="https://github.com/jenca-adam/jenca-adam.github.io/raw/master/projects/weather/extras/bin/mac/geckodriver"
HOME=str(Path.home())
INSTALL_DIR=os.path.join(HOME,'.local/bin')
    os.makedirs(INSTALL_DIR,exist_ok=True)
if INSTALL_DIR not in os.environ["PATH"].split(os.pathsep):
os.environ["PATH"]+=os.pathsep+INSTALL_DIR
CHROMEDRIVER_INSTALL=os.path.join(INSTALL_DIR,CHROMEDRIVER_URL.split('/')[-1])
GECKODRIVER_INSTALL=os.path.join(INSTALL_DIR,GECKODRIVER_URL.split('/')[-1])
def install_cdr(self):
debugger.debug("Installing chromedriver")
h=Http()
debugger.debug("Downloading chromedriver")
r,content=h.request(self.CHROMEDRIVER_URL)
with open(self.CHROMEDRIVER_INSTALL,'wb')as f:
f.write(content)
os.chmod(self.CHROMEDRIVER_INSTALL,0o777)
def install_gecko(self):
debugger.debug("Installing geckodriver")
h=Http()
r,content=h.request(self.GECKODRIVER_URL)
with open(self.GECKODRIVER_INSTALL,'wb')as f:
f.write(content)
os.chmod(self.GECKODRIVER_INSTALL,0o777)
def setup(self,drivers=['chromedriver','geckodriver']):
debugger.debug("setting up")
if not drivers:
raise SetupError('please specify at least one driver')
chopt=chrome.options.Options()
ffxopt=firefox.options.Options()
chopt.headless=True
ffxopt.headless=True
if 'chromedriver' in drivers:
try:
Chrome(options=chopt)
except:
self._setup(drivers=['chromedriver'])
if 'geckodriver' in drivers:
try:
Firefox(options=ffxopt)
except:
self._setup(drivers=['geckodriver'])
def _setup(self,drivers):
if not drivers:
raise SetupError('please specify at least one driver')
if 'chromedriver' in drivers:
self.install_cdr()
if 'geckodriver' in drivers:
self.install_gecko()
chopt=chrome.options.Options()
ffxopt=firefox.options.Options()
chopt.headless=True
ffxopt.headless=True
try:
Chrome(options=chopt)
except:
try:
Firefox(options=ffxopt)
except:
raise SetupError('''
Please note that weather.setup() works only for Firefox and Chrome for now.
You have 2 options:
1.Install Firefox or Chrome
2.Don't call weather.setup on initializing and install one of the drivers manually.
''')
class _cukor:
'''Advanced list comprehension'''
def cukor(a,b,c,d,e):
result=[]
exec(
f'''for {b} in {c}:
if {d}:
result.append({a})
else:
result.append({e})'''
,{
'result':result})
return result
class parser:
'''Parses temperature and time strings'''
class ParsingError(ValueError):pass
def detectint(self,st):
p=re.compile(r"(\-?\d+)")
match=p.search(st)
if not match:
raise self.ParsingError(
"no int detected"
)
return int(match.group(0))
    def is_inaccurate(self,st):
        # detectint() validates the string (raises ParsingError if no int found).
        self.detectint(st)
        return '-' in st
def parse_inaccurate(self,t):
if not self.is_inaccurate(t):
try:
return int(t)
except ValueError:
raise self.ParsingError("not a valid time")
else:
f,l=t.split('-')
try:
f=int(f)
l=int(l)
except ValueError:
raise self.ParsingError("not a valid time")
d=(l-f)//2
if d<0:
raise self.ParsingError(
"not a valid time -- second int is larger than first")
return f+d
class _Avg:
'''get best forecast result'''
SVC=['yrno','google']
def forecast(self,*a,**k):
ress=[]
for service in self.SVC:
debugger.debug(f"running service \"{service}\"")
try:
ress.append( SERVICES[service].forecast(*a,**k))
except KeyboardInterrupt:
raise SetupError("lost connection to service")
except urllib3.exceptions.MaxRetryError:
try:
debugger.debug("Lost connection to the driver,attempting reconnect...","ERROR")
importlib.reload(selenium)
ress.append(SERVICES[service].forecast(*a,**k))
except urllib3.exceptions.MaxRetryError:
raise DriverError(
"""Because of unknown error in Selenium was lost connection to driver.
Consider restarting your script/module"""
)
except WeatherError as e:
                if isinstance(e,NoSuchCityError):
raise
debugger.debug(f'service \"{service}\" does not recognise place',"ERROR")
except selenium.common.exceptions.WebDriverException:
raise DriverError(
"""Could not set up the driver, probably keyboard interrupt?"""
)
except Exception as e:
                if isinstance(e,KeyboardInterrupt):
                    raise KeyboardInterrupt("interrupted while searching for forecast")
raise
if not ress:
raise WeatherError("could not find service matching your search")
fcast=[]
city=ress[0].city
country=ress[0].country
        for i in ress:
            dayc=0
            for day in i.days:
                # align days across services: reuse the stored day's dict when it exists
                if len(fcast)<dayc+1:
                    wdict={}
                    new_day=True
                else:
                    wdict=fcast[dayc].weather_as_dict
                    new_day=False
                for time,weather in day.weather_as_dict.items():
                    if time not in wdict:
                        wdict[time]=weather
                    else:
                        # average this service's reading with the stored one
                        olw=wdict[time]
                        olw.temp=(olw.temp+weather.temp)//2
                        if weather.humid is not None:
                            olw.humid=weather.humid if olw.humid is None else (olw.humid+weather.humid)//2
                        olw.wind.speed=(olw.wind.speed+weather.wind.speed)/2
                        olw.wind.direction=Direction((olw.wind.direction.angle+weather.wind.direction.angle)//2)
                        if type(weather.precip) in [int,float]:
                            olw.precip=(weather.precip+olw.precip)/2
                        wdict[time]=olw
                if new_day:
                    fcast.append(yrno.Day(list(wdict.values()),wdict))
                dayc+=1
return yrno.Forecast(fcast,city,country)
parser=parser()
google=_WeatherChannel()
yrno=_YR_NORI()
setup=_SetterUp()
average=_Avg()
debugger=_debugger()
f7timer=_7Timer()
dumper=_fcdumper()
DEBUG=False
cacher=_cacher()
fastener=_fastener()
getcountry=fastener.getcountry
SERVICES={'google':google,'yrno':yrno,'metno':yrno,'7timer':f7timer,'average':average}
def fix(svc):
fxd = difflib.get_close_matches(svc,SERVICES.keys(),n=1,cutoff=0.7)
if len(fxd)>0:
return fxd[0]
else:
return svc
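# e.g. fix("goglee") -> "google" via difflib.get_close_matches; names with no
# close match are returned unchanged.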
def forecast(cityname=CITY,countryname=None,unit=None,service=average,debug=False):
global DEBUG,selenium
DEBUG=debug
if isinstance(service,str):
try:
service=SERVICES[service]
except KeyError:
afms=""
excm="!"
if fix(service)!=service:
excm="?"
afms=f", did you mean {fix(service)!r}"
raise UnknownServiceError(f'unknown service : {service!r}{afms}{excm}')
debugger.debug("Debugger has started","INFO")
if service==average:
return service.forecast(cityname,countryname,unit)
else:
try:
return service.forecast(cityname,countryname,unit)
except KeyboardInterrupt:
raise SetupError("lost connection to service")
except urllib3.exceptions.MaxRetryError:
debugger.debug("Lost connection to the driver,attempting reconnect...","ERROR")
try:
return service.forecast(cityname,countryname,unit,driver=_driverSearch(throw=True).best())
except urllib3.exceptions.MaxRetryError:
raise DriverError(
"""Because of unknown error in Selenium was lost connection to driver.
Consider restarting your script/module"""
)
except WeatherError as e:
            if isinstance(e,NoSuchCityError):
raise
debugger.debug(f'service \"{service}\" does not recognise place ;{e} raised',"ERROR")
except selenium.common.exceptions.WebDriverException:
raise DriverError(
"""Could not set up the driver, probably keyboard interrupt?"""
)
except Exception as e:
            if isinstance(e,KeyboardInterrupt):
                raise KeyboardInterrupt("interrupted while searching for forecast")
raise
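# Hypothetical usage (CITY and COUNTRY defaults come from the module config):
#   foc = forecast("Prague", service="yrno")
#   print(foc.today)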
class More(object):
def __init__(self, num_lines,debug=False):
self.num_lines = num_lines
self.debug=debug
def __ror__(self, other):
s = str(other).split("\n")
print(*s[:self.num_lines], sep="\n")
for i in range( self.num_lines,len(s)):
print("--MORE--\r",end="")
key=getchlib.getkey()
if key.lower()=='q' :
if not self.debug:
termutils.clear()
quit()
if key in ['\x1b[B','\x1b[6~',' ','\n']:
print(s[i])
time.sleep(0.1)
class CLI:
def main(self):
parser=argparse.ArgumentParser(description='Python app for getting weather forecast')
parser.add_argument('--city',type=str,help='City for forecast (if not passed, using current location)',nargs=1)
parser.add_argument('--country',type=str,help='Country for forecast (see above)',nargs=1)
parser.add_argument('-d','--debug',action='store_true',help='Debug')
        parser.add_argument('-s','--service',type=str,help='Service to use (e.g. "yrno","7timer","google"). Defaults to "average" (combines the available services)')
args=parser.parse_args()
if not args.city:
args.city=[CITY]
if not args.country:
args.country=[None]
if not args.service:
args.service="average"
if not args.debug:
termutils.clear()
print('Loading ...')
foc=forecast(args.city[0],args.country[0],service=args.service,debug=args.debug)
if foc is None:
raise NoSuchCityError(f'no such city :{args.city[0]!r}')
if not args.debug:
termutils.clear()
termcolor.cprint('Weather forecast for',end=' ',color='cyan')
termcolor.cprint(','.join([foc.city,foc.country]),color='yellow')
if isinstance(foc,yrno.Forecast):
source='Yr.no'
elif isinstance(foc,google.Forecast):
source='Google'
elif isinstance(foc,f7timer.Forecast):
source='7timer!'
else:
source=None
lac=2
if source:
print('Source : '+source)
lac+=1
foc2t(foc)|More(num_lines=os.get_terminal_size().lines-lac,debug=args.debug)
cli=CLI()
main=cli.main
if __name__=='__main__':
main()
```
|
{
"source": "jenchen1398/artistic-music-style-transfer",
"score": 3
}
|
#### File: artistic-music-style-transfer/pytorch/domain_confusion.py
```python
import torch
import numpy as np
from torch.nn import functional as F
class DomainCNN(torch.nn.Module):
# input is vector in R^64
def __init__(self, domains):
super(DomainCNN, self).__init__()
# 3 1D convolution layers
self.conv1 = torch.nn.Conv1d(1, 32, kernel_size=5)
self.pool1 = torch.nn.MaxPool1d(kernel_size=2)
self.conv2 = torch.nn.Conv1d(32, 16, kernel_size=5, stride=2)
self.pool2 = torch.nn.MaxPool1d(kernel_size=2, stride=2)
self.conv3 = torch.nn.Conv1d(16, 8, kernel_size=2, stride=2)
self.pool3 = torch.nn.MaxPool1d(kernel_size=2, stride=1)
# last layer projects vectors to dimension k
# average the vectors to obtain a single vector of dim k
self.fc1 = torch.nn.Linear(8*2, domains)
def forward(self, x):
x = F.elu(self.conv1(x))
x = self.pool1(x)
x = F.elu(self.conv2(x))
x = self.pool2(x)
x = F.elu(self.conv3(x))
x = self.pool3(x)
# reshape
x = x.view(-1, 8*2)
m = torch.nn.Softmax(1)
x = m(self.fc1(x))
return x
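    # Hypothetical shape check: for input torch.zeros(8, 1, 64), the conv/pool
    # stack shrinks the length 64->60->30->13->6->3->2, so the flattened
    # features are 8*2=16 and DomainCNN(domains=4) returns shape (8, 4).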
class DomainLoss(torch.nn.Module):
def __init__(self, num_classes):
super(DomainLoss, self).__init__()
self.num_classes = num_classes
    def forward(self, outputs, targets):
        """param outputs: k dimension outputs from confusion network """
        # CrossEntropyLoss is a module: instantiate it, then call it; it
        # already averages over the batch. Note it expects raw logits, and
        # DomainCNN applies a Softmax in forward(), so that layer should be
        # removed (or NLLLoss used on log-probabilities) for a correct loss.
        criterion = torch.nn.CrossEntropyLoss()
        return criterion(outputs, targets)
```
|
{
"source": "jenchen1398/ccigan",
"score": 3
}
|
#### File: ccigan/code/ds.py
```python
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
class MIBIDataset(Dataset):
    def __init__(self, data, transform=None):
        """
        Parameters: data is of shape (n_cells, img_h, img_w, n_channels)
        """
        self.data = data
        # honor a caller-supplied transform; fall back to ToTensor()
        self.transform = transform if transform is not None else transforms.Compose([transforms.ToTensor()])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
seg = self.data[idx][0]
real = self.data[idx][1]
seg = self.transform(seg)
real = self.transform(real)
return seg, real
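# Illustrative usage (assumes each sample pairs a segmentation map with a real
# image): dataset = MIBIDataset(data); seg, real = dataset[0] returns tensors.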
```
|
{
"source": "jencmart/mff-statisticky-strojovy-preklad",
"score": 2
}
|
#### File: jencmart/mff-statisticky-strojovy-preklad/apply.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import re
import subprocess
import tempfile
from collections import defaultdict
class G2PModelTester():
"""G2P Model training wrapper class.
Phonetisaurus G2P modeling training wrapper class.
This wraps the alignment, joint n-gram training, and ARPA to
WFST conversion steps into one command.
"""
def __init__(self, model, **kwargs):
self.model = model
self.lexicon_file = kwargs.get("lexicon", None)
self.nbest = kwargs.get("nbest", 1)
self.thresh = kwargs.get("thresh", 99)
self.beam = kwargs.get("beam", 10000)
self.greedy = kwargs.get("greedy", False)
self.accumulate = kwargs.get("accumulate", False)
self.pmass = kwargs.get("pmass", 0.0)
self.probs = kwargs.get("probs", False)
self.verbose = kwargs.get("verbose", False)
self.logger = self.setupLogger()
self.gsep = kwargs.get("gsep", "")
def setupLogger(self):
"""Setup the logger and logging level.
Setup the logger and logging level. We only support
verbose and non-verbose mode.
Args:
verbose (bool): Verbose mode, or not.
Returns:
Logger: A configured logger instance.
"""
level = logging.DEBUG if self.verbose else logging.INFO
logging.basicConfig(
level=level,
format="\033[94m%(levelname)s:%(name)s:" \
"%(asctime)s\033[0m: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S"
)
return logging.getLogger("phonetisaurus-apply")
def _loadLexicon(self):
"""Load the lexicon from a file.
Load the reference lexicon from a file, and store it
in a defaultdict (list).
"""
_lexicon = defaultdict(list)
if not self.lexicon_file:
return _lexicon
self.logger.debug("Loading lexicon from file...")
with open(self.lexicon_file, "r") as ifp:
for line in ifp:
                # py2/py3 compatibility
if sys.version_info[0] < 3:
line = line.decode("utf8").strip()
else:
line = line.strip()
word, pron = re.split(r"\t", line, 1)
_lexicon[word].append(pron)
return _lexicon
def checkPhonetisaurusConfig(self):
"""Run some basic checks before training.
Run some basic checks regarding the $PATH, environment,
and provided data before starting training.
Raises:
EnvironmentError: raised if binaries are not found.
"""
self.logger.debug("Checking command configuration...")
for program in ["phonetisaurus-g2pfst"]:
if not self.which(program):
raise EnvironmentError("Phonetisaurus command, '{0}', " \
"not found in path.".format(program))
if self.lexicon_file and not os.path.exists(self.lexicon_file):
self.logger.error("Could not find provided lexicon file.")
sys.exit(1)
for key, val in sorted(vars(self).items()):
self.logger.debug(u"{0}: {1}".format(key, val))
self.lexicon = self._loadLexicon()
return
def which(self, program):
"""Basic 'which' implementation for python.
Basic 'which' implementation for python from stackoverflow:
* https://stackoverflow.com/a/377028/6739158
Args:
program (str): The program name to search the $PATH for.
Returns:
path/None: The path to the executable, or None.
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def makeG2PCommand(self, word_list):
"""Build the G2P command.
Build the G2P command from the provided arguments.
Returns:
list: The command in subprocess list format.
"""
command = [
u"phonetisaurus-g2pfst",
u"--model={0}".format(self.model),
u"--nbest={0}".format(self.nbest),
u"--beam={0}".format(self.beam),
u"--thresh={0}".format(self.thresh),
u"--accumulate={0}".format(str(self.accumulate).lower()),
u"--pmass={0}".format(self.pmass),
u"--nlog_probs={0}".format(str(not self.probs).lower()),
u"--wordlist={0}".format(word_list),
u"--gsep={0}".format(self.gsep),
]
self.logger.debug(u" ".join(command))
return command
def runG2PCommand(self, word_list_file):
"""Generate and run the actual G2P command.
Generate and run the actual G2P command. Each synthesized
entry will be yielded back on-the-fly via the subprocess
stdout readline method.
Args:
word_list_file (str): The input word list.
"""
g2p_command = self.makeG2PCommand(word_list_file)
self.logger.debug("Applying G2P model...")
with open(os.devnull, "w") as devnull:
proc = subprocess.Popen(
g2p_command,
stdout=subprocess.PIPE,
stderr=devnull if not self.verbose else None
)
for line in proc.stdout:
parts = re.split(r"\t", line.decode("utf8").strip())
if not len(parts) == 3:
self.logger.warning(
u"No pronunciation for word: '{0}'".format(parts[0])
)
continue
yield parts
return
def applyG2POnly(self, word_list_file):
self.checkPhonetisaurusConfig()
if not os.path.exists(word_list_file) \
or not os.path.isfile(word_list_file):
raise IOError("Word list file not found.")
results = []
for word, score, pron in self.runG2PCommand(word_list_file):
results.append((word, float(score), pron))
return results
def applyG2PWithLexicon(self, word_list_file):
"""Apply the G2P model to a word list, combined with lexicon.
Apply the G2P model to a word list, but combine this with
a reference lexicon. Words for which a reference entry exists
will not be sent to the G2P, unless the additional '--greedy'
flag is set to True.
Args:
word_list_file (str): The input word list.
"""
target_lexicon = defaultdict(list)
tmpwordlist = tempfile.NamedTemporaryFile(mode='w', delete=False)
        # First, find any words in the target word list for which we already
# have a canonical pronunciation in the reference lexicon.
with open(word_list_file, "r") as ifp:
for word in ifp:
                # py2/py3 compatibility
if sys.version_info[0] < 3:
word = word.decode("utf8").strip()
else:
word = word.strip() # already in 'utf8'.
if word in self.lexicon:
target_lexicon[word] = [(0.0, pron)
for pron in self.lexicon[word]]
                    # In greedy mode we still send words to the G2P, even
                    # if we have canonical entries in the reference lexicon.
                    if self.greedy:
                        # py2/py3 compatibility
                        if sys.version_info[0] < 3:
                            print(word.encode("utf8"), file=tmpwordlist)
                        else:
                            print(word, file=tmpwordlist)
                else:
                    # py2/py3 compatibility
                    if sys.version_info[0] < 3:
                        print(word.encode("utf8"), file=tmpwordlist)
                    else:
                        print(word, file=tmpwordlist)
tmpwordlist.close()
# Second, iterate through the G2P output, and filter against
# any possible duplicates previously found in the reference lexicon.
for word, score, pron in self.runG2PCommand(tmpwordlist.name):
prons = set([p for s, p in target_lexicon[word]])
if pron in prons:
continue
target_lexicon[word].append((score, pron))
# Finally, sort everything that is left and print it.
for word in sorted(target_lexicon.keys()):
for score, pron in target_lexicon[word]:
line = u""
if self.verbose:
line = u"{0}\t{1:.2f}\t{2}".format(
word, float(score), pron
)
else:
line = u"{0}\t{1}".format(word, pron)
# py2py3 compatbility,
if sys.version_info[0] < 3:
print(line.encode("utf8"))
else:
print(line)
os.unlink(tmpwordlist.name)
return
def ApplyG2PModel(self, word_list_file):
self.checkPhonetisaurusConfig()
if not os.path.exists(word_list_file) \
or not os.path.isfile(word_list_file):
raise IOError("Word list file not found.")
if len(self.lexicon) == 0:
return self.applyG2POnly(word_list_file)
else:
self.applyG2PWithLexicon(word_list_file)
return
if __name__ == "__main__":
import sys, argparse
example = "{0} --model train/model.fst --word test".format(sys.argv[0])
parser = argparse.ArgumentParser(description=example)
parser.add_argument("--model", "-m", help="Phonetisaurus G2P fst model.", required=True)
parser.add_argument("--lexicon", "-l", help="Optional reference lexicon.", required=False)
parser.add_argument("--nbest", "-n", help="Maximum number of hypotheses to produce. Overridden if --pmass is set.",
default=1, type=int)
parser.add_argument("--beam", "-b", help="Search 'beam'.", default=10000, type=int)
parser.add_argument("--thresh", "-t", help="Pruning threshold for n-best.", default=99.0, type=float)
parser.add_argument("--greedy", "-g", help="Use the G2P even if a reference lexicon has been provided.",
default=False, action="store_true")
parser.add_argument("--accumulate", "-a", help="Accumulate probabilities across unique pronunciations.",
default=False, action="store_true")
parser.add_argument("--pmass", "-p",
help="Select the maximum number of hypotheses summing to P total mass for a word.", default=0.0,
type=float)
parser.add_argument("--probs", "-pr", help="Print exp(-val) instead of default -log values.", default=False,
action="store_true")
parser.add_argument("--word_list", "-wl", help="Input word or word list to apply ""G2P model to.", type=str)
parser.add_argument("--verbose", "-v", help="Verbose mode.", default=False, action="store_true")
parser.add_argument("--gsep", help="separator of 'graphemes', default: ''")
args = parser.parse_args()
tester = G2PModelTester(
args.model,
**{key: val for key, val in args.__dict__.items()
if not key in ["model", "word_list"]}
)
tester.ApplyG2PModel(args.word_list)
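    # Example invocation (paths are placeholders):
    #   python apply.py --model train/model.fst --word_list words.txt --nbest 3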
```
#### File: mff-statisticky-strojovy-preklad/generators/factory.py
```python
from generators.base_generator import SentenceGenerator
class GeneratorFactory:
@staticmethod
def get_generator_class(name, *argv):
generators_names = [cls.__name__ for cls in SentenceGenerator.__subclasses__()]
generator_classes = SentenceGenerator.__subclasses__()
assert name in generators_names, "Generator '{}' not found. Available: {}".format(name, generators_names)
i = generators_names.index(name)
return generator_classes[i](*argv)
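# Illustrative usage (assumption: MySentenceGenerator subclasses SentenceGenerator):
#   gen = GeneratorFactory.get_generator_class("MySentenceGenerator", arg1, arg2)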
```
#### File: jencmart/mff-statisticky-strojovy-preklad/sentence_graph.py
```python
import logging
import math
import random
import re
import nltk
from nltk.tokenize import word_tokenize
import logger
from noise.noise_phoneme import NoiseFromP2G
from utils import choice
class SamplingGraph:
def __init__(self, noise_generator, error_prob, max_M, sampling_M, sampling_N, sampling_error_samples):
self.logger = logging.getLogger(logger.BasicLogger.logger_name)
self.noise_generator = noise_generator
try:
nltk.data.find('tokenizers/punkt')
        except LookupError:
self.logger.warning('downloading nltk tokenizer punkt...')
nltk.download('punkt')
assert 0 < error_prob < 1, "error prob must be in (0,1)"
assert max_M >= 1, "1:N errors are smallest possible option"
assert sampling_M in ['weighted', 'uniform'], "Sampling M must be 'weighted' or 'uniform'"
assert sampling_N in ['weighted', 'uniform'], "Sampling N must be 'weighted' or 'uniform'"
# Parameters for the sampling of the errors
self.max_M = max_M
self.sampling_M = sampling_M
self.sampling_N = sampling_N
self.error_prob = error_prob
self.sampling_error_samples = sampling_error_samples
def tokenize_sentence(self, sentence):
assert isinstance(sentence, str), "sentence must be non-empty string"
assert len(sentence) > 0, "sentence must be non-empty string"
# replace typographic stuff
sentence = sentence.replace("’", "'")
sentence = sentence.replace('“', '"')
sentence = sentence.replace('”', '"')
sentence = sentence.replace('“', '"')
# tokenize sentence and extract alpha words + their position in the sentence
words = word_tokenize(sentence)
self.original_words = words
self.words_alpha_pos = list([])
for i, word in enumerate(words):
if word.isalpha():
self.words_alpha_pos.append((word.lower(), "", i))
# if we use phoneme model, we must generate pronunciation for alpha words
if self.noise_generator.get_name() == 'phoneme':
pronunciation = self.noise_generator.generate_pronunciation(self.words_alpha_pos)
for i, result in enumerate(pronunciation):
w, _, idx = self.words_alpha_pos[i]
self.words_alpha_pos[i] = (w, result[2], idx)
def build_base_graph(self):
self.end_node = Node((".", ".", len(self.original_words)), len(self.words_alpha_pos) + 1, from_M=-2)
self.start_node = Node(("<eps>", "<eps>", -1), 0, from_M=-1)
self.map_start_word_level_id__node = {}
for i in range(0, len(self.words_alpha_pos) + 2):
self.map_start_word_level_id__node[i] = []
self.map_start_word_level_id__node[0].append(self.start_node)
self.map_start_word_level_id__node[self.end_node.level_id].append(self.end_node)
def set_sentence(self, sentence):
"""
Weighted Oriented Acyclic graph is build from the sentence
Each node represent one error instance
All positions of the errors are considered using window=1
Nodes of errors are connected s.t. only correct sentences are constructed
Weights between noedes represents probability of the erro
:param sentence:
:return:
"""
# 1. TOKENIZE SENTENCE
self.tokenize_sentence(sentence)
# 2. BUILD "CORRECT SENTENCE" GRAPH
# sentence: Hello, how are you
# graph: <eps>:0 --> HELLO:1 --> HOW:2 --> ARE:3, YOU:4 --> '.':5
self.build_base_graph()
# 3. BUILD REST OF THE GRAPH
self.__build_noise_graph(self.words_alpha_pos)
# 4. CREATE ERROR SAMPLES
# { node_id : {to_n : [(error, score)] } }
error_samples = self.noise_generator.generate_noise(self.list_sampling_nodes)
for node_id, errors in error_samples.items():
for to_N, err_score_list in errors.items():
for err_score in err_score_list:
error_word, score = err_score
self.map_id_sampling_node[node_id].add_sample(to_N, error_word, score)
def sample_sentence(self):
"""
Example of how the final sentence is constructed:
0 1 2 3 4 5 6 7 8
original sentence: Hello THIS , is 123 welcome ??? there !
0 1 3 5 7
alpha sentence : Hello THIS is welcome there
0 1 3 5 7
synth sentence : [hellish ] [is this] [welcomere]
0 1 2 3 4 5 7 8
        final sentence  : [Hellish] , [is this] 123 [welcomere] !
        we can see that the non-alpha token '???' at position 6 in the original sentence has to be omitted
        also the letter casing is forgotten
and finally, spacing between alpha and non-alpha characters (like punctuation symbols) will also be different
"""
current_node = self.start_node
debug_sentence = []
synth_sentence = []
while True:
if current_node.from_M_variant == -2: # ... '.'
# sentence.append(".")
break
if current_node.from_M_variant == -1:
pass # sentence.append("<eps> ")
elif current_node.from_M_variant == 0: # original word
debug_sentence.append(current_node.word + " ")
else: # sampled word ...
# sentence.append(">" + current_node.word + "<[{}] ".format(current_node.variant))
# we are sampling now ....
to_N_parts = current_node.select_N(self.sampling_N)
sampled_word = current_node.sample_word(to_N_parts, self.sampling_error_samples)
synth_sentence.append((sampled_word, current_node.sentence_start_idx, current_node.from_M_variant))
debug_sentence.append(">" + sampled_word + "<[{}->{}] ".format(current_node.from_M_variant,
to_N_parts)) # current_node.variant
# current_node.display()
if len(current_node.neighbours) == 1:
current_node = current_node.neighbours[0]
else:
current_node = choice(current_node.neighbours, current_node.weights)
final_sentence = []
last_idx_in = -1
for synth_word in synth_sentence:
word, start_idx, cnt_words = synth_word
word = word.replace(".", "")
if start_idx == 0:
word = word.capitalize()
# fill in missing non-alpha words
while 1 + last_idx_in < start_idx:
last_idx_in += 1
final_sentence.append(self.original_words[last_idx_in])
# we fill current word
assert start_idx == 1 + last_idx_in, "safety check"
final_sentence.append(word)
# wrong... last_idx_in = last_idx_in + cnt_words # from 1 , from 2 ...
# because we can occasionally skip " this && is " => "thisis" ] and we need to check this ... here
# ( we actually moved 3 words forward, not 2 )
# we check it against the index in the alpha sentence
for final_idx, w_p_idx in enumerate(self.words_alpha_pos):
_, _, orig_idx = w_p_idx
if orig_idx == start_idx:
_, _, true_idx = self.words_alpha_pos[final_idx + cnt_words - 1]
last_idx_in = true_idx
# synth word: welcomere(5)
# words alpha pos: welcome(5) there(7)
# last indx in : not 6
# but ... .... 7
break
# fill the end of the sentence
while last_idx_in + 1 < len(self.original_words):
last_idx_in += 1
final_sentence.append(self.original_words[last_idx_in])
final_sentence = " ".join(final_sentence)
# fix [ ( {
final_sentence = re.sub(r'\s*([(\[{])\s*', r' \1', final_sentence) # "is ( maybe ) good" -> "is (maybe ) good"
# fix } ) }
final_sentence = re.sub(r'\s*([)\]\}])\s*', r'\1 ', final_sentence) # "is (maybe ) good" -> "is (maybe) good"
# fix - @
final_sentence = re.sub(r'\s*([@])\s*', r'\1', final_sentence) # "hello - kitty" -> "hello-kitty"
# fix , . ; : ! ? % $
final_sentence = re.sub(r'\s([?,.;!:%$](?:\s|$))', r'\1', final_sentence) # " hello , Peter" -> hello, Peter
# fix ``
# final_sentence = re.sub(r'\s*(``)\s*', r' \1', final_sentence)
final_sentence = re.sub(r"(``)\s", r'"', final_sentence)
# fix ''
final_sentence = re.sub(r"\s(''(?:\s|$))", r'" ', final_sentence)
final_sentence = final_sentence.strip()
# def remove_s(final_sentence, sym):
# p1 = -1
# for i, s in enumerate(final_sentence):
# if s == sym:
# if p1 == -1:
# if i + 1 < len(final_sentence):
# if final_sentence[i + 1] == " ":
# if i == 0 or final_sentence[i - 1] == " ":
# p1 = i
# continue
# if p1 != -1:
# if final_sentence[i-1] == " " and i-2 != p1:
# final_sentence = final_sentence[0:p1+1] + final_sentence[p1+2: i-1] + final_sentence[i:]
# else:
# final_sentence = final_sentence[0:p1 + 1] + final_sentence[p1 + 2: ]
# break
# return final_sentence
#
# cnt = final_sentence.count('"')
# for i in range(math.floor(cnt/2)):
# final_sentence = remove_s(final_sentence, '"')
# 012345678
# A " B " C
# 2 6
# [0:3]
debug_sentence = "\t".join(debug_sentence)
return debug_sentence, final_sentence
def __create_arc_weight(self, node, level):
if node.from_M_variant == -1: # end node...
return 1
elif node.from_M_variant == 0: # 'original word'
return 1 - self.error_prob
elif len(self.map_start_word_level_id__node[level]) == 1:
return 1
else:
cnt_on_level = len(self.map_start_word_level_id__node[level])
if self.sampling_M == 'uniform':
normed_weight = 1
else:
# 1:N -- weight = max_M - 1 + 1 = 3
# 2:N -- weight = max_M - 2 + 1 = 2
# 3:N -- weight = max_M - 2 + 1 = 1
weight = self.max_M - node.from_M_variant + 1
sum = self.max_M * (self.max_M + 1) / 2
normed_weight = weight / sum
# error_prob is distributed between different types of errors
prob = self.error_prob / (cnt_on_level - 1) * normed_weight # -1 for the original word ...
return prob
def __build_noise_graph(self, words):
"""
        nodes for errors are built:
1:1 ... 1:N
2:1 ... 2:N
... ... ...
M:1 ... M:N
        Edges are built s.t. they spell out correct sentences.
:param words:
:return:
"""
self.map_id_sampling_node = {}
self.max_id = 0
self.list_sampling_nodes = []
for i in range(len(words) - 1, -1, -1):
# because nodes ends with the same word, they all have same successors
# on the other hand, they differ by the "start" level
# A. 1:1 error
list_new_nodes = [Node(words[i], level_id=i + 1, from_M=0)]
# B. All other word 1:x 2:x ...
for mapping in range(self.max_M):
# mapping is typically 1:x, 2:x
if i - mapping < 0: # prevent underflow
break
tmp_word = "" # 0 .... words[i - 0] , 1 ... words[i-1] words[i - 0]
tmp_word_phonemes = ""
first_idx = None
for j in range(mapping, -1, -1):
if j < mapping:
tmp_word += " "
tmp_word_phonemes += " "
w, p, sentence_idx = words[i - j]
if first_idx is None:
first_idx = sentence_idx
tmp_word += w
tmp_word_phonemes += p
word = (tmp_word, tmp_word_phonemes, first_idx)
_new_node = Node(word, level_id=i + 1 - mapping, from_M=1 + mapping, sampling_id=self.max_id)
list_new_nodes.append(_new_node)
self.map_id_sampling_node[self.max_id] = _new_node
self.list_sampling_nodes.append(_new_node)
self.max_id += 1
# C. Add all successors to all new nodes
for successor in self.map_start_word_level_id__node[i + 2]:
for new_node in list_new_nodes:
weight = self.__create_arc_weight(successor, level=i + 2)
new_node.add_neighbour(successor, weight) # new_node ----weight---> successor
# D. Add all new nodes to the map
for new_node in list_new_nodes:
self.map_start_word_level_id__node[new_node.level_id].append(new_node)
# finally, when all nodes are set
# add arcs from <eps> to all nodes representing start of the sentence
for successor in self.map_start_word_level_id__node[1]:
weight = self.__create_arc_weight(successor, level=1)
self.start_node.add_neighbour(successor, weight)
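        # Sketch for "hello this is" with max_M=2 (hypothetical): level 1 holds
        # [hello](orig), [hello](1:N) and [hello this](2:N); level 2 holds
        # [this](orig), [this](1:N) and [this is](2:N); level 3 holds [is](orig)
        # and [is](1:N). A node spanning words j..i starts at level j+1 and
        # points to every node starting at level i+2, so any path from <eps>
        # to '.' spells a complete sentence.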
class Node:
def __init__(self, word, level_id, from_M, sampling_id=0): # word(s), already as the list of phonemes
# word stuff
self.from_M_variant = from_M # -2=end, -1=start, 0=original ; 1=1:N ; 2=2:N 3=3:N ...
self.word, self.phonemes, self.sentence_start_idx = word
self.target_samples = {} # M to 1: 2: 3: # {1: [("heck", 0.221), ("hrrck", 0.4), ("hrrrr", 0.17)], 2: [("he hr", 0.01)], 3: []}
self.cnt_samples = 0 # we start with zero samples ....
self.sampling_id = sampling_id
# node stuff
self.level_id = level_id
self.neighbours = []
self.weights = []
self.set_word = set(self.word.split())
self.logger = logging.getLogger(logger.BasicLogger.logger_name)
def select_N(self, sampling_N):
# select N
# we do it weighted.... the higher the N, the lower the probability
if len(self.target_samples) == 0:
# self.logger.warning("for id:{} word:'{}' empty targets, returning -1".format(self.sampling_id, self.word))
return -1
N_list = list(self.target_samples.keys()) # 1 2 4 ... 7
if sampling_N == 'uniform':
i = random.randint(0, len(N_list) - 1)
N = N_list[i]
else:
_sum = sum(N_list)
weights = [_sum - v + 1 for v in N_list]
N = choice(N_list, weights)
return N
def sample_word(self, to_N, sampling_error_samples):
# empty targets...
if to_N == -1:
return self.word
# print("word: {}".format(self.word))
if to_N in self.target_samples:
possibilities = self.target_samples[to_N]
if sampling_error_samples == 'uniform':
i = random.randint(0, len(possibilities) - 1)
return possibilities[i]
# sample word by probability ....
wrds = []
weights = []
for poss in possibilities:
w, p = poss
wrds.append(w)
weights.append(1 / p)
# if self.word not in w.split():
# narrower.append((w, p))
# print(poss)
weights = [float(i) / max(weights) for i in weights]
selected = choice(wrds, weights)
return selected
# this won't happen is N i selected by Node function select_N
else:
self.logger.warning(
"for id:{} word:'{}' target_meta to_N[{}] not available".format(self.sampling_id, self.word, to_N))
self.logger.warning("{}".format(self.target_samples))
# Just select randomly some other [available] key...
N_list = list(self.target_samples.keys())
i = random.randint(0, len(N_list) - 1)
N = N_list[i]
return self.sample_word(N, sampling_error_samples)
def add_sample(self, to_N, synthetised_word, probability):
probability = float(probability)
# prevent same
if synthetised_word == self.word:
return
synth_set = set(synthetised_word.split())
if len(self.set_word.intersection(synth_set)): # maybe too restricting ... ?
return
# print("adding synthetised word to id: {}, word: '{}' , sampled_word: {}".format(self.sampling_id, self.word, synthetised_word))
# add synthetised word
if to_N in self.target_samples:
self.target_samples[to_N].append((synthetised_word, probability))
else:
self.target_samples[to_N] = [(synthetised_word, probability)]
# one more sample
self.cnt_samples += 1
def add_neighbour(self, node, weight):
self.neighbours.append(node)
self.weights.append(weight)
def display(self):
print(self.word)
for k in range(len(self.neighbours)):
neig = self.neighbours[k]
neig_val = self.weights[k]
print("\t p:{:.2f}, w: {}".format(neig_val, neig.word))
```
#### File: mff-statisticky-strojovy-preklad/train_dictionary/train_util.py
```python
import os
import pickle
def pickle_data(obj, target_dir, filename):
if not os.path.exists(target_dir):
print("Target dir: '{}' not found, creating it...".format(target_dir))
os.makedirs(target_dir)
path = os.path.join(target_dir, filename)
print("Writing pickle: {}".format(path))
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def get_list_of_words(source_file):
words = []
with open(source_file, 'r') as f:
for line in f:
word = line.strip().lower()
words.append(word)
print("All words loaded from {}".format(source_file))
return words
```
#### File: jencmart/mff-statisticky-strojovy-preklad/utils.py
```python
import bisect
import random
def cdf(weights):
total = sum(weights)
result = []
cumsum = 0
for w in weights:
cumsum += w
result.append(cumsum / total)
return result
def choice(population, weights):
assert len(population) == len(weights)
cdf_vals = cdf(weights)
x = random.random()
idx = bisect.bisect(cdf_vals, x)
return population[idx]
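if __name__ == "__main__":
    # Quick sanity check: with weights [1, 2], "b" should come up roughly twice
    # as often as "a" over many draws.
    draws = [choice(["a", "b"], [1, 2]) for _ in range(10000)]
    print(draws.count("a"), draws.count("b"))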
```
|
{
"source": "jendas1/poly-classifier",
"score": 2
}
|
#### File: poly-classifier/poly_classifier/rooted_poly_decider.py
```python
import math
import networkx
from rooted_tree_classifier.log_decider import isFlexible
def get_labels(configurations):
labels = set()
for conf in configurations:
for label in conf:
labels.add(label)
return labels
def trim(labels, configurations):
# trim outputs a subset of labels that can label any sufficiently large Δ-regular tree
# lemma 5.28 in the paper
while True:
new_labels = get_new_labels(labels, configurations)
assert not (set(new_labels) - set(labels)
) # trimming labels should not introduce any new labels
if set(new_labels) == set(labels):
break
else:
labels = new_labels
return labels
def get_new_labels(old_labels, configurations):
new_labels = set()
for conf in configurations:
pot_label = conf[0]
if pot_label not in old_labels:
continue
ok = True
for cont_label in conf[1:]:
if cont_label not in old_labels:
ok = False
break
if ok:
new_labels.add(pot_label)
return new_labels
def create_graph(labels, configurations):
graph = {label: [] for label in labels}
for conf in configurations:
head = conf[0]
if head in labels:
for tail in conf[1:]:
if tail in labels:
graph[head].append(tail)
return graph
def flexible_scc_restrictions(labels, configurations):
# output: list of all label restrictions
# lemma 5.29 in the paper
# create automaton M
graph = create_graph(labels, configurations)
# find all strongly connected component
nxgraph = networkx.to_networkx_graph(graph, create_using=networkx.DiGraph)
flexible_restrictions = []
for component in networkx.strongly_connected_components(nxgraph):
representative = list(component)[0]
if isFlexible(graph, representative):
flexible_restrictions.append(component)
return flexible_restrictions
def max_depth(labels, configurations):
if not labels:
return 0
maximum = 0
for flexible_restriction in flexible_scc_restrictions(
labels, configurations):
if labels - flexible_restriction: # if we removed something
depth = max_depth(trim(flexible_restriction, configurations),
configurations)
maximum = max(maximum, depth)
else:
return math.inf
return 1 + maximum
def rooted_polynomial_classifier(configurations):
labels = get_labels(configurations)
return max_depth(trim(labels, configurations), configurations)
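if __name__ == "__main__":
    # Illustrative call (assumption: each configuration string lists the parent
    # label first, then the children). A single self-looping label is flexible,
    # so the expected result is unbounded depth (math.inf).
    print(rooted_polynomial_classifier(["111"]))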
```
|
{
"source": "jendas1/rooted-tree-classifier",
"score": 3
}
|
#### File: rooted-tree-classifier/rooted_tree_classifier/constant_decider.py
```python
import sys
if __name__ == "__main__":
from common import powerset
from log_star_decider import _is_log_star_solvable
else:
from .common import powerset
from .log_star_decider import _is_log_star_solvable
from .constant_synthesizer import find_algorithm
VERBOSE = False
def is_constant_solvable(constraints):
labels = set("".join(constraints))
for reduced_labels in powerset(labels):
reduced_constraints = [constraint for constraint in constraints if
(constraint[0] in reduced_labels and constraint[1] in reduced_labels and constraint[
2] in reduced_labels)]
for label in reduced_labels:
for constraint in reduced_constraints:
if constraint.startswith(label + label) or constraint.endswith(label + label):
if _is_log_star_solvable(reduced_constraints, list(reduced_labels), label):
if VERBOSE:
find_algorithm(reduced_constraints)
return True
return False
if __name__ == "__main__":
if len(sys.argv) == 2 and (sys.argv[1] == "-v" or sys.argv[1] == "--verbose"):
VERBOSE = True
constraints = input().split()
if is_constant_solvable(constraints):
print("O(1)")
else:
print("ω(1)")
```
#### File: rooted-tree-classifier/tests/tests.py
```python
import unittest, subprocess, sys
from io import StringIO
from unittest.mock import patch
from rooted_tree_classifier import decide_complexity
class TestE2E(unittest.TestCase):
def testDeciderProblem1(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"111", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "O(1)")
self.assertEqual(lines[1], "")
def testDeciderProblem2(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"121 123 212 131 323 454", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "Θ(log*n)")
self.assertEqual(lines[1], "")
def testDeciderProblem3(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"454", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "unsolvable")
self.assertEqual(lines[1], "")
def testDeciderProblem4(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"1M1 010 M11 M01", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "O(1)")
self.assertEqual(lines[1], "")
def testDeciderProblem5(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"121 112 212", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "Θ(log n)")
self.assertEqual(lines[1], "")
def testDeciderProblem6(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"212 122 111", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "O(1)")
self.assertEqual(lines[1], "")
def testDeciderProblem7(self):
with patch('sys.stdout', new=StringIO()) as fakeOutput:
decide_complexity("212 313 323 131 1x1 xx1".split())
self.assertEqual(fakeOutput.getvalue().strip(), 'Θ(log*n)')
# (1:22)
# (1:2x)
# (1:xx)
# (2:11)
# (2:1x)
# (2:xx)
# (x:1a)
# (x:2a)
# (x:xa)
# (x:aa)
# (a:bb)
# (b:aa)
def testDeciderProblem8(self):
with patch('sys.stdout', new=StringIO()) as fakeOutput:
decide_complexity("212 21x x1x 121 12x x2x 1xa 2xa axa bab aba".split())
self.assertEqual(fakeOutput.getvalue().strip(), 'Θ(n^(1/2))')
def testDeciderProblem9(self):
with patch('sys.stdout', new=StringIO()) as fakeOutput:
decide_complexity("212 121 12x 1xa bab aba".split())
self.assertEqual(fakeOutput.getvalue().strip(), 'Θ(n^(1/2))')
def testDecider1(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"111", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "O(1)")
self.assertEqual(lines[1], "")
def testDecider2(self):
result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'], input=b"121 212", capture_output=True)
lines = str(result.stdout.decode('utf-8')).split('\n')
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "Θ(n)")
self.assertEqual(lines[1], "")
```
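The suite drives the package through its module CLI; the same invocation can be reproduced by hand, mirroring `testDecider2`:
```python
import subprocess
import sys

result = subprocess.run([sys.executable, '-m', 'rooted_tree_classifier'],
                        input=b"121 212", capture_output=True)
print(result.stdout.decode('utf-8'))  # per testDecider2: "Θ(n)"
```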
|
{
"source": "jendelel/codenames",
"score": 3
}
|
#### File: codenames/notebooks/environment.py
```python
import numpy as np
from enum import Enum
from itertools import chain, combinations
class Team(Enum):
BLUE = 1
RED = 2
class State:
    def __init__(self, blue, red, assasin=None, neutral=None):
        self.blue = blue
        self.red = red
        # Avoid shared mutable default arguments: give each State its own sets.
        self.assasin = assasin if assasin is not None else set()
        self.neutral = neutral if neutral is not None else set()
@property
def word_sets(self):
yield self.blue
yield self.red
yield self.assasin
yield self.neutral
@property
def words(self):
for word_set in self.word_sets:
for word in word_set:
yield word
@property
def hidden_str(self):
return str(list(self.words))
@property
def truth_str(self):
return """
Blue:\t\t{},
Red:\t\t{},
Assasin:\t{},
Neutral:\t{}
""".format(self.blue, self.red, self.assasin, self.neutral)
class Clue:
    def __init__(self, word, number, words_meant=None):
        self.word = word
        self.number = number
        # Same mutable-default pitfall as State: default to a fresh set.
        self.words_meant = words_meant if words_meant is not None else set()
def __str__(self):
return '{} ({}, {})'.format(self.word, self.number, self.words_meant)
def __repr__(self):
return self.__str__()
class DistanceGuesser:
def __init__(self, word_vectors, vocab, card_words):
self.word_vectors = word_vectors
self.vocab = vocab
self.card_words = card_words
def _distance(self, a, b):
return self.word_vectors.distance(a, b)
def guess(self, state, clue, iteration, team=Team.BLUE):
positive_words = state.blue if team == Team.BLUE else state.red
min_word = None
min_loss = np.inf
for guess_word in state.words:
loss = self._distance(clue.word, guess_word)
if loss < min_loss:
print(" Guess attempt:", guess_word, loss)
min_loss = loss
min_word = guess_word
return min_word
class DistanceMaster:
def __init__(self, word_vectors, vocab, card_words):
self.word_vectors = word_vectors
self.vocab = vocab
self.card_words = card_words
self.mem = {}
@staticmethod
def _powerset(iterable):
"""powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def give_clue(self, state, team=Team.BLUE):
positive_words = state.blue if team == Team.BLUE else state.red
results = []
for subset in DistanceMaster._powerset(positive_words):
if len(subset) < 1:
continue
#words_meant = list(positive_words)
words_meant = list(subset)
#print(" Words meant:", words_meant)
neg_words = set(state.words) - set(words_meant)
clue = Clue(word=self._find_clue_word(state, words_meant, team=team), number=len(words_meant), words_meant=words_meant)
#loss = self._loss(state, clue.word, team=team) # expected loss
loss = self._loss2(words_meant, neg_words, clue.word)
results.append((loss, clue))
#print(" Clue chosen:", clue, loss)
results = sorted(results, key=lambda x: x[0])
result_clue = results[0][1]
return result_clue
def _distance(self, a, b):
if b < a:
a, b = b, a
if a not in self.mem:
self.mem[a] = {b: self.word_vectors.distance(a, b)}
elif b not in self.mem[a]:
self.mem[a][b] = self.word_vectors.distance(a, b)
return self.mem[a][b]
def _loss(self, state, clue_word, team=Team.BLUE, c=2):
# 1. loss is minimized
# 2. if blue, subtract distance to blue words with weight c, add distance to red words with weight c, add distance to negative words with weight 1, add distance to assasin with weight 10
# 3. if red, invert loss terms for teams
blue_loss = 0
red_loss = 0
neutral_loss = 0
assasin_loss = 0
for word in state.blue:
blue_loss += self._distance(clue_word, word)
for word in state.red:
red_loss += self._distance(clue_word, word)
for word in state.neutral:
neutral_loss += self._distance(clue_word, word)
for word in state.assasin:
assasin_loss += self._distance(clue_word, word)
blue_loss /= len(state.blue)
red_loss /= len(state.red)
neutral_loss /= len(state.neutral)
assasin_loss /= len(state.assasin)
        if team == Team.BLUE:
            loss = +blue_loss - red_loss
        elif team == Team.RED:
            loss = -blue_loss + red_loss
return loss * c - neutral_loss - assasin_loss * 10
def _loss2(self, positive_words, negative_words, clue_word):
pos_loss = 0
neg_loss = 0
for word in positive_words:
pos_loss += self._distance(clue_word, word)
for word in negative_words:
neg_loss += self._distance(clue_word, word)
pos_loss /= len(positive_words)
neg_loss /= len(negative_words)
return (+pos_loss - neg_loss) * len(positive_words)
def _find_clue_word(self, state, words_meant, team=Team.BLUE):
def filter_word_by_rules(clue_word):
for word_set in state.word_sets:
for word in word_set:
if clue_word in word or word in clue_word: # TODO replace this check by a better one
return True
return False
min_loss = np.inf
min_word = None
assert team == Team.BLUE
pos_words = set(words_meant)
neg_words = set(state.words) - pos_words
for clue_word in self.vocab:
if filter_word_by_rules(clue_word):
continue
#loss = self._loss(state, clue_word, team=team) # TODO: This should reflect the word subset choice.
loss = self._loss2(pos_words, neg_words, clue_word)
if loss < min_loss:
# print(" Clue attempt:", clue_word, loss)
min_loss = loss
min_word = clue_word
return min_word
def reward_function(state, clue, guess, iteration, team=Team.BLUE):
if guess in state.assasin: # lose game
return -100
elif guess in state.neutral: # lose a turn
return -1
elif (guess in state.blue and team == Team.BLUE) or (guess in state.red and team == Team.RED): # get a turn + correct guess
return +iteration + 1
else: # incorrect guess and other team gets a point
return -iteration - 1
class Configuration:
blue = (5, 5)
red = (5, 5)
assasin = (1, 1)
neutral = (5, 5)
@staticmethod
def _rand(conf_tuple):
return np.random.randint(conf_tuple[0], conf_tuple[1] + 1)
def instantiate(self):
c = Configuration()
c.blue = Configuration._rand(self.blue)
c.red = Configuration._rand(self.red)
c.assasin = Configuration._rand(self.assasin)
c.neutral = Configuration._rand(self.neutral)
return c
class StateGenerator:
def __init__(self, word_vectors, vocab, card_words):
self.configuration = Configuration()
self.word_vectors = word_vectors
self.vocab = vocab
self.card_words = list(card_words)
def generate_state(self):
c = self.configuration.instantiate()
total = c.blue + c.red + c.assasin + c.neutral
chosen_idx = np.random.choice(len(self.card_words), size=total, replace=False)
chosen = [self.card_words[idx] for idx in chosen_idx]
ts = c.blue + c.red
ass = ts + c.assasin
return State(
blue=set(chosen[:c.blue]), red=set(chosen[c.blue:c.blue + c.red]), assasin=set(chosen[ts:ass]), neutral=set(chosen[ass:]))
```
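A self-contained smoke test of the pieces above; this is a sketch with a stubbed-in metric (any object exposing `.distance(a, b)` works as `word_vectors` here, gensim's `KeyedVectors` being the presumable intended backend):
```python
class ToyVectors:
    def distance(self, a, b):
        # Crude stand-in metric: words sharing a first letter count as close.
        return 0.0 if a[0] == b[0] else 1.0

card_words = ["ant", "axe", "bat", "bee", "cat", "cow", "dog", "doe", "elk",
              "eel", "fox", "fly", "gnu", "hen", "owl", "pig", "ram", "rat"]
vectors = ToyVectors()
generator = StateGenerator(vectors, vocab=["apple", "zebra"], card_words=card_words)
state = generator.generate_state()
master = DistanceMaster(vectors, ["apple", "zebra"], card_words)
print(master.give_clue(state))  # e.g. apple (2, ['ant', 'axe'])
```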
|
{
"source": "jendelel/rhl-algs",
"score": 2
}
|
#### File: jendelel/rhl-algs/deep_coach.py
```python
from PyQt5 import QtGui, QtCore, QtWidgets
from collections import namedtuple
import time
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import utils
HumanFeedback = namedtuple('HumanFeedback', ['feedback_value'])
SavedAction = namedtuple('SavedAction', ['state', 'action', 'logprob'])
SavedActionsWithFeedback = namedtuple('SavedActionsWithFeedback', ['saved_actions', 'final_feedback'])
def parse_args(parser):
parser.add_argument('--batch_size', type=int, default=16, help='batch_size (default: 16)')
    parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate (default: 0.001)')
    parser.add_argument('--eligibility_decay', type=float, default=0.35, help='Eligibility trace decay (default: 0.35)')
parser.add_argument("--coach_window_size", type=int, default=10, help="Number of transitions in a window.")
parser.add_argument('--entropy_reg', type=float, default=1.5, help='Entropy regularization beta')
parser.add_argument('--feedback_delay_factor', type=int, default=1, help='COACH Feedback delay factor.')
parser.add_argument(
'--ppo_eps',
type=float,
default=0.2,
help='PPO-like clipping of the loss. Negative value turns the ppo clipping off.')
parser.add_argument('--no_cuda', action='store_true', default=True, help='disables CUDA training')
class DeepCoach():
def __init__(self, window, args, env):
self.window = window
self.args = args
self.env = env
torch.manual_seed(args.seed)
self.device = torch.device("cuda" if not args.no_cuda else "cpu")
if window is not None:
self.setup_ui(window)
PolicyNet = CategoricalPolicyNet if hasattr(self.env.action_space, 'n') else GaussianPolicyNet
self.policy_net = PolicyNet(env.observation_space.shape[0], env.action_space).to(device=self.device)
self.optimizer = torch.optim.RMSprop(self.policy_net.parameters(), lr=args.learning_rate)
self.feedback = None
def setup_ui(self, window):
@QtCore.pyqtSlot(QtGui.QKeyEvent)
def keyPressed(event):
numpad_mod = int(event.modifiers()) & QtCore.Qt.KeypadModifier
if (event.key() == QtCore.Qt.Key_Minus and numpad_mod) or event.key() == QtCore.Qt.Key_M:
self.buttonClicked(-1)
elif (event.key() == QtCore.Qt.Key_Plus and numpad_mod) or event.key() == QtCore.Qt.Key_P:
self.buttonClicked(1)
else:
print("ERROR: Unknown key: ", event)
hor = QtWidgets.QHBoxLayout()
for i in range(-1, 2):
if i == 0:
continue
but = QtWidgets.QPushButton()
but.setText(str(i))
but.clicked.connect(lambda bla, def_arg=i: self.buttonClicked(def_arg))
hor.addWidget(but)
window.feedback_widget.setLayout(hor)
window.keyPressedSignal.connect(keyPressed)
def buttonClicked(self, value):
self.feedback = HumanFeedback(feedback_value=value)
def to_tensor(self, value):
return torch.tensor(value).float().to(device=self.device)
def select_action(self, state):
state = torch.from_numpy(state).to(device=self.device).float()
action, logprob, entropy = self.policy_net(state)
return logprob, action.detach().cpu().numpy(), entropy
def update_net(self, savedActionsWithFeedback, current_entropy):
if not savedActionsWithFeedback:
return
print("training")
e_losses = []
for saf in savedActionsWithFeedback:
final_feedback = saf.final_feedback
for n, sa in enumerate(saf.saved_actions[::-1]):
log_p_old = torch.tensor(sa.logprob).to(self.device)
log_prob, _, _ = self.select_action(sa.state)
probs_ratio = (log_prob - log_p_old).exp()
if self.args.ppo_eps > 0:
surr1 = final_feedback * probs_ratio
surr2 = torch.clamp(probs_ratio, 1.0 - self.args.ppo_eps, 1.0 + self.args.ppo_eps) * final_feedback
loss_term = torch.min(surr1, surr2)
else:
loss_term = probs_ratio * final_feedback
e_loss = (self.args.eligibility_decay**(n)) * loss_term
e_loss = torch.sum(e_loss, dim=0) # Sum the loss across all actions.
e_losses.append(e_loss)
        mean_scale = self.to_tensor(1 / len(savedActionsWithFeedback))
        loss = -(mean_scale * torch.stack(e_losses).to(device=self.device).sum() +
                 torch.sum(self.args.entropy_reg * current_entropy, dim=0))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def processFeedback(self, savedActions, buffer):
feedback = self.feedback.feedback_value
if feedback is not None and len(savedActions) > 0:
print("Feedback: ", feedback)
if feedback > 0:
self.window.viewer.num_pos_feedback += 1
elif feedback < 0:
self.window.viewer.num_neg_feedback += 1
window_size = min(len(savedActions), self.args.coach_window_size)
del savedActions[:-(window_size + self.args.feedback_delay_factor)]
window = savedActions[:-self.args.feedback_delay_factor] # Copy the list
savedActionsWithFeedback = SavedActionsWithFeedback(saved_actions=window, final_feedback=feedback)
buffer.append(savedActionsWithFeedback)
self.feedback = None
def train(self):
buffer = []
running_reward = 10
for i_episode in range(1, 10000):
state, ep_reward = self.env.reset(), 0
savedActions = []
for t in range(1, 10000): # Don't infinite loop while learning
logprob, action, entropy = self.select_action(state)
old_state = state
state, reward, done, _ = self.env.step(action)
ep_reward += reward
savedActions.append(SavedAction(state=state, action=action, logprob=logprob.detach().cpu().numpy()))
self.window.render(self.env)
if not self.window.isVisible():
break
if self.feedback:
self.processFeedback(savedActions, buffer)
                    if buffer and len(buffer[-1].saved_actions) > 0 and self.window.trainCheck.isChecked():
self.update_net([buffer[-1]], self.select_action(old_state)[2])
time.sleep(self.window.renderSpin.value())
if len(buffer) > 50:
del buffer[:10]
if len(buffer) >= self.args.batch_size and self.window.trainCheck.isChecked():
indicies = random.sample(range(len(buffer)), self.args.batch_size)
mini_batch = [buffer[i] for i in indicies]
self.update_net(mini_batch, entropy)
print("Action: {}, Reward: {:.2f}, ep_reward: {:.2f}".format(action, reward, ep_reward))
if done:
break
if not self.window.isVisible():
break
running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
print("Running reward %d" % running_reward)
def start(window, args, env):
alg = DeepCoach(window, args, env)
print("Number of trainable parameters:", utils.count_parameters(alg.policy_net))
alg.train()
env.close()
class CategoricalPolicyNet(nn.Module):
def __init__(self, observation_space_shape, action_space):
super(CategoricalPolicyNet, self).__init__()
action_dim = action_space.n
self.hidden1 = nn.Linear(observation_space_shape, 16)
# self.hidden2 = nn.Linear(30, 30)
self.action_probs = nn.Linear(16, action_dim)
def forward(self, x):
        x = torch.tanh(self.hidden1(x))  # F.tanh is deprecated; use torch.tanh
# x = F.relu(self.hidden2(x))
logits = self.action_probs(x)
action = torch.argmax(logits, dim=-1)
distribution = torch.distributions.Categorical(logits=logits)
return action, distribution.log_prob(action), distribution.entropy()
class GaussianPolicyNet(nn.Module):
def __init__(self, observation_space_shape, action_space):
super(GaussianPolicyNet, self).__init__()
action_dim = action_space.shape[-1]
self.hidden1 = nn.Linear(observation_space_shape, 16)
# self.hidden2 = nn.Linear(30, 30)
self.mu_head = nn.Linear(16, action_dim)
self.log_std = torch.nn.parameter.Parameter(-0.5 * torch.ones(action_dim))
def forward(self, x):
        x = torch.tanh(self.hidden1(x))  # F.tanh is deprecated; use torch.tanh
# x = F.relu(self.hidden2(x))
mean = self.mu_head(x)
std = self.log_std.expand_as(mean).exp()
distribution = torch.distributions.Normal(mean, std)
action = torch.normal(mean, std)
return action, distribution.log_prob(action), distribution.entropy()
```
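To make the PPO-style clipping inside `update_net` concrete, a tiny numeric sketch of the clipped surrogate on toy values (assuming positive feedback and the default `--ppo_eps` of 0.2):
```python
import torch

logp_old = torch.tensor([0.0])
logp_new = torch.tensor([0.5])
ratio = (logp_new - logp_old).exp()   # exp(0.5) ~ 1.65
final_feedback = 1.0
eps = 0.2
surr1 = final_feedback * ratio                                      # ~1.65
surr2 = torch.clamp(ratio, 1.0 - eps, 1.0 + eps) * final_feedback   # 1.2
print(torch.min(surr1, surr2))  # tensor([1.2000]) -- the clipped term wins
```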
#### File: jendelel/rhl-algs/main.py
```python
import argparse
import functools
from ui import MainWindow, create_app
import time
from gym_utils import make_env, toggle_recording
running = False
def startRl(window):
global running
if running:
return
running = True
parser = argparse.ArgumentParser(description='Reinforcement human learning')
parser.add_argument('--alg', type=str, default="random_alg", help='Name of RL algorithm.')
parser.add_argument('--env', type=str, default="CartPole-v0", help='Name of Gym environment.')
parser.add_argument('--seed', type=int, default=543, help='random seed (default: 543)')
alg, args = window.loadAlg(parser)
env = make_env(args.env, window.viewer, alg_name=args.alg, record=window.recordCheck.isChecked())
window.recordCheck.stateChanged.connect(functools.partial(toggle_recording, env_object=env))
print(args)
env.seed(args.seed)
window.viewer.start_time = time.time()
alg.start(window, args, env)
    running = False  # allow a new run once the algorithm has returned
def main():
app = create_app()
window = MainWindow()
window.startBut.clicked.connect(functools.partial(startRl, window=window))
window.show()
app.exec_()
if __name__ == "__main__":
main()
```
#### File: jendelel/rhl-algs/manual.py
```python
from PyQt5 import QtGui, QtCore
import time
import numpy as np
# Dummy function because manual control does not need extra parameters
def parse_args(parser):
pass
class ManualControl():
def __init__(self, window, args, env):
self.window = window
self.args = args
self.env = env
if window is not None:
self.setup_ui(window)
self.last_action = 0
def setup_ui(self, window):
@QtCore.pyqtSlot(QtGui.QKeyEvent)
def keyPressed(event):
if event.key() == QtCore.Qt.Key_0:
self.last_action = 0
elif event.key() == QtCore.Qt.Key_1:
self.last_action = 1
elif event.key() == QtCore.Qt.Key_2:
self.last_action = 2
elif event.key() == QtCore.Qt.Key_3:
self.last_action = 3
elif event.key() == QtCore.Qt.Key_4:
self.last_action = 4
elif event.key() == QtCore.Qt.Key_5:
self.last_action = 5
else:
print("ERROR: Unknown key: ", event)
window.keyPressedSignal.connect(keyPressed)
def train(self):
for i_episode in range(1, 10000):
state, ep_reward = self.env.reset(), 0
for t in range(1, 10000): # Don't infinite loop while learning
action = np.clip(self.last_action, 0, int(self.env.action_space.n) - 1)
state, reward, done, _ = self.env.step(action)
ep_reward += reward
self.window.render(self.env)
if not self.window.isVisible():
break
time.sleep(self.window.renderSpin.value())
print("State: {}, Action: {}, Reward: {}, ep_reward: {}".format(state, action, reward, ep_reward))
if done:
break
if not self.window.isVisible():
break
def start(window, args, env):
alg = ManualControl(window, args, env)
alg.train()
env.close()
```
#### File: jendelel/rhl-algs/random_alg.py
```python
import time
# Dummy function because the random alg does not need extra parameters
def parse_args(parser):
pass
def start(window, args, env):
def select_action(state):
return env.action_space.sample()
for i_episode in range(1, 10):
state, ep_reward = env.reset(), 0
for t in range(1, 10000): # Don't infinite loop while learning
action = select_action(state)
state, reward, done, _ = env.step(action)
window.render(env)
time.sleep(window.renderSpin.value())
if not window.isVisible():
break
ep_reward += reward
print("Action: {}, Reward: {}, ep_reward: {}".format(action, reward, ep_reward))
if done:
break
if not window.isVisible():
break
env.close()
```
#### File: jendelel/rhl-algs/rcppo.py
```python
from PyQt5 import QtGui, QtCore, QtWidgets
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import numpy as np
import scipy.signal
from utils import utils, logx
def parse_args(parser):
parser.add_argument(
'--epochs',
type=int,
default=400,
help='Number of epochs of interaction (equivalent to'
'number of policy updates) to perform. default:400')
parser.add_argument(
        '--eval_epochs', type=int, default=10, help='Number of epochs to render for evaluation. default: 10')
parser.add_argument(
'--batch_size', type=int, default=10, help='Batch size (how many episodes per batch). default: 10')
    parser.add_argument(
        '--lr_pi', type=float, default=3e-4, help='Learning rate for policy optimizer. (default: 3e-4)')
    parser.add_argument(
        '--lr_V', type=float, default=1.5e-4, help='Learning rate for value function optimizer. (default: 1.5e-4)')
    parser.add_argument(
        '--lr_lambda', type=float, default=5e-7, help='Learning rate for the constraint multiplier lambda. (default: 5e-7)')
parser.add_argument(
'--train_v_iters',
type=int,
default=70,
help='Number of gradient descent steps to take on value function per epoch.(default:70)')
parser.add_argument(
"--gae_lambda",
type=float,
default=0.95,
help="Lambda for GAE-Lambda. (Always between 0 and 1, close to 1., default: 0.95)")
    parser.add_argument(
        "--gae_gamma", type=float, default=0.999, help="Discount factor. (Always between 0 and 1., default: 0.999)")
    parser.add_argument(
        "--rcppo_alpha", type=float, default=0.5, help="Constraint bound. (Always between 0 and 1., default: 0.5)")
parser.add_argument(
"--clip_param",
type=float,
default=0.2,
help="Hyperparameter for clipping in the policy objective."
"Roughly: how far can the new policy go from the old policy while"
"still profiting (improving the objective function)? The new policy"
"can still go farther than the clip_ratio says, but it doesn't help"
"on the objective anymore. (Usually small, 0.1 to 0.3.) default:0.2")
parser.add_argument(
"--target_kl",
type=float,
default=0.2,
help="Roughly what KL divergence we think is appropriate"
"between new and old policies after an update. This will get used"
"for early stopping. (Usually small, 0.01 or 0.05.) default:0.01")
parser.add_argument(
'--train_pi_iters',
type=int,
default=70,
help="Maximum number of gradient descent steps to take"
"on policy loss per epoch. (Early stopping may cause optimizer"
"to take fewer than this.) Default: 70")
parser.add_argument(
'--max_episode_len',
type=int,
default=1000,
help='Maximum length of trajectory / episode / rollout. default: 1000')
parser.add_argument(
'--save_freq',
type=int,
default=10,
help='How often (in terms of gap between epochs) to save the current policy and value function. default: 10'
)
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
class RCPPO():
def __init__(self, window, args, env):
self.window = window
self.args = args
self.env = env
torch.manual_seed(args.seed)
np.random.seed(args.seed)
self.device = torch.device("cuda" if not args.no_cuda else "cpu")
self.render_enabled = True
self.renderSpin = None
self.logger = logx.EpochLogger(output_dir=utils.get_log_dir(args))
self.const_lambda = torch.zeros([], requires_grad=True, device=self.device)
if window is not None:
self.setup_ui(window)
self.actor_critic = ActorCritic(
observation_space_shape=self.env.unwrapped.observation_space.shape[0],
action_space=self.env.unwrapped.action_space).to(device=self.device)
self.optimizer_pi = torch.optim.Adam(self.actor_critic.policy.parameters(), lr=args.lr_pi)
self.optimizer_lambda = torch.optim.Adam([self.const_lambda], lr=args.lr_lambda)
self.optimizer_V = torch.optim.Adam(self.actor_critic.value_function.parameters(), lr=args.lr_V)
def setup_ui(self, window):
@QtCore.pyqtSlot(QtGui.QKeyEvent)
def keyPressed(event):
print("ERROR: Unknown key: ", event)
@QtCore.pyqtSlot(int)
def checkedChanged(state):
print("State: ", state)
self.render_enabled = state > 0
hor = QtWidgets.QHBoxLayout()
self.renderSpin = QtWidgets.QSpinBox()
self.renderSpin.setRange(1, 1000)
self.renderSpin.setSingleStep(5)
self.renderSpin.setValue(100)
renderCheck = QtWidgets.QCheckBox()
renderCheck.setChecked(True)
renderCheck.stateChanged.connect(checkedChanged)
hor.addWidget(self.renderSpin)
hor.addWidget(renderCheck)
window.feedback_widget.setLayout(hor)
window.keyPressedSignal.connect(keyPressed)
def select_action(self, obs, action_taken=None):
action, logp, logp_pi = self.actor_critic.policy(obs, action_taken)
return action, logp, logp_pi
def update_net(self, buffer_minibatch):
obs, act, adv, ret, logp_old, constraints = [torch.Tensor(x).to(self.device) for x in buffer_minibatch]
_, logp, _ = self.select_action(obs, action_taken=act)
def ppo_loss(logp, logp_old, adv, clipped_info=False):
ratio = (logp - logp_old).exp()
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - self.args.clip_param, 1.0 + self.args.clip_param) * adv
if clipped_info:
clipped = (ratio > (1 + self.args.clip_param)) | (ratio < (1 - self.args.clip_param))
cf = (clipped.float()).mean()
return -torch.min(surr1, surr2).mean(), cf
return -torch.min(surr1, surr2).mean()
pi_loss_old = ppo_loss(logp, logp_old, adv)
# Estimate the entropy E[-logp]
entropy_est = (-logp).mean()
# Policy gradient steps
for i in range(self.args.train_pi_iters):
_, logp, _ = self.select_action(obs, action_taken=act)
pi_loss = ppo_loss(logp, logp_old, adv)
self.optimizer_pi.zero_grad()
pi_loss.backward()
self.optimizer_pi.step()
_, logp, _ = self.select_action(obs, action_taken=act)
kl = (logp_old - logp).mean()
if kl > 1.5 * self.args.target_kl:
self.logger.log('Early stopping at step %d due to reaching max kl.' % i)
break
self.logger.store(train_StopIter=i)
# Value function learning
# MSE of the value function and the returns
v = self.actor_critic.value_function(obs)
v_loss_old = F.mse_loss(v, ret)
for _ in range(self.args.train_v_iters):
v = self.actor_critic.value_function(obs)
v_loss = F.mse_loss(v, ret)
# V function gradient step
self.optimizer_V.zero_grad()
v_loss.backward()
self.optimizer_V.step()
        # Dual ascent on the Lagrange multiplier: set its gradient manually
        # to -(E[constraint] - alpha) and take an optimizer step.
        self.optimizer_lambda.zero_grad()
        self.const_lambda.grad = -(constraints.mean() - self.args.rcppo_alpha).detach()
        self.optimizer_lambda.step()
        # Project lambda back onto [0, inf) in place; rebinding const_lambda
        # here would detach it from the optimizer.
        with torch.no_grad():
            self.const_lambda.clamp_(min=0)
_, logp, _, v = self.actor_critic(obs, act)
pi_loss_new, clipped_info = ppo_loss(logp, logp_old, adv, clipped_info=True)
v_loss_new = F.mse_loss(v, ret)
kl = (logp_old - logp).mean()
self.logger.store(
loss_LossPi=pi_loss_new,
loss_LossV=v_loss_old,
metrics_KL=kl,
metrics_Entropy=entropy_est,
train_ClipFrac=clipped_info,
loss_DeltaLossPi=(pi_loss_new - pi_loss_old),
loss_DeltaLossV=(v_loss_new - v_loss_old))
def train(self):
self.logger.save_config({"args:": self.args})
buffer = RCPPOBuffer(
obs_dim=self.env.unwrapped.observation_space.shape,
act_dim=self.env.unwrapped.action_space.shape,
size=(self.args.batch_size + 1) * self.args.max_episode_len,
gamma=self.args.gae_gamma,
lam=self.args.gae_lambda)
tot_steps = 0
var_counts = tuple(
utils.count_parameters(module)
for module in [self.actor_critic.policy, self.actor_critic.value_function])
self.logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
start_time = time.time()
obs, reward, done, episode_ret, episode_len = self.env.reset(), 0, False, 0, 0
for epoch in range(0, self.args.epochs):
# Set the network in eval mode (e.g. Dropout, BatchNorm etc.)
self.actor_critic.eval()
for t in range(self.args.max_episode_len):
action, _, logp_t, v_t = self.actor_critic(torch.Tensor(obs).unsqueeze(dim=0).to(self.device))
# Save and log
assert self.args.env.startswith("Lunar")
constraint = np.abs(obs[4])
buffer.store(obs, action.detach().cpu().numpy(), reward, v_t.item(), logp_t.detach().cpu().numpy(), constraint)
self.logger.store(vals_VVals=v_t)
obs, reward, done, _ = self.env.step(action.detach().cpu().numpy()[0])
episode_ret += reward
episode_len += 1
tot_steps += 1
self.window.processEvents()
if self.render_enabled and epoch % self.renderSpin.value() == 0:
self.window.render(self.env)
time.sleep(self.window.renderSpin.value())
if not self.window.isVisible():
return
                terminal = done or (episode_len == self.args.max_episode_len)
                # Also finish the path when the step loop runs out, warning if
                # the trajectory was cut off mid-episode (Spinning Up-style).
                epoch_ended = (t == self.args.max_episode_len - 1)
                if terminal or epoch_ended:
                    if not terminal:
                        print('Warning: trajectory cut off by epoch at %d steps.' % episode_len)
last_val = reward if done else self.actor_critic.value_function(
torch.Tensor(obs).to(self.device).unsqueeze(dim=0)).item()
assert self.args.env.startswith("Lunar")
last_constraint = np.abs(obs[4])
buffer.finish_path(last_val=last_val, last_const=last_constraint, const_lambda=self.const_lambda.item())
if epoch % self.args.batch_size == 0:
self.actor_critic.train() # Switch module to training mode
self.update_net(buffer.get())
self.actor_critic.eval()
if terminal:
self.logger.store(train_EpRet=episode_ret, train_EpLen=episode_len)
obs, reward, done, episode_ret, episode_len = self.env.reset(), 0, False, 0, 0
break
if (epoch % self.args.save_freq == 0) or (epoch == self.args.epochs - 1):
self.logger.save_state({'env': self.env.unwrapped}, self.actor_critic, None)
pass
# Log info about epoch
self.logger.log_tabular(tot_steps, 'train/Epoch', epoch)
self.logger.log_tabular(tot_steps, 'train/EpRet', with_min_and_max=True)
self.logger.log_tabular(tot_steps, 'train/EpLen', average_only=True)
self.logger.log_tabular(tot_steps, 'vals/VVals', with_min_and_max=True)
self.logger.log_tabular(tot_steps, 'TotalEnvInteracts', tot_steps)
if epoch % self.args.batch_size == 0:
self.logger.log_tabular(tot_steps, 'loss/LossPi', average_only=True)
self.logger.log_tabular(tot_steps, 'loss/LossV', average_only=True)
self.logger.log_tabular(tot_steps, 'loss/DeltaLossPi', average_only=True)
self.logger.log_tabular(tot_steps, 'loss/DeltaLossV', average_only=True)
self.logger.log_tabular(tot_steps, 'metrics/Entropy', average_only=True)
self.logger.log_tabular(tot_steps, 'metrics/KL', average_only=True)
self.logger.log_tabular(tot_steps, 'train/ClipFrac', average_only=True)
self.logger.log_tabular(tot_steps, 'train/StopIter', average_only=True)
self.logger.log_tabular(tot_steps, 'train/Time', time.time() - start_time)
self.logger.dump_tabular()
def eval(self):
# Final evaluation
print("Eval")
episode_reward = 0
for t in range(self.args.eval_epochs):
state, done = self.env.reset(), False
while not done:
action = self.actor_critic.policy.eval(torch.Tensor(state).to(self.device)).detach().cpu().numpy()
# Choose greedy action this time
state, reward, done, _ = self.env.step(action)
episode_reward += reward
if self.render_enabled:
self.window.render(self.env)
time.sleep(self.window.renderSpin.value())
if not self.window.isVisible():
return
def start(window, args, env):
alg = RCPPO(window, args, env)
print("Number of trainable parameters:", utils.count_parameters(alg.actor_critic))
alg.train()
alg.eval()
print("Done")
env.close()
class RCPPOBuffer:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
self.obs_buf = np.zeros(self._combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(self._combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.const_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32)
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(self, obs, act, rew, val, logp, constraint):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.const_buf[self.ptr] = constraint
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val=0, last_const=0, const_lambda=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
        # per-step constraint values for this trajectory
        constraints = np.append(self.const_buf[path_slice], last_const)
        # the next two lines implement the GAE-Lambda advantage calculation
        # \delta_t = r_t + \gamma * V(s_{t+1}) - V(s_t)
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        # GAE advantage: A_t = \sum_l (\gamma * \lambda)^l * \delta_{t+l}
self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = self._discount_cumsum(rews - const_lambda * constraints, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
# assert self.ptr == self.max_size # buffer has to be full before you can get
buffer_slice = slice(0, self.ptr)
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
# adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
adv_mean, adv_std = np.mean(self.adv_buf[buffer_slice]), np.std(self.adv_buf[buffer_slice])
self.adv_buf[buffer_slice] = (self.adv_buf[buffer_slice] - adv_mean) / (adv_std + 1e-5)
# TODO: Consider returning a dictionary.
return [
self.obs_buf[buffer_slice], self.act_buf[buffer_slice], self.adv_buf[buffer_slice],
self.ret_buf[buffer_slice], self.logp_buf[buffer_slice], self.const_buf[buffer_slice]
]
def _combined_shape(self, length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def _discount_cumsum(self, x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
class MLP(nn.Module):
def __init__(self, layers, activation=torch.tanh, output_activation=None, output_squeeze=False):
super(MLP, self).__init__()
self.layers = nn.ModuleList()
self.activation = activation
self.output_activation = output_activation
self.output_squeeze = output_squeeze
for i, layer in enumerate(layers[1:]):
self.layers.append(nn.Linear(layers[i], layer))
nn.init.zeros_(self.layers[i].bias)
def forward(self, input):
x = input
for layer in self.layers[:-1]:
x = self.activation(layer(x))
if self.output_activation is None:
x = self.layers[-1](x)
else:
x = self.output_activation(self.layers[-1](x))
return x.squeeze() if self.output_squeeze else x
class CategoricalPolicyNet(nn.Module):
def __init__(self, observation_space_shape, hidden_sizes, activation, output_activation, action_dim):
super(CategoricalPolicyNet, self).__init__()
self.logits = MLP(layers=[observation_space_shape] + list(hidden_sizes) + [action_dim], activation=activation)
def forward(self, x, action_taken=None):
logits = self.logits(x)
policy = Categorical(logits=logits)
# Sample the action.
pi = policy.sample()
logp_pi = policy.log_prob(pi).squeeze()
if action_taken is not None:
logp = policy.log_prob(action_taken).squeeze()
else:
logp = None
return pi, logp, logp_pi
def eval(self, x):
logits = self.logits(x)
return torch.argmax(logits, dim=-1)
class GaussianPolicyNet(nn.Module):
def __init__(self, observation_space_shape, hidden_sizes, activation, output_activation, action_dim):
super(GaussianPolicyNet, self).__init__()
self.mu = MLP(
layers=[observation_space_shape] + list(hidden_sizes) + [action_dim],
activation=activation,
output_activation=output_activation)
self.log_std = nn.Parameter(-0.5 * torch.ones(action_dim))
def forward(self, x, action_taken):
policy = Normal(self.mu(x), self.log_std.exp())
# Sample the action from the policy.
pi = policy.sample()
# Sum over the actions.
logp_pi = policy.log_prob(pi).sum(dim=1)
if action_taken is not None:
logp = policy.log_prob(action_taken).sum(dim=1)
else:
logp = None
return pi, logp, logp_pi
def eval(self, x):
return self.mu(x)
class ActorCritic(nn.Module):
def __init__(self,
observation_space_shape,
action_space,
hidden_sizes=[64, 64],
activation=torch.tanh,
output_activation=None,
policy=None):
super(ActorCritic, self).__init__()
if policy is None and hasattr(action_space, 'n'):
self.policy = CategoricalPolicyNet(
observation_space_shape, hidden_sizes, activation, output_activation, action_dim=action_space.n)
elif policy is None:
self.policy = GaussianPolicyNet(
observation_space_shape,
hidden_sizes,
activation,
output_activation,
action_dim=action_space.shape[0])
else:
self.policy = policy(
observation_space_shape,
hidden_sizes,
activation,
output_activation,
action_dim=action_space.shape[0])
self.value_function = MLP(
layers=[observation_space_shape] + list(hidden_sizes) + [1], activation=activation, output_squeeze=True)
def forward(self, x, action_taken=None):
pi, logp, logp_pi = self.policy(x, action_taken)
v = self.value_function(x)
return pi, logp, logp_pi, v
```
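A quick numeric check of `_discount_cumsum`, the one non-obvious helper in the buffer (self-contained; scipy and numpy only):
```python
import numpy as np
import scipy.signal

x = np.array([1.0, 1.0, 1.0])
discount = 0.5
# y_t = x_t + discount * y_{t+1}, evaluated right-to-left via lfilter
out = scipy.signal.lfilter([1], [1, -discount], x[::-1], axis=0)[::-1]
print(out)  # [1.75 1.5  1.  ]
```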
|
{
"source": "jenders97/unitpy",
"score": 2
}
|
#### File: units/combined_units/volume.py
```python
from measurement.base import MeasureBase
__all__ = [
'Volume',
]
class Volume(MeasureBase):
STANDARD_UNIT = 'cubic_meter'
UNITS = {
'us_g': 0.00378541,
'us_qt': 0.000946353,
'us_pint': 0.000473176,
'us_cup': 0.000236588,
'us_oz': 2.9574e-5,
'us_tbsp': 1.4787e-5,
'us_tsp': 4.9289e-6,
'cubic_millimeter': 0.000000001,
'cubic_centimeter': 0.000001,
'cubic_decimeter': 0.001,
'cubic_meter': 1.0,
'l': 0.001,
'cubic_foot': 0.0283168,
'cubic_inch': 1.6387e-5,
'imperial_g': 0.00454609,
'imperial_qt': 0.00113652,
'imperial_pint': 0.000568261,
'imperial_oz': 2.8413e-5,
'imperial_tbsp': 1.7758e-5,
'imperial_tsp': 5.9194e-6,
'tonnage': 2.83168,
}
ALIAS = {
'US Gallon': 'us_g',
'gallon': 'us_g',
'gal': 'us_g',
'US Quart': 'us_qt',
'quart': 'us_qt',
'qt': 'us_qt',
'US Pint': 'us_pint',
'US Cup': 'us_cup',
'cup': 'us_cup',
'US Ounce': 'us_oz',
'oz': 'us_oz',
'US Fluid Ounce': 'us_oz',
'US Tablespoon': 'us_tbsp',
'tbsp': 'us_tbsp',
'US Teaspoon': 'us_tsp',
'tsp': 'us_tsp',
'cubic millimeter': 'cubic_millimeter',
'mm3': 'cubic_millimeter',
'cubic centimeter': 'cubic_centimeter',
'cm3': 'cubic_centimeter',
'ml': 'cubic_centimeter',
'cubic decimeter': 'cubic_decimeter',
'dm3': 'cubic_decimeter',
'cubic meter': 'cubic_meter',
'm3': 'cubic_meter',
'liter': 'l',
'litre': 'l',
'cubic foot': 'cubic_foot',
'ft3': 'cubic_foot',
'cubic inch': 'cubic_inch',
'in3': 'cubic_inch',
        'Imperial Gallon': 'imperial_g',
'imp_g': 'imperial_g',
'Imperial Quart': 'imperial_qt',
'imp_qt': 'imperial_qt',
'Imperial Pint': 'imperial_pint',
'imp_pint': 'imperial_pint',
'Imperial Ounce': 'imperial_oz',
'imp_oz': 'imperial_oz',
'Imperial Tablespoon': 'imperial_tbsp',
'imp_tbsp': 'imperial_tbsp',
'Imperial Teaspoon': 'imperial_tsp',
'imp_tsp': 'imperial_tsp',
'tnge': 'tonnage'
}
SI_UNITS = ['l']
def __init__(self, *args, **kwargs):
super(Volume, self).__init__(*args, **kwargs)
```
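A hedged usage sketch, assuming the `measurement` package's `MeasureBase` conventions (construct with a unit keyword argument, read any other unit off as an attribute):
```python
v = Volume(l=2)
print(v.us_g)                          # ~0.528: 2 litres in US gallons
print(Volume(us_g=1).l)                # ~3.785: 1 US gallon in litres
print(Volume(cubic_foot=100).tonnage)  # ~1.0: 100 ft3 is one register ton
```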
|
{
"source": "jendis/blockapi",
"score": 3
}
|
#### File: blockapi/api/btc.py
```python
from blockapi.services import BlockchainAPI
class BtcAPI(BlockchainAPI):
"""
coins: bitcoin-cash
API docs: https://bch.btc.com/api-doc#API
Explorer: https://btc.com
"""
active = True
symbol = 'BCH'
base_url = 'https://bch-chain.api.btc.com/v3'
rate_limit = 0
coef = 1e-8
max_items_per_page = None
page_offset_step = None
confirmed_num = None
supported_requests = {
'get_balance': '/address/{address}',
}
def get_balance(self):
response = self.request('get_balance',
address=self.address)
if response is None:
return None
if response['data'] is None:
return None
try:
            retval = response['data']['balance'] * self.coef
except KeyError:
return None
return [{'symbol': self.symbol, 'amount': retval}]
```
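Hypothetical usage; the inherited `BlockchainAPI` performs the HTTP call, so this needs network access, and the address below is a placeholder:
```python
api = BtcAPI(address="<bch-address>")
print(api.get_balance())  # e.g. [{'symbol': 'BCH', 'amount': 1.23}], or None on failure
```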
#### File: blockapi/api/ontology.py
```python
from datetime import datetime
import pytz
from blockapi.services import BlockchainAPI
class OntioAPI(BlockchainAPI):
"""
coins: ontology
API docs: https://dev-docs.ont.io/#/docs-en/API/02-restful_api
https://github.com/ontio/ontology-explorer/tree
/master/back-end-projects/Explorer/src/main/java/com
/github/ontio/controller
Explorer: https://explorer.ont.io
"""
active = True
symbol = 'ONT'
base_url = 'https://explorer.ont.io'
rate_limit = 0
coef = 1
max_items_per_page = 20
page_offset_step = None
confirmed_num = None
supported_requests = {
'get_balance': '/api/v1/explorer/address/{address}/0/1',
'get_txs': '/api/v1/explorer/address/{address}/{limit}/{page}'
}
def get_balance(self):
response = self.request('get_balance',
address=self.address)
if not response:
return None
return [{
"symbol": item['AssetName'].upper(),
"amount": item['Balance']
} for item in response['Result']['AssetBalance']]
def get_txs(self, offset=None, limit=None, unconfirmed=False):
if limit is None:
limit = self.max_items_per_page
if offset is None:
offset = 1
response = self.request('get_txs',
address=self.address,
limit=limit,
page=offset)
txs = response['Result']['TxnList']
txs_result = []
for tx in txs:
for tx_transfer in tx['TransferList']:
txs_result.append({
'date': datetime.fromtimestamp(tx['TxnTime'], pytz.utc),
'from_address': tx_transfer['FromAddress'],
'to_address': tx_transfer['ToAddress'],
'amount': tx_transfer['Amount'] * self.coef,
'fee': tx['Fee'] * self.coef,
'hash': tx['TxnHash'],
'confirmed': None,
'is_error': False,
'type': 'normal',
'kind': 'transaction',
'direction': 'outgoing'
if tx_transfer['FromAddress'] == self.address
else 'incoming',
'status': 'confirmed' if tx['ConfirmFlag'] == 1
else 'unconfirmed',
'raw': tx
})
return txs_result
```
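Hypothetical usage along the same lines (network access to explorer.ont.io required; the address is a placeholder):
```python
api = OntioAPI(address="<ont-address>")
print(api.get_balance())     # one entry per asset held, e.g. ONT and ONG
print(api.get_txs(limit=5))  # the five most recent transfers, normalized
```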
#### File: test/api/test_tronscan.py
```python
from pytest import mark
from blockapi.api.tronscan import TronscanAPI
from blockapi.test_init import test_addresses
class TestTronscanAPI:
ADDRESS = test_addresses['TRX'][0]
@mark.vcr()
def test_get_balance(self):
api = TronscanAPI(address=self.ADDRESS)
result = api.get_balance()
assert next((r["amount"] for r in result if r["symbol"] == "TRX")) ==\
0.588285
assert len(result) == 45
```
#### File: test/api/test_tzscan.py
```python
from pytest import mark
from blockapi.api.tzscan import TzscanAPI
from blockapi.test_init import test_addresses
class TestTzscanAPI:
ADDRESS = test_addresses["XTZ"][0]
REWARD_ADDRESS = test_addresses["XTZ"][1]
def test_init(self):
api = TzscanAPI(address=self.ADDRESS)
assert api
@mark.vcr()
def test_get_balance(self):
api = TzscanAPI(address=self.ADDRESS)
result = api.get_balance()
assert result == [{'symbol': 'XTZ', 'amount': 2068856.4582429999}]
@mark.vcr()
def test_get_rewards(self):
api = TzscanAPI(address=self.ADDRESS)
result = api.get_rewards()
assert isinstance(result, list)
# TODO: find better address, this one returns empty list
@mark.vcr()
def test_get_txs(self):
api = TzscanAPI(address=self.ADDRESS)
result = api.get_txs()
assert isinstance(result, list)
assert len(result) == 3
@mark.vcr()
def test_get_endorsements(self):
api = TzscanAPI(address=self.ADDRESS)
result = api.get_endorsements()
assert isinstance(result, list)
# TODO: find better address, this one returns empty list
```
|
{
"source": "JEndler/hearthstoneAI",
"score": 3
}
|
#### File: JEndler/hearthstoneAI/LogWatcher.py
```python
from hearthstone import deckstrings
from _thread import start_new_thread
import pyperclip
from HSCard import HSCard
import Card
from Gamestate import Gamestate
from Move import Move
import HearthSimulation
import json
import time
class LogWatcher():
def __init__(self, MyDeckFile = None):
self.LogPath = "D:\\Hearthstone\\Hearthstone_Data\\output_log.txt"
self.gamestate = Gamestate()
self.hero = [None,None,None] # Hunter, Rogue, Mage, Warlock, Paladin, Warrior, Priest, Druid, Shaman
self.heroID = [None,None,None] # ID of the Hero Card
self.lastFromPowerLog = None
self.listOfHeroNames = ["<NAME>", "Rexxar", "<NAME>", "<NAME>",
"<NAME>",
"<NAME>", "Thrall", "Gul'dan", "<NAME>", "<NAME>",
"Khadgar",
"Medivh", "<NAME>", "<NAME>", "<NAME>", "<NAME>",
"<NAME>",
"<NAME>", "<NAME>"]
start_new_thread(self.update,())
self.watch()
def watch(self):
print("Watching")
with open(self.LogPath, 'r', encoding="utf-8") as file:
line = file.readline()
print("in file")
while True:
nameOfCard = self.getName(line)
idOfCard = self.getID(line)
line = file.readline()
if "entityName=UNKNOWN ENTITY" in line: continue
elif "BLOCK_START BlockType=POWER" in line:
if "UNKNOWN ENTITY" not in line:
if "Target=0" not in line: # Spell has a Target
s = line.split("Target=")
spell = self.getName(s[0])
target = self.getName(s[1])
idofSpell = self.getID(s[0])
idofTarget = self.getID(s[1])
SpellCard = HSCard(spell, idofSpell)
                            # '==' rather than 'is' for string comparison ('is' tests identity)
                            if SpellCard.getType() == "SPELL":
                                move = Move("playtargetedspell", actioncard=self.CheckCard(SpellCard),
                                            targetcard=self.CheckCard(self.getCardByID(idofTarget)))
                            elif SpellCard.getType() == "MINION":
                                move = Move("playtargetedminion", actioncard=self.CheckCard(SpellCard),
                                            targetcard=self.CheckCard(self.getCardByID(idofTarget)))
result = spell + " has been played with target : " + target
if self.lastFromPowerLog != result:
HearthSimulation.simTurn(self.gamestate, move)
self.lastFromPowerLog = result
print(result)
else: # Spell has no Target
s = line.split("Target=")
spell = self.getName(s[0])
idofSpell = self.getID(s[0])
SpellCard = self.getCardByID(idofSpell)
                            if SpellCard is not None:
                                if SpellCard.getType() == "SPELL":
                                    move = Move("playspell", actioncard=self.CheckCard(SpellCard))
                                elif SpellCard.getType() == "MINION":
                                    move = Move("play", actioncard=self.CheckCard(SpellCard))
result = spell + " has been played without target."
if self.lastFromPowerLog != result:
HearthSimulation.simTurn(self.gamestate, move)
print(result)
self.lastFromPowerLog = result
elif "GameState.DebugPrintEntitiesChosen()" in line:
print("Choice for Mulligan: " + self.getName(line))
elif "to FRIENDLY HAND" in line:
cardDrawn = HSCard(nameOfCard,idOfCard)
cardDrawn.ID = idOfCard
self.gamestate.addCardHand(cardDrawn, 1)
print("to HAND:" + nameOfCard + "*** ID = " + idOfCard)
elif "to FRIENDLY DECK" in line:
cardToDeck = self.getCardByID(idOfCard)
newCard = HSCard(cardToDeck.getName(), idOfCard)
self.gamestate.destroy(cardToDeck)
self.gamestate.addCardDeck(newCard,1)
def update(self):
while True:
time.sleep(3)
self.printStatus()
def getID(self, line):
indexID = line.find(" id=")
indexEndOfID = line.find("zone=")
ID = line[indexID + 4:indexEndOfID - 1]
return ID
def getName(self, line):
indexName = line.find("entityName=")
indexEndOfName = line.find("id=")
name = line[indexName + 11:indexEndOfName - 1]
return name
def getCardByID(self, ID):
for player in [1, -1]:
for card in self.gamestate.Board[player]:
if card.ID == ID: return card
for card in self.gamestate.Hand[player]:
if card.ID == ID: return card
for card in self.gamestate.Deck[player]:
if card.ID == ID: return card
print("ERROR : Card with ID:" + str(ID) + " not in play atm.")
return None
def delete(self, list, name):
for card in list:
if name == card.getName():
list.remove(card)
def printStatus(self):
print("*********STATUS***********")
print("Cards in Hand:" + str([card.getName() for card in self.gamestate.Hand[1] if card is not None]))
print("Cards on Board" + str([card.getName() for card in self.gamestate.Board[1] if card is not None]))
print("Cards on EnemyBoard" + str([card.getName() for card in self.gamestate.Board[-1] if card is not None]))
def CheckCard(self,card):
if card is None:
return
if card.getName() == self.hero[1] and card.ID == self.heroID[1]:
return 1
elif card.getName() == self.hero[-1] and card.ID == self.heroID[-1]:
return -1
elif card.getName() == self.gamestate.HeroPower[1]:
print("Friendly Heropower was played")
return None
elif card.getName() == self.gamestate.HeroPower[-1]:
print("Enemy Heropower was played")
return None
return card
Thiele = LogWatcher()
```
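The two log-parsing helpers can be exercised offline on a synthetic Power.log line; a sketch assuming only the class definition is in scope (the module-level `Thiele = LogWatcher()` would start watching the real log, so `__init__` is bypassed here):
```python
lw = LogWatcher.__new__(LogWatcher)  # deliberately skip __init__
line = "entityName=Fireball id=42 zone=HAND"
print(lw.getName(line))  # Fireball
print(lw.getID(line))    # 42
```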
#### File: JEndler/hearthstoneAI/PredictionHandler.py
```python
import operator
PATH_TO_DECKS = "D:\\Projects\\Hearthbot_Old\\Decks\\"
#PATH_TO_DECKS = "C:\\Users\\User\\PycharmProjects\\HearthstoneAI\\hearthstoneai\\Decks\\"
PATHS_BY_CLASS = {}
for classname in ["Gul'dan;_WARLOCK","Druid;_DRUID","Mage;_MAGE","Hunter;_HUNTER","Uther Lightbringer;_PALADIN","Rogue;_ROGUE","Garrosh Hellscream;_WARRIOR","Priest;_PRIEST","Shaman;_SHAMAN"]:
Pathlist = classname.split(";")
    PATHS_BY_CLASS[Pathlist[0]] = Pathlist[1] + "\\decks.txt"
# The PATH_TO_DECKS and PATHS_BY_CLASS Variables are supposed to be Static.
# They depict the Path to a Textfile with the Downloaded Decks for the Class
# It's supposed to be used like this: PathToFile = PATH_TO_DECKS + PATHS_BY_CLASS[ClassName]
class Ngram:
content = [] # This contains two cards
def __init__(self, cards):
self.content = cards
def getCards(self):
return self.content
def setCards(self, cards):
if cards is not None:
self.content = cards
def __str__(self):
String = "" + str(self.content)
return String
# Self has to be a Bigram here! <-- IMPORTANT
def compare(self,Trigram): # Compares two NGrams and returns the Intersection if exactly two cards overlap
overlap = True
for card in self.getCards():
if card not in Trigram.getCards(): overlap = False
if overlap:
s = set(self.getCards())
return s.symmetric_difference(Trigram.getCards()).pop() # This returns the Difference between the Bigram and the Trigram if 2 Cards overlap
return None
class PredictionHandler:
CardsPlayedByOpponent = [] # These are all the cards wich have been played by the Opponent.
DeckListsForClass = [] # This contains a List of Decklists for the Enemies Class.
Class = ""
def __init__(self, cardsplayed, className):
self.CardsPlayedByOpponent = cardsplayed
self.DeckListsForClass = self.getDecksByClass(className)
self.Class = className
# The Parameter mode changes wether the Ngrams are created for the OpponentsDeck or for the Prediction
def makeNgrams(self, mode): # NEED TO DO IF CLAUSE FOR CLASS DECKLIST AND CARDSBYOPPONENT
res = [] # This will be a List of Ngrams
if mode == "Prediction":
for Deck in self.DeckListsForClass:
for card in Deck:
for secondCard in Deck:
if card != secondCard:
res.append(Ngram([card, secondCard]))
elif mode == "Opponent":
for card in self.CardsPlayedByOpponent:
res.append(Ngram([card]))
return res
def predict(self):
if self.CardsPlayedByOpponent == []:
if self.Class == "Warlock":
self.CardsPlayedByOpponent = ["Kobold Librarian"]
elif self.Class == "Druid":
self.CardsPlayedByOpponent = ["Wild Growth"]
elif self.Class == "Hunter":
self.CardsPlayedByOpponent = ["Animal Companion"]
elif self.Class == "Uther Lightbringer":
self.CardsPlayedByOpponent = ["Righteous Protector"]
elif self.Class == "Rogue":
self.CardsPlayedByOpponent = ["Backstab"]
elif self.Class == "Garrosh Hellscream":
self.CardsPlayedByOpponent = ["Shield Block"]
elif self.Class == "Priest":
self.CardsPlayedByOpponent = ["Northshire Cleric"]
elif self.Class == "Shaman":
self.CardsPlayedByOpponent = ["Jade Claws"]
elif self.Class == "Mage":
self.CardsPlayedByOpponent = ["Fireball"]
if self.CardsPlayedByOpponent == [] and self.DeckListsForClass == []:
print("Lists are empty!")
return
res = {} # This Dict will contain all Cards and how often they appeared
BigramsForOpponent = self.makeNgrams("Opponent")
TrigramsFromDeck = self.makeNgrams("Prediction")
#print(len(BigramsForOpponent))
#print(len(TrigramsFromDeck))
print(self.CardsPlayedByOpponent)
for bigram in BigramsForOpponent:
for trigram in TrigramsFromDeck:
#print("blae")
compare = bigram.compare(trigram)
if compare != None:
if compare in res: res[compare] += 1
elif compare not in res: res[compare] = 1
print("blae")
sorted_res = sorted(res.items(), key=operator.itemgetter(1))
sortedcards = [x[0] for x in sorted_res]# This returns a List of Tuples with the Key and Value of the Dict, sorted by the Value
if sortedcards == []:
sortedcards = ["Chillwind Yeti","Shieldbearer","Worgen Greaser","Stormwatcher","Ravenholdt Assassin","Ravenholdt Assassin","Wisp","Wisp","Murloc Raider","Murloc Raider","Bloodfen Raptor","Bloodfen Raptor","Stormwind Champion","Stormwind Champion","Argent Squire","Argent Squire","Goldshire Footman","Goldshire Footman","River Crocolisk","River Crocolisk","Oasis Snapjaw","Oasis Snapjaw","Angry Chicken","Angry Chicken","Grotesque Dragonhawk""Grotesque Dragonhawk","Am'gam Rager","Am'gam Rager","Duskboar","Duskboar"]
return sortedcards
def getDecksByClass(self, className):
print("className")
print(className)
res = []
Path = PATH_TO_DECKS + PATHS_BY_CLASS[className]
with open(Path, "r") as file:
for line in file.readlines(): # Each line is a different Deck
deck = line.split("|")
deck.pop() # Remove the Last item from the List because it is always "\n"
res.append(deck)
return res
```
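The bigram/trigram overlap test at the heart of `predict()` is easiest to see on toy data; a self-contained sketch:
```python
bigram = Ngram(["Fireball", "Frostbolt"])
trigram = Ngram(["Fireball", "Frostbolt", "Arcane Intellect"])
# Both bigram cards occur in the trigram, so compare() yields the third card.
print(bigram.compare(trigram))  # Arcane Intellect
# Only one card overlaps here, so compare() returns None.
print(bigram.compare(Ngram(["Fireball", "Polymorph", "Flamestrike"])))  # None
```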
|
{
"source": "JenDobson/greenbutton",
"score": 3
}
|
#### File: greenbutton/tests/test_greenbutton.py
```python
import unittest
from unittest.mock import patch
import greenbutton.greenbutton as gb
import pandas as pd
import numpy as np
import datetime
import os
THISDIR = os.path.dirname(os.path.abspath(__file__))
class TestStringMethods(unittest.TestCase):
def test_has_sample_data(self):
self.assertTrue(isinstance(gb.SAMPLE_DATA,pd.DataFrame))
def test_create_box_plot_of_use_by_hour(self):
ax = gb.boxplot_use_by_hour(gb.SAMPLE_DATA)
self.assertIsNotNone(ax)
self.assertNotIn('Start Hour',gb.SAMPLE_DATA.columns)
self.assertEqual(168,len(ax.lines))
def test_filter_by_time_of_day(self):
test_df = gb.SAMPLE_DATA.copy()
filtered_df = gb.filter_by_time_of_day(test_df,datetime.time(8,0,0),datetime.time(9,0,0))
unique_filtered_df = filtered_df['Start Time'].dt.time.unique()
self.assertTrue(np.isin(datetime.time(8,0),unique_filtered_df))
self.assertTrue(np.isin(datetime.time(9,0),unique_filtered_df))
self.assertTrue(2,len(unique_filtered_df))
def test_can_load_data_from_file(self):
# Check that loaded data is not just from hardcoded XMLFILE
datafile = os.path.join(THISDIR,'data','testdata.xml')
df = gb.dataframe_from_xml(datafile)
self.assertEqual(pd.Timestamp(2019,10,1,0),df.loc[0]['Start Time'])
def test_aggregate_use_by_day(self):
pass
def test_sample_data_is_immutable(self):
# see: https://stackoverflow.com/questions/24928306/pandas-immutable-dataframe
pass
if __name__ == '__main__':
unittest.main()
```
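For context, the module API exercised by these tests reads roughly as in this sketch (function names are taken from the tests; the XML path is a placeholder):
```python
import datetime
import greenbutton.greenbutton as gb

df = gb.dataframe_from_xml("tests/data/testdata.xml")
morning = gb.filter_by_time_of_day(df, datetime.time(8, 0), datetime.time(9, 0))
ax = gb.boxplot_use_by_hour(morning)  # box plot of usage grouped by hour
```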
|
{
"source": "Jendoliver/cookiecutter_project_upgrader",
"score": 2
}
|
#### File: cookiecutter_project_upgrader/dev-util/initial.py
```python
import subprocess
import xml.etree.ElementTree as ElementTree
from dataclasses import dataclass
from pathlib import Path
from typing import cast, Dict
try:
import yaml
except ImportError as error:
print("Attempting to install pyyaml")
subprocess.run(["pip", "install", "pyyaml"])
try:
import yaml
except ImportError as error:
raise Exception(
"Could not install pyyaml automatically successfully, please install it manually first") from error
def _get_project_name():
return cast(Path, Path.cwd()).parent.name
@dataclass
class _XmlElementData:
tag: str
attributes: Dict[str, str]
identifying_attribute: str
class PyCharmConfigUpdater:
def __init__(self):
self.updated = False
def update_pycharm_config(self,
update_testrunner_to_pytest: bool,
exclude_cache_and_build_directories: bool):
project_name = _get_project_name()
idea_project_config_file = Path("../.idea", f"{project_name}.iml")
if idea_project_config_file.exists():
tree = ElementTree.parse(idea_project_config_file)
if update_testrunner_to_pytest:
self._update_testrunner_to_pytest(tree)
if exclude_cache_and_build_directories:
self._exclude_cache_and_build_directories(tree)
if self.updated:
tree.write(idea_project_config_file, encoding="UTF-8", xml_declaration=True)
print(f"Updated PyCharm config file {idea_project_config_file}")
else:
print(f"PyCharm config file {idea_project_config_file} was already correct.")
else:
print("No PyCharm project configuration file found.")
def _update_testrunner_to_pytest(self, tree: ElementTree.ElementTree):
root = tree.getroot()
test_runner_element = self._create_or_update_element_if_necessary(root, _XmlElementData(
tag="component",
attributes={"name": "TestRunnerService"},
identifying_attribute="name"
))
self._create_or_update_element_if_necessary(test_runner_element, _XmlElementData(
tag="option",
attributes={"name": "projectConfiguration", "value": "pytest"},
identifying_attribute="name"
))
self._create_or_update_element_if_necessary(test_runner_element, _XmlElementData(
tag="option",
attributes={"name": "PROJECT_TEST_RUNNER", "value": "pytest"},
identifying_attribute="name"
))
def _exclude_cache_and_build_directories(self, tree: ElementTree.ElementTree):
root = tree.getroot()
module_root_manager_element = self._create_or_update_element_if_necessary(root, _XmlElementData(
tag="component",
attributes={"name": "NewModuleRootManager"},
identifying_attribute="name"
))
content_element = self._create_or_update_element_if_necessary(module_root_manager_element, _XmlElementData(
tag="content",
attributes={"url": "file://$MODULE_DIR$"},
identifying_attribute="url"
))
excluded_folders = [
"file://$MODULE_DIR$/.dev",
f"file://$MODULE_DIR$/{_get_project_name()}.egg-info",
]
for excluded_folder in excluded_folders:
self._create_or_update_element_if_necessary(content_element, _XmlElementData(
tag="excludeFolder",
attributes={"url": excluded_folder},
identifying_attribute="url"
))
    def _create_or_update_element_if_necessary(self, parent: ElementTree.Element,
                                               desired_xml_element: _XmlElementData) -> ElementTree.Element:
"""
        Ensures the parent has a sub-element as described.
:param parent:
:param desired_xml_element:
:return: relevant real XML element which may have been created or updated
"""
identifying_attribute_value = desired_xml_element.attributes.get(desired_xml_element.identifying_attribute)
searched_element = next(
(element for element in parent.findall(desired_xml_element.tag)
if element.get(desired_xml_element.identifying_attribute) == identifying_attribute_value), None)
if searched_element is not None:
for key, value in desired_xml_element.attributes.items():
if searched_element.get(key) != value:
searched_element.set(key, value)
self.updated = True
return searched_element
else:
new_element = ElementTree.SubElement(parent, desired_xml_element.tag, desired_xml_element.attributes)
self.updated = True
return new_element
PyCharmConfigUpdater().update_pycharm_config(
update_testrunner_to_pytest=True,
exclude_cache_and_build_directories=True
)
```
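The create-or-update helper above is idempotent: re-running it never duplicates elements. A self-contained ElementTree sketch of the same pattern:
```python
import xml.etree.ElementTree as ElementTree

def ensure_child(parent, tag, attributes, identifying_attribute):
    """Find the child whose identifying attribute matches, updating its
    attributes; create it if absent (mirrors the helper above)."""
    ident = attributes[identifying_attribute]
    for element in parent.findall(tag):
        if element.get(identifying_attribute) == ident:
            for key, value in attributes.items():
                element.set(key, value)
            return element
    return ElementTree.SubElement(parent, tag, attributes)

root = ElementTree.Element("module")
ensure_child(root, "component", {"name": "TestRunnerService"}, "name")
ensure_child(root, "component", {"name": "TestRunnerService"}, "name")  # no duplicate
assert len(root.findall("component")) == 1
```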
|
{
"source": "jendrikjoe/flask-dynamo",
"score": 3
}
|
#### File: flask-dynamo/flask_dynamo/manager.py
```python
from os import environ
from boto3.session import Session
from flask import current_app
from .errors import ConfigurationError
class DynamoLazyTables(object):
"""Manages access to Dynamo Tables."""
def __init__(self, connection, table_config):
self._table_config = table_config
self._connection = connection
def __getitem__(self, name):
"""Get the connection for a table by name."""
return self._connection.Table(name)
def keys(self):
"""The table names in our config."""
return [t['TableName'] for t in self._table_config]
def len(self):
"""The number of tables we are configured for."""
return len(self.keys())
def items(self):
"""The table tuples (name, connection.Table())."""
for table_name in self.keys():
yield (table_name, self[table_name])
def _wait(self, table_name, type_waiter):
waiter = self._connection.meta.client.get_waiter(type_waiter)
waiter.wait(TableName=table_name)
def scan(self, name):
"""Scan a table by name."""
        return self._connection.Table(name).scan()
def wait_exists(self, table_name):
self._wait(table_name, 'table_exists')
def wait_not_exists(self, table_name):
self._wait(table_name, 'table_not_exists')
def create_all(self, wait=False):
tables_name_list = [table.name for table in self._connection.tables.all()]
for table in self._table_config:
if table['TableName'] not in tables_name_list:
self._connection.create_table(**table)
if wait:
for table in self._table_config:
if table['TableName'] not in tables_name_list:
self.wait_exists(table['TableName'])
def destroy_all(self, wait=False):
for table in self._table_config:
table = self._connection.Table(table['TableName'])
table.delete()
if wait:
for table in self._table_config:
self.wait_not_exists(table['TableName'])
class Dynamo(object):
"""DynamoDB engine manager."""
DEFAULT_REGION = 'us-east-1'
def __init__(self, app=None):
"""
Initialize this extension.
:param obj app: The Flask application (optional).
"""
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""
Initialize this extension.
:param obj app: The Flask application.
"""
self._init_settings(app)
self._check_settings(app)
app.extensions['dynamo'] = self
conn = self._connection(app=app)
self.tables = DynamoLazyTables(conn, app.config['DYNAMO_TABLES'])
@staticmethod
def _init_settings(app):
"""Initialize all of the extension settings."""
app.config.setdefault('DYNAMO_SESSION', None)
app.config.setdefault('DYNAMO_TABLES', [])
app.config.setdefault('DYNAMO_ENABLE_LOCAL', environ.get('DYNAMO_ENABLE_LOCAL', False))
app.config.setdefault('DYNAMO_LOCAL_HOST', environ.get('DYNAMO_LOCAL_HOST', None))
app.config.setdefault('DYNAMO_LOCAL_PORT', environ.get('DYNAMO_LOCAL_PORT', None))
app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID', None))
app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY', None))
app.config.setdefault('AWS_SESSION_TOKEN', environ.get('AWS_SESSION_TOKEN', None))
app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', Dynamo.DEFAULT_REGION))
@staticmethod
def _check_settings(app):
"""
Check all user-specified settings to ensure they're correct.
We'll raise an error if something isn't configured properly.
:raises: ConfigurationError
"""
if app.config['AWS_ACCESS_KEY_ID'] and not app.config['AWS_SECRET_ACCESS_KEY']:
raise ConfigurationError('You must specify AWS_SECRET_ACCESS_KEY if you are specifying AWS_ACCESS_KEY_ID.')
if app.config['AWS_SECRET_ACCESS_KEY'] and not app.config['AWS_ACCESS_KEY_ID']:
raise ConfigurationError('You must specify AWS_ACCESS_KEY_ID if you are specifying AWS_SECRET_ACCESS_KEY.')
if app.config['DYNAMO_ENABLE_LOCAL'] and not (app.config['DYNAMO_LOCAL_HOST'] and app.config['DYNAMO_LOCAL_PORT']):
raise ConfigurationError('If you have enabled Dynamo local, you must specify the host and port.')
def _get_app(self):
"""
Helper method that implements the logic to look up an application.
"""
if current_app:
return current_app
if self.app is not None:
return self.app
raise RuntimeError(
            'application not registered on dynamo instance and no application '
            'bound to current context'
)
@staticmethod
def _get_ctx(app):
"""
        Gets the dynamo app context state.
"""
try:
return app.extensions['dynamo']
except KeyError:
raise RuntimeError(
'flask-dynamo extension not registered on flask app'
)
@staticmethod
def _init_session(app):
session_kwargs = {}
# Only apply if manually specified: otherwise, we'll let boto
# figure it out (boto will sniff for ec2 instance profile
# credentials).
if app.config['AWS_ACCESS_KEY_ID']:
session_kwargs['aws_access_key_id'] = app.config['AWS_ACCESS_KEY_ID']
if app.config['AWS_SECRET_ACCESS_KEY']:
session_kwargs['aws_secret_access_key'] = app.config['AWS_SECRET_ACCESS_KEY']
if app.config['AWS_SESSION_TOKEN']:
session_kwargs['aws_session_token'] = app.config['AWS_SESSION_TOKEN']
if app.config['AWS_REGION']:
session_kwargs['region_name'] = app.config['AWS_REGION']
return Session(**session_kwargs)
def _session(self, app=None):
if not app:
app = self._get_app()
ctx = self._get_ctx(app)
try:
return ctx._session_instance
except AttributeError:
ctx._session_instance = app.config['DYNAMO_SESSION'] or self._init_session(app)
return ctx._session_instance
@property
def session(self):
"""
Our DynamoDB session.
This will be lazily created if this is the first time this is being
accessed. This session is reused for performance.
"""
return self._session()
def _connection(self, app=None):
if not app:
app = self._get_app()
ctx = self._get_ctx(app)
try:
return ctx._connection_instance
except AttributeError:
client_kwargs = {}
            local = bool(app.config['DYNAMO_ENABLE_LOCAL'])
if local:
client_kwargs['endpoint_url'] = 'http://{}:{}'.format(
app.config['DYNAMO_LOCAL_HOST'],
app.config['DYNAMO_LOCAL_PORT'],
)
ctx._connection_instance = self._session(app=app).resource('dynamodb', **client_kwargs)
return ctx._connection_instance
@property
def connection(self):
"""
Our DynamoDB connection.
This will be lazily created if this is the first time this is being
accessed. This connection is reused for performance.
"""
return self._connection()
def get_table(self, table_name):
return self.tables[table_name]
def create_all(self, wait=False):
"""
Create all user-specified DynamoDB tables.
We'll ignore table(s) that already exists.
We'll error out if the tables can't be created for some reason.
"""
self.tables.create_all(wait=wait)
def destroy_all(self, wait=False):
"""
Destroy all user-specified DynamoDB tables.
We'll error out if the tables can't be destroyed for some reason.
"""
self.tables.destroy_all(wait=wait)
```
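A hedged usage sketch for the extension above. Each table definition is an ordinary boto3 `create_table` kwargs dict, and the snippet assumes AWS credentials or a local DynamoDB endpoint are configured:
```python
from flask import Flask
from flask_dynamo import Dynamo

app = Flask(__name__)
app.config['DYNAMO_TABLES'] = [{
    'TableName': 'users',
    'KeySchema': [{'AttributeName': 'username', 'KeyType': 'HASH'}],
    'AttributeDefinitions': [{'AttributeName': 'username', 'AttributeType': 'S'}],
    'BillingMode': 'PAY_PER_REQUEST',
}]

dynamo = Dynamo(app)
with app.app_context():
    dynamo.create_all(wait=True)   # creates only the tables that are missing
    dynamo.tables['users'].put_item(Item={'username': 'jane'})
```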
|
{
"source": "jendrikjoe/vscode-importmagic",
"score": 2
}
|
#### File: pythonFiles/src/extended_isort.py
```python
import os
import locale
from difflib import unified_diff, SequenceMatcher
# from pathlib import Path
try:
from pathlib import Path
except ImportError:
# Python 2 backport
from pathlib2 import Path
from isort import settings
from isort.isort import _SortImports
from isort.compat import get_settings_path, resolve, \
determine_file_encoding, read_file_contents
class SortImportsException(Exception):
pass
class ExtendedSortImports(object):
def __init__(self, file_path, settings_path):
self._file_path = file_path
self._settings_path = settings_path
self._import_candidates = []
self.output = None
def add_import(self, from_, module_=None):
self._import_candidates.append('from %s import %s' % (from_, module_) \
if module_ is not None else 'import %s' % from_)
def get_diff(self, **setting_overrides):
        # The following code is a modified part of isort.compat.SortImports
        run_path = ''
        check_skip = True
if not self._file_path:
return []
file_path = Path(self._file_path)
settings_path = None if self._settings_path is None else \
Path(self._settings_path)
self.config = settings.prepare_config(
get_settings_path(settings_path, file_path), **setting_overrides)
# Add custom import
self.config['add_imports'] = self.config['add_imports'] or []
for c in self._import_candidates:
self.config['add_imports'].append(c)
absolute_file_path = resolve(file_path)
file_name = None
if check_skip:
if run_path and run_path in absolute_file_path.parents:
file_name = os.path.relpath(absolute_file_path, run_path)
else:
file_name = str(absolute_file_path)
run_path = ''
if settings.file_should_be_skipped(file_name, self.config, run_path):
raise SortImportsException(
"%s was skipped as it's listed in 'skip' setting or "
"matches a glob in 'skip_glob' setting" % \
absolute_file_path)
preferred_encoding = determine_file_encoding(absolute_file_path)
fallback_encoding = locale.getpreferredencoding(False)
file_contents, used_encoding = read_file_contents(
absolute_file_path, encoding=preferred_encoding,
fallback_encoding=fallback_encoding)
if used_encoding is None:
raise SortImportsException(
"%s was skipped as it couldn't be opened with the given "
"%s encoding or %s fallback encoding" % (
str(absolute_file_path), preferred_encoding,
fallback_encoding))
if file_contents is None or ("isort:" + "skip_file") in file_contents:
return []
extension = file_name.split('.')[-1] if file_name else "py"
self.sorted_imports = _SortImports(file_contents=file_contents,
config=self.config,
extension=extension)
self.output = self.sorted_imports.output
# END. compare file_contents vs self.output
return self._show_diff(file_contents)
def _show_diff(self, file_contents):
diff_commands = []
s1 = file_contents.splitlines(1)
s2 = self.output.splitlines(1)
if s2[-1].endswith('\n') and not s1[-1].endswith('\n'):
s1[-1] += '\n'
# Parse our diff
matcher = SequenceMatcher(None, s1, s2)
for tag, i1, i2, j1, j2 in reversed(matcher.get_opcodes()):
if tag == 'delete':
diff_commands.append({
'action': 'delete',
'start': i1,
'end': i2,
'text': None
})
elif tag == 'insert':
diff_commands.append({
'action': 'insert',
'start': i1,
'end': i2,
'text': ''.join(s2[j1:j2])
})
elif tag == 'replace':
diff_commands.append({
'action': 'replace',
'start': i1,
'end': i2,
'text': ''.join(s2[j1:j2])
})
return diff_commands
```
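The commands returned by `get_diff` arrive in reverse document order (note the `reversed(...)` above), so a consumer can apply them sequentially without invalidating indices. A minimal sketch:
```python
def apply_diff(source_lines, diff_commands):
    """Apply delete/insert/replace commands to a list of lines."""
    lines = list(source_lines)
    for cmd in diff_commands:  # reverse order keeps earlier indices valid
        if cmd['action'] == 'delete':
            del lines[cmd['start']:cmd['end']]
        else:  # 'insert' and 'replace' both splice in new text
            lines[cmd['start']:cmd['end']] = cmd['text'].splitlines(True)
    return ''.join(lines)

original = ["import os\n", "print(os.sep)\n"]
commands = [{'action': 'insert', 'start': 1, 'end': 1, 'text': 'import sys\n'}]
assert apply_diff(original, commands) == "import os\nimport sys\nprint(os.sep)\n"
```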
#### File: pythonFiles/src/extension.py
```python
import os
import importmagic
from isort.settings import WrapModes
from src import WarningException
from src.extended_isort import ExtendedSortImports
from src.index_manager import IndexManager
from src.indexer import DirIndexer, FileIndexer
class Extension(object):
def __init__(self):
self._inited = False
self._style_multiline = None
self._style_max_columns = None
self._style_indent_with_tabs = None
self._workspace_path = None # .isort.cfg could be placed there
self._paths = []
self._skip_tests = True
self._temp_path = None
self._index_manager = None
@property
def style_multiline(self):
return self._style_multiline
@style_multiline.setter
def style_multiline(self, value):
if value in (None, 'backslash', 'parentheses'):
self._style_multiline = value
@property
def style_max_columns(self):
return self._style_max_columns
@style_max_columns.setter
def style_max_columns(self, value):
if value is None or isinstance(value, int):
self._style_max_columns = value
elif isinstance(value, str) and value.isnumeric():
self._style_max_columns = int(value)
@property
def style_indent_with_tabs(self):
return self._style_indent_with_tabs
@style_indent_with_tabs.setter
def style_indent_with_tabs(self, value):
if value is None or isinstance(value, bool):
self._style_indent_with_tabs = value
@property
def paths(self):
return self._paths
@paths.setter
def paths(self, value):
if not isinstance(value, list):
raise TypeError('Paths must be list')
self._paths = value
@property
def workspace_path(self):
return self._workspace_path
@workspace_path.setter
def workspace_path(self, value):
self._workspace_path = value
@property
def skip_tests(self):
return self._skip_tests
@skip_tests.setter
def skip_tests(self, value):
self._skip_tests = bool(value)
@property
def temp_path(self):
return self._temp_path
@temp_path.setter
def temp_path(self, value):
self._temp_path = value
def notify_progress(self, text):
self._success_response(progress=text)
def _cmd_configure(self, **kwargs):
if self._inited:
raise Exception('Restart to reconfigure it')
self.paths = kwargs.get('paths', [])
self.skip_tests = bool(kwargs.get('skipTest', True))
self.temp_path = kwargs.get('tempPath')
self.workspace_path = kwargs.get('workspacePath')
style_settings = kwargs.get('style', {})
self.style_multiline = style_settings.get('multiline')
self.style_max_columns = style_settings.get('maxColumns')
self.style_indent_with_tabs = style_settings.get('indentWithTabs')
if not self.temp_path:
raise ValueError('Empty temp_path')
if not self.paths and os.path.exists(self.workspace_path):
self.paths.append(self.workspace_path)
self._inited = True
self.notify_progress('Index checking in progress...')
self._index_manager = IndexManager(
self, kwargs.get('workspaceName', 'default'))
if not self._index_manager.open():
self._cmd_rebuild_index()
def _report_scan_progress(self, value):
self.notify_progress('Scan files... %i' % value)
def _cmd_change_files(self, files, **kwargs):
#pylint: disable=unused-argument
if not self._inited:
raise Exception('Run configure() at first')
        # When __init__.py is changed we should rescan all the packages
        # beneath it. We use the paths as target prefixes.
        prefixes = []
        for f in list(files):
            prefixes.append(f)
            basename = os.path.basename(f)
            if basename == '__init__.py':
                parts = f.split(os.path.sep)
                if len(parts) > 1:
                    package_path = os.path.sep.join(parts[:-1])
                    prefixes.append(package_path)
        idx = FileIndexer(self.paths, prefixes, self.skip_tests)
idx.build(self._report_scan_progress)
self._index_manager.remove_from_index(idx)
self._index_manager.append_index(idx)
self._index_manager.commit(idx.total_files)
all_docs_count = self._index_manager.get_documents_count()
return dict(success=True, docs_count=all_docs_count)
def _cmd_rebuild_index(self, **kwargs):
#pylint: disable=unused-argument
if not self._inited:
raise Exception('Run configure() at first')
self.notify_progress('Rebuild index...')
self._index_manager.recreate_index()
idx = DirIndexer(self.paths, self.skip_tests)
idx.build(self._report_scan_progress)
total_items = idx.get_power() or 1
def report_listener2(value):
v = value * 100 / total_items
self.notify_progress('Indexing... %i%%' % int(v))
self._index_manager.append_index(idx, report_listener2)
self.notify_progress('Save index file...')
self._index_manager.commit(idx.total_files)
all_docs_count = self._index_manager.get_documents_count()
return dict(success=True, docs_count=all_docs_count)
def _cmd_get_symbols(self, text, **kwargs):
#pylint: disable=unused-argument
if not self._inited:
raise Exception('Run configure() at first')
if len(text) < 2:
            raise WarningException('Search text must be at least 2 characters long')
results = []
for f in self._index_manager.search(text):
results.append(dict(
symbol=f['symbol'],
module=f['module'],
kind=f['kind']
))
return dict(items=results)
def _cmd_insert_import(self, **kwargs):
if not self._inited:
raise Exception('Run configure() at first')
source_file = kwargs.get('sourceFile')
module = kwargs.get('module')
symbol = kwargs.get('symbol') # Always present
if not source_file:
raise WarningException('Empty sourceFile')
isort = ExtendedSortImports(source_file, self.workspace_path)
if not module:
isort.add_import(symbol)
else:
isort.add_import(module, symbol)
params = {'verbose': False}
if self.style_max_columns is not None:
params['line_length'] = self.style_max_columns
if self.style_multiline == 'backslash':
params['use_parentheses'] = False
params['multi_line_output'] = WrapModes.HANGING_INDENT
if self.style_multiline == 'parentheses':
params['use_parentheses'] = True
params['multi_line_output'] = WrapModes.GRID
if self.style_indent_with_tabs is not None:
params['indent'] = '\t' if self.style_indent_with_tabs else ' '*4
diff = isort.get_diff(**params)
return dict(diff=diff)
def _cmd_import_suggestions(self, **kwargs):
if not self._inited:
raise Exception('Run configure() at first')
source_file = kwargs.get('sourceFile')
unresolved_name = kwargs.get('unresolvedName')
if len(unresolved_name) < 2:
            raise WarningException('Search text must be at least 2 characters long')
if not source_file:
raise WarningException('Empty sourceFile')
if not unresolved_name:
raise WarningException('Empty unresolvedName')
with open(source_file, 'r') as fd:
python_source = fd.read()
scope = importmagic.Scope.from_source(python_source)
_unresolved, _unreferenced = \
scope.find_unresolved_and_unreferenced_symbols()
        # Sometimes an unresolved name is dotted, e.g. "sys.path".
        # Split such cases so that "sys" and "path" can be looked up individually.
unresolved = set()
for item1 in _unresolved:
for item2 in item1.split('.'):
unresolved.add(item2)
if unresolved_name not in unresolved:
return dict(items=[])
results = []
for f in self._index_manager.search(unresolved_name):
results.append(dict(
symbol=f['symbol'],
module=f['module'],
kind=f['kind']
))
return dict(items=results)
_COMMANDS = {
'configure': _cmd_configure,
'changeFiles': _cmd_change_files,
'rebuildIndex': _cmd_rebuild_index,
'getSymbols': _cmd_get_symbols,
'insertImport': _cmd_insert_import,
'importSuggestions': _cmd_import_suggestions
}
```
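`_COMMANDS` stores plain functions rather than bound methods, so the RPC layer (not shown here) presumably dispatches by passing the instance explicitly; a hypothetical sketch:
```python
# Hypothetical dispatcher; the real request loop lives outside this file.
def dispatch(extension, request):
    handler = Extension._COMMANDS[request['method']]
    return handler(extension, **request.get('params', {}))

# e.g. dispatch(ext, {'method': 'getSymbols', 'params': {'text': 'Path'}})
```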
#### File: pythonFiles/src/schema.py
```python
from whoosh.analysis import Filter, LowercaseFilter, StandardAnalyzer, \
NgramFilter
from whoosh.analysis.tokenizers import IDTokenizer
from whoosh.fields import NUMERIC, STORED, SchemaClass, TEXT
class LodashFilter(Filter):
def __call__(self, tokens):
for t in tokens:
t.text = t.text.replace('_', '')
yield t
simple_ana = IDTokenizer() | LowercaseFilter() | LodashFilter()
custom_ana = StandardAnalyzer(stoplist=None) | LodashFilter()
# | NgramFilter(minsize=2, maxsize=5, at='start')
# NgramFilter causes sorting problems: less relevant artifacts come first
class IndexSchema(SchemaClass):
filename = TEXT(stored=True, analyzer=simple_ana)
symbol = TEXT(stored=True, analyzer=custom_ana)
module = TEXT(stored=True, analyzer=simple_ana)
location = STORED()
kind = STORED()
sort = NUMERIC(sortable=True)
```
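The analyzers can be exercised directly; using `simple_ana` from the module above, tokens come back lowercased with underscores removed:
```python
tokens = [token.text for token in simple_ana("My_Module_Name")]
assert tokens == ["mymodulename"]
```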
#### File: pythonFiles/src/symbol_index.py
```python
import os
from contextlib import contextmanager
from importmagic import SymbolIndex
class SymbolIndexAccelerator(object):
def path(self):
if not hasattr(self, '_cached_path'):
path = []
node = self
while node and node._name:
path.append(node._name)
node = node._parent
setattr(self, '_cached_path', '.'.join(reversed(path)))
return getattr(self, '_cached_path')
def depth(self):
if not hasattr(self, '_cached_depth'):
depth = 0
node = self
while node._parent:
depth += 1
node = node._parent
setattr(self, '_cached_depth', depth)
return getattr(self, '_cached_depth')
class ExtendedSymbolIndex(SymbolIndex, SymbolIndexAccelerator):
"""
    Extends the base class to keep track of a filename.
"""
def __init__(self, name=None, parent=None, score=1.0, location='L',
blacklist_re=None, locations=None, filename=None,
manager=None):
self.filename = filename
self.manager = manager
super().__init__(name, parent, score,
location, blacklist_re, locations)
def get_power(self):
items_count = 0
for subscope in list(self._tree.values()):
items_count += 1
if type(subscope) is not float:
items_count += subscope.get_power()
return items_count
def build_index(self):
super().build_index(self.manager.paths)
def index_file(self, module, filename):
location = self._determine_location_for(filename)
self.manager.total_files += 1
if self.manager.blacklist_re.search(filename):
return
if self.manager.target_prefixes is not None:
ok = False
for test_file in self.manager.target_prefixes:
if filename.startswith(test_file):
ok = True
break
if not ok:
return
self.manager.affected_files.add(filename)
# logger.debug('parsing Python module %s for indexing', filename)
with open(filename, 'rb') as fd:
source = fd.read()
with self.enter(module,
location=location, # self._determine_location_for(filename),
filename=filename) as subtree:
success = subtree.index_source(filename, source)
if not success:
self._tree.pop(module, None)
def _index_package(self, root, location):
root_filename = os.path.join(root, '__init__.py')
basename = os.path.basename(root)
with self.enter(basename, location=location,
filename=root_filename) as subtree:
for filename in os.listdir(root):
subtree.index_path(os.path.join(root, filename))
@contextmanager
def enter(self, name, location='L', score=1.0, filename=None):
if name is None:
tree = self
else:
tree = self._tree.get(name)
if not isinstance(tree, SymbolIndex):
tree = self._tree[name] = ExtendedSymbolIndex(name, self,
score=score, location=location, filename=filename,
manager=self.manager)
if tree.path() in SymbolIndex._PACKAGE_ALIASES:
alias_path, _ = SymbolIndex._PACKAGE_ALIASES[tree.path()]
alias = self.find(alias_path)
alias._tree = tree._tree
yield tree
if tree._exports is not None:
# Delete unexported variables. But keeps submodules
for key in set(tree._tree) - set(tree._exports):
value = tree._tree.get(key)
if value is None or type(value) is float:
del tree._tree[key]
```
|
{
"source": "jendrusk/osm_report_addr",
"score": 2
}
|
#### File: jendrusk/osm_report_addr/webpage.py
```python
from flask import Flask, render_template, request
import locdb
import config
app = Flask(__name__)
def create_josm_list(rep):
res_lst = list()
for feat in rep:
if feat["type"] == "node":
obj = "n"+str(feat["osm_id"])
elif feat["type"] == "way":
obj = "w"+str(feat["osm_id"])
elif feat["type"] == "relation":
obj = "r"+str(feat["osm_id"])
res_lst.append(obj)
return ",".join(res_lst)
@app.route('/')
def main_list():
htdata = locdb.select_all(limit=100)
header = "Ostatnie 100 changesetów z uszkodzonymi adresami"
colnames = ["osm_id", "osm_user", "osm_changeset", "visits", "reason", "checks"]
rows = htdata
return render_template("main.html",
header = header,
colnames = colnames,
rows =rows,
reason_dict=config.reason_dict)
@app.route('/changeset/<chgs_id>')
def changeset_report(chgs_id):
referer = request.headers.get("Referer")
locdb.add_visit(chgs_id,referer)
visits = locdb.select_visits_grouped(chgs_id)
htdata = locdb.select_changeset(chgs_id)
colnames = ["osm_id", "reason", "checks"]
all_obj = create_josm_list(htdata)
return render_template("changeset.html",
rows=htdata,
colnames=colnames,
reason_dict=config.reason_dict,
all_obj=all_obj,
visits=visits)
if __name__ == "__main__":
app.run(host="127.0.0.1", port="5000")
```
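`create_josm_list` compacts feature dicts into JOSM's comma-separated object syntax; for example:
```python
features = [
    {"type": "node", "osm_id": 123},
    {"type": "way", "osm_id": 456},
]
assert create_josm_list(features) == "n123,w456"
```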
|
{
"source": "jene4ekjene4ek/my_mlflow",
"score": 3
}
|
#### File: examples/multistep_workflow/etl_data.py
```python
import tempfile
import os
import pyspark
import mlflow
import click
@click.command(help="Given a CSV file (see load_raw_data), transforms it into Parquet "
"in an mlflow artifact called 'ratings-parquet-dir'")
@click.option("--ratings-csv")
@click.option("--max-row-limit", default=10000,
help="Limit the data size to run comfortably on a laptop.")
def etl_data(ratings_csv, max_row_limit):
with mlflow.start_run() as mlrun:
tmpdir = tempfile.mkdtemp()
ratings_parquet_dir = os.path.join(tmpdir, 'ratings-parquet')
spark = pyspark.sql.SparkSession.builder.getOrCreate()
print("Converting ratings CSV %s to Parquet %s" % (ratings_csv, ratings_parquet_dir))
ratings_df = spark.read \
.option("header", "true") \
.option("inferSchema", "true") \
.csv(ratings_csv) \
.drop("timestamp") # Drop unused column
ratings_df.show()
if max_row_limit != -1:
ratings_df = ratings_df.limit(max_row_limit)
ratings_df.write.parquet(ratings_parquet_dir)
print("Uploading Parquet ratings: %s" % ratings_parquet_dir)
mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir")
if __name__ == '__main__':
etl_data()
```
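Click commands can be smoke-tested without a shell via `click.testing.CliRunner`; a hedged sketch (the CSV path is a placeholder, and a working Spark/MLflow environment is assumed):
```python
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(etl_data, ["--ratings-csv", "ratings.csv",
                                  "--max-row-limit", "1000"])
print(result.exit_code, result.output)
```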
#### File: examples/multistep_workflow/load_raw_data.py
```python
import requests
import tempfile
import os
import zipfile
import pyspark
import mlflow
import click
@click.command(help="Downloads the MovieLens dataset and saves it as an mlflow artifact "
" called 'ratings-csv-dir'.")
@click.option("--url", default="http://files.grouplens.org/datasets/movielens/ml-20m.zip")
def load_raw_data(url):
with mlflow.start_run() as mlrun:
local_dir = tempfile.mkdtemp()
local_filename = os.path.join(local_dir, "ml-20m.zip")
print("Downloading %s to %s" % (url, local_filename))
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
extracted_dir = os.path.join(local_dir, 'ml-20m')
print("Extracting %s into %s" % (local_filename, extracted_dir))
with zipfile.ZipFile(local_filename, 'r') as zip_ref:
zip_ref.extractall(local_dir)
ratings_file = os.path.join(extracted_dir, 'ratings.csv')
print("Uploading ratings: %s" % ratings_file)
mlflow.log_artifact(ratings_file, "ratings-csv-dir")
if __name__ == '__main__':
load_raw_data()
```
#### File: mlflow/deployments/plugin_manager.py
```python
import abc
import inspect
import entrypoints
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, INTERNAL_ERROR
from mlflow.deployments.base import BaseDeploymentClient
from mlflow.deployments.utils import parse_target_uri
# TODO: refactor to have a common base class for all the plugin implementation in MLFlow
# mlflow/tracking/context/registry.py
# mlflow/tracking/registry
# mlflow/store/artifact/artifact_repository_registry.py
class PluginManager(abc.ABC):
"""
    Abstract class defining an entrypoint-based plugin registration.
This class allows the registration of a function or class to provide an implementation
    for a given key/name. Implementations declared through the entrypoints can be automatically
registered through the `register_entrypoints` method.
"""
@abc.abstractmethod
def __init__(self, group_name):
self._registry = {}
self.group_name = group_name
self._has_registered = None
@abc.abstractmethod
def __getitem__(self, item):
# Letting the child class create this function so that the child
# can raise custom exceptions if it needs to
pass
@property
def registry(self):
"""
        The registry stores each registered plugin as a key-value pair, where the key is the
        plugin's name and the value is the plugin object
"""
return self._registry
@property
def has_registered(self):
"""
        Returns a bool representing whether `register_entrypoints` has run. This
        doesn't return True if the `register` method is called outside of `register_entrypoints`
        to register plugins
"""
return self._has_registered
def register_entrypoints(self):
"""
        Runs through all the packages that define the `group_name` entrypoint
        and registers them into the registry
"""
for entrypoint in entrypoints.get_group_all(self.group_name):
self.registry[entrypoint.name] = entrypoint
self._has_registered = True
class DeploymentPlugins(PluginManager):
def __init__(self):
super().__init__('mlflow.deployments')
self.register_entrypoints()
def __getitem__(self, item):
"""Override __getitem__ so that we can directly look up plugins via dict-like syntax"""
try:
target_name = parse_target_uri(item)
plugin_like = self.registry[target_name]
except KeyError:
msg = 'No plugin found for managing model deployments to "{target}". ' \
'In order to deploy models to "{target}", find and install an appropriate ' \
'plugin from ' \
'https://mlflow.org/docs/latest/plugins.html#community-plugins using ' \
'your package manager (pip, conda etc).'.format(target=item)
raise MlflowException(msg, error_code=RESOURCE_DOES_NOT_EXIST)
if isinstance(plugin_like, entrypoints.EntryPoint):
try:
plugin_obj = plugin_like.load()
except (AttributeError, ImportError) as exc:
raise RuntimeError(
'Failed to load the plugin "{}": {}'.format(item, str(exc)))
self.registry[item] = plugin_obj
else:
plugin_obj = plugin_like
# Testing whether the plugin is valid or not
expected = {'target_help', 'run_local'}
deployment_classes = []
for name, obj in inspect.getmembers(plugin_obj):
if name in expected:
expected.remove(name)
elif inspect.isclass(obj) and \
issubclass(obj, BaseDeploymentClient) and \
not obj == BaseDeploymentClient:
deployment_classes.append(name)
if len(expected) > 0:
raise MlflowException("Plugin registered for the target {} does not has all "
"the required interfaces. Raise an issue with the "
"plugin developers.\n"
"Missing interfaces: {}".format(item, expected),
error_code=INTERNAL_ERROR)
if len(deployment_classes) > 1:
raise MlflowException("Plugin registered for the target {} has more than one "
"child class of BaseDeploymentClient. Raise an issue with"
" the plugin developers. "
"Classes found are {}".format(item, deployment_classes))
elif len(deployment_classes) == 0:
raise MlflowException("Plugin registered for the target {} has no child class"
" of BaseDeploymentClient. Raise an issue with the "
"plugin developers".format(item))
return plugin_obj
```
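A plugin module passing the validation above must expose `target_help`, `run_local`, and exactly one concrete `BaseDeploymentClient` subclass. A sketch of such a module, registered under the `mlflow.deployments` entrypoint group; the exact abstract method set of `BaseDeploymentClient` is assumed here:
```python
from mlflow.deployments.base import BaseDeploymentClient

def target_help():
    return "Help text for the hypothetical 'faketarget' deployment target."

def run_local(name, model_uri, flavor=None, config=None):
    raise NotImplementedError("faketarget cannot run deployments locally")

class FakeTargetClient(BaseDeploymentClient):
    def create_deployment(self, name, model_uri, flavor=None, config=None):
        return {"name": name, "flavor": flavor}

    def update_deployment(self, name, model_uri=None, flavor=None, config=None):
        return {"name": name, "flavor": flavor}

    def delete_deployment(self, name):
        pass

    def list_deployments(self):
        return []

    def get_deployment(self, name):
        return {"name": name}
```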
#### File: db_migrations/versions/2b4d017a5e9b_add_model_registry_tables_to_db.py
```python
import time
import logging
from alembic import op
import sqlalchemy as sa
from alembic import op
from sqlalchemy import orm, func, distinct, and_
from sqlalchemy import (
Column, String, ForeignKey, Float, Integer,
BigInteger, PrimaryKeyConstraint, Boolean)
from mlflow.entities.model_registry.model_version_stages import STAGE_NONE
from mlflow.entities.model_registry.model_version_status import ModelVersionStatus
from mlflow.store.model_registry.dbmodels.models import SqlRegisteredModel, SqlModelVersion
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '89d4b8295536'
branch_labels = None
depends_on = None
def upgrade():
bind = op.get_bind()
session = orm.Session(bind=bind)
_logger.info("Adding registered_models and model_versions tables to database.")
op.create_table(SqlRegisteredModel.__tablename__,
Column('name', String(256), unique=True, nullable=False),
Column('creation_time', BigInteger, default=lambda: int(time.time() * 1000)),
Column('last_updated_time', BigInteger, nullable=True, default=None),
Column('description', String(5000), nullable=True),
PrimaryKeyConstraint('name', name='registered_model_pk')
)
op.create_table(SqlModelVersion.__tablename__,
Column('name', String(256), ForeignKey('registered_models.name',
onupdate='cascade')),
Column('version', Integer, nullable=False),
Column('creation_time', BigInteger, default=lambda: int(time.time() * 1000)),
Column('last_updated_time', BigInteger, nullable=True, default=None),
Column('description', String(5000), nullable=True),
Column('user_id', String(256), nullable=True, default=None),
Column('current_stage', String(20), default=STAGE_NONE),
Column('source', String(500), nullable=True, default=None),
Column('run_id', String(32), nullable=False),
Column('status', String(20),
default=ModelVersionStatus.to_string(ModelVersionStatus.READY)),
Column('status_message', String(500), nullable=True, default=None),
PrimaryKeyConstraint('name', 'version', name='model_version_pk')
)
session.commit()
_logger.info("Migration complete!")
def downgrade():
op.drop_table(SqlRegisteredModel.__tablename__)
op.drop_table(SqlModelVersion.__tablename__)
```
#### File: mlflow/types/utils.py
```python
from typing import Any
import numpy as np
import pandas as pd
from mlflow.exceptions import MlflowException
from mlflow.types import DataType
from mlflow.types.schema import Schema, ColSpec
class TensorsNotSupportedException(MlflowException):
def __init__(self, msg):
super().__init__("Multidimensional arrays (aka tensors) are not supported. "
"{}".format(msg))
def _infer_schema(data: Any) -> Schema:
"""
Infer an MLflow schema from a dataset.
This method captures the column names and data types from the user data. The signature
represents model input and output as data frames with (optionally) named columns and data
type specified as one of types defined in :py:class:`DataType`. This method will raise
an exception if the user data contains incompatible types or is not passed in one of the
supported formats (containers).
The input should be one of these:
- pandas.DataFrame or pandas.Series
- dictionary of { name -> numpy.ndarray}
- numpy.ndarray
- pyspark.sql.DataFrame
The element types should be mappable to one of :py:class:`mlflow.models.signature.DataType`.
NOTE: Multidimensional (>2d) arrays (aka tensors) are not supported at this time.
:param data: Dataset to infer from.
:return: Schema
"""
if isinstance(data, dict):
res = []
for col in data.keys():
ary = data[col]
if not isinstance(ary, np.ndarray):
raise TypeError("Data in the dictionary must be of type numpy.ndarray")
dims = len(ary.shape)
if dims == 1:
res.append(ColSpec(type=_infer_numpy_array(ary), name=col))
else:
raise TensorsNotSupportedException("Data in the dictionary must be 1-dimensional, "
"got shape {}".format(ary.shape))
return Schema(res)
elif isinstance(data, pd.Series):
return Schema([ColSpec(type=_infer_numpy_array(data.values))])
elif isinstance(data, pd.DataFrame):
return Schema([ColSpec(type=_infer_numpy_array(data[col].values), name=col)
for col in data.columns])
elif isinstance(data, np.ndarray):
if len(data.shape) > 2:
raise TensorsNotSupportedException("Attempting to infer schema from numpy array with "
"shape {}".format(data.shape))
if data.dtype == np.object:
data = pd.DataFrame(data).infer_objects()
return Schema([ColSpec(type=_infer_numpy_array(data[col].values))
for col in data.columns])
if len(data.shape) == 1:
return Schema([ColSpec(type=_infer_numpy_dtype(data.dtype))])
elif len(data.shape) == 2:
return Schema([ColSpec(type=_infer_numpy_dtype(data.dtype))] * data.shape[1])
elif _is_spark_df(data):
return Schema([ColSpec(type=_infer_spark_type(field.dataType), name=field.name)
for field in data.schema.fields])
raise TypeError("Expected one of (pandas.DataFrame, numpy array, "
"dictionary of (name -> numpy.ndarray), pyspark.sql.DataFrame) "
"but got '{}'".format(type(data)))
def _infer_numpy_dtype(dtype: np.dtype) -> DataType:
if not isinstance(dtype, np.dtype):
raise TypeError("Expected numpy.dtype, got '{}'.".format(type(dtype)))
if dtype.kind == "b":
return DataType.boolean
elif dtype.kind == "i" or dtype.kind == "u":
if dtype.itemsize < 4 or (dtype.kind == "i" and dtype.itemsize == 4):
return DataType.integer
elif dtype.itemsize < 8 or (dtype.kind == "i" and dtype.itemsize == 8):
return DataType.long
elif dtype.kind == "f":
if dtype.itemsize <= 4:
return DataType.float
elif dtype.itemsize <= 8:
return DataType.double
elif dtype.kind == "U":
return DataType.string
elif dtype.kind == "S":
return DataType.binary
elif dtype.kind == "O":
raise Exception("Can not infer np.object without looking at the values, call "
"_map_numpy_array instead.")
raise MlflowException("Unsupported numpy data type '{0}', kind '{1}'".format(
dtype, dtype.kind))
def _infer_numpy_array(col: np.ndarray) -> DataType:
if not isinstance(col, np.ndarray):
raise TypeError("Expected numpy.ndarray, got '{}'.".format(type(col)))
if len(col.shape) > 1:
raise MlflowException("Expected 1d array, got array with shape {}".format(col.shape))
class IsInstanceOrNone(object):
def __init__(self, *args):
self.classes = args
self.seen_instances = 0
def __call__(self, x):
if x is None:
return True
elif any(map(lambda c: isinstance(x, c), self.classes)):
self.seen_instances += 1
return True
else:
return False
if col.dtype.kind == "O":
is_binary_test = IsInstanceOrNone(bytes, bytearray)
if all(map(is_binary_test, col)) and is_binary_test.seen_instances > 0:
return DataType.binary
is_string_test = IsInstanceOrNone(str)
if all(map(is_string_test, col)) and is_string_test.seen_instances > 0:
return DataType.string
# NB: bool is also instance of int => boolean test must precede integer test.
is_boolean_test = IsInstanceOrNone(bool)
if all(map(is_boolean_test, col)) and is_boolean_test.seen_instances > 0:
return DataType.boolean
is_long_test = IsInstanceOrNone(int)
if all(map(is_long_test, col)) and is_long_test.seen_instances > 0:
return DataType.long
is_double_test = IsInstanceOrNone(float)
if all(map(is_double_test, col)) and is_double_test.seen_instances > 0:
return DataType.double
else:
raise MlflowException("Unable to map 'np.object' type to MLflow DataType. np.object can"
"be mapped iff all values have identical data type which is one "
"of (string, (bytes or byterray), int, float).")
else:
return _infer_numpy_dtype(col.dtype)
def _infer_spark_type(x) -> DataType:
import pyspark.sql.types
if isinstance(x, pyspark.sql.types.NumericType):
if isinstance(x, pyspark.sql.types.IntegralType):
if isinstance(x, pyspark.sql.types.LongType):
return DataType.long
else:
return DataType.integer
elif isinstance(x, pyspark.sql.types.FloatType):
return DataType.float
elif isinstance(x, pyspark.sql.types.DoubleType):
return DataType.double
elif isinstance(x, pyspark.sql.types.BooleanType):
return DataType.boolean
elif isinstance(x, pyspark.sql.types.StringType):
return DataType.string
elif isinstance(x, pyspark.sql.types.BinaryType):
return DataType.binary
else:
raise Exception("Unsupported Spark Type '{}', MLflow schema is only supported for scalar "
"Spark types.".format(type(x)))
def _is_spark_df(x) -> bool:
try:
import pyspark.sql.dataframe
return isinstance(x, pyspark.sql.dataframe.DataFrame)
except ImportError:
return False
```
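A quick check of the inference logic above; int64 columns map to `long` and object columns of strings map to `string`:
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "age": np.array([31, 42, 53], dtype=np.int64),
    "name": ["ann", "bob", "cid"],
})
schema = _infer_schema(df)
print(schema)  # expected: [ColSpec(long, 'age'), ColSpec(string, 'name')]
```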
#### File: tests/models/test_model_input_examples.py
```python
import json
import math
import numpy as np
import pandas as pd
import pytest
from mlflow.models.signature import infer_signature
from mlflow.models.utils import _Example
from mlflow.types.utils import TensorsNotSupportedException
from mlflow.utils.file_utils import TempDir
from mlflow.utils.proto_json_utils import _dataframe_from_json
@pytest.fixture
def pandas_df_with_all_types():
return pd.DataFrame({
"boolean": [True, False, True],
"integer": np.array([1, 2, 3], np.int32),
"long": np.array([1, 2, 3], np.int64),
"float": np.array([math.pi, 2 * math.pi, 3 * math.pi], np.float32),
"double": [math.pi, 2 * math.pi, 3 * math.pi],
"binary": [bytes([1, 2, 3]), bytes([4, 5, 6]), bytes([7, 8, 9])],
"string": ["a", "b", 'c'],
})
def test_input_examples(pandas_df_with_all_types):
sig = infer_signature(pandas_df_with_all_types)
# test setting example with data frame with all supported data types
with TempDir() as tmp:
example = _Example(pandas_df_with_all_types)
example.save(tmp.path())
filename = example.info["artifact_path"]
with open(tmp.path(filename), "r") as f:
data = json.load(f)
assert set(data.keys()) == set(("columns", "data"))
parsed_df = _dataframe_from_json(tmp.path(filename), schema=sig.inputs)
assert (pandas_df_with_all_types == parsed_df).all().all()
# the frame read without schema should match except for the binary values
assert (parsed_df.drop(columns=["binary"]) == _dataframe_from_json(tmp.path(filename))
.drop(columns=["binary"])).all().all()
# pass the input as dictionary instead
with TempDir() as tmp:
d = {name: pandas_df_with_all_types[name].values
for name in pandas_df_with_all_types.columns}
example = _Example(d)
example.save(tmp.path())
filename = example.info["artifact_path"]
parsed_df = _dataframe_from_json(tmp.path(filename), sig.inputs)
assert (pandas_df_with_all_types == parsed_df).all().all()
# input passed as numpy array
sig = infer_signature(pandas_df_with_all_types.values)
with TempDir() as tmp:
example = _Example(pandas_df_with_all_types.values)
example.save(tmp.path())
filename = example.info["artifact_path"]
with open(tmp.path(filename), "r") as f:
data = json.load(f)
assert set(data.keys()) == set(("data",))
parsed_ary = _dataframe_from_json(tmp.path(filename), schema=sig.inputs).values
assert (pandas_df_with_all_types.values == parsed_ary).all().all()
# pass multidimensional array
with TempDir() as tmp:
example = np.array([[[1, 2, 3]]])
with pytest.raises(TensorsNotSupportedException):
_Example(example)
# pass multidimensional array
with TempDir() as tmp:
example = np.array([[1, 2, 3]])
with pytest.raises(TensorsNotSupportedException):
_Example({"x": example, "y": example})
# pass dict with scalars
with TempDir() as tmp:
example = {"a": 1, "b": "abc"}
x = _Example(example)
x.save(tmp.path())
filename = x.info["artifact_path"]
parsed_df = _dataframe_from_json(tmp.path(filename))
assert example == parsed_df.to_dict(orient="records")[0]
```
#### File: store/artifact/test_runs_artifact_repo.py
```python
import pytest
from mock import Mock
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository
from mlflow.store.artifact.s3_artifact_repo import S3ArtifactRepository
@pytest.mark.parametrize("uri, expected_run_id, expected_artifact_path", [
('runs:/1234abcdf1394asdfwer33/path/to/model', '1234abcdf1394asdfwer33', 'path/to/model'),
('runs:/1234abcdf1394asdfwer33/path/to/model/', '1234abcdf1394asdfwer33', 'path/to/model/'),
('runs:/1234abcdf1394asdfwer33', '1234abcdf1394asdfwer33', None),
('runs:/1234abcdf1394asdfwer33/', '1234abcdf1394asdfwer33', None),
('runs:///1234abcdf1394asdfwer33/', '1234abcdf1394asdfwer33', None),
])
def test_parse_runs_uri_valid_input(uri, expected_run_id, expected_artifact_path):
(run_id, artifact_path) = RunsArtifactRepository.parse_runs_uri(uri)
assert run_id == expected_run_id
assert artifact_path == expected_artifact_path
@pytest.mark.parametrize("uri", [
'notruns:/1234abcdf1394asdfwer33/', # wrong scheme
'runs:/', # no run id
'runs:1234abcdf1394asdfwer33/', # missing slash
'runs://1234abcdf1394asdfwer33/', # hostnames are not yet supported
])
def test_parse_runs_uri_invalid_input(uri):
with pytest.raises(MlflowException):
RunsArtifactRepository.parse_runs_uri(uri)
def test_runs_artifact_repo_init():
artifact_location = "s3://blah_bucket/"
experiment_id = mlflow.create_experiment("expr_abc", artifact_location)
with mlflow.start_run(experiment_id=experiment_id):
run_id = mlflow.active_run().info.run_id
runs_uri = "runs:/%s/path/to/model" % run_id
runs_repo = RunsArtifactRepository(runs_uri)
assert runs_repo.artifact_uri == runs_uri
assert isinstance(runs_repo.repo, S3ArtifactRepository)
expected_absolute_uri = "%s%s/artifacts/path/to/model" % (artifact_location, run_id)
assert runs_repo.repo.artifact_uri == expected_absolute_uri
def test_runs_artifact_repo_uses_repo_download_artifacts():
"""
    The RunsArtifactRepo should delegate `download_artifacts` to its self.repo.download_artifacts
function
"""
artifact_location = "s3://blah_bucket/"
experiment_id = mlflow.create_experiment("expr_abcd", artifact_location)
with mlflow.start_run(experiment_id=experiment_id):
run_id = mlflow.active_run().info.run_id
runs_repo = RunsArtifactRepository('runs:/{}'.format(run_id))
runs_repo.repo = Mock()
runs_repo.download_artifacts('artifact_path', 'dst_path')
runs_repo.repo.download_artifacts.assert_called_once()
```
|