{
"source": "jersearls/cpap-filler",
"score": 3
}
#### File: cpap-filler/calibrate/calibrate.py
```python
import requests
import fileinput
import time
import re
from os import fdopen, remove, getenv
from tempfile import mkstemp
from shutil import move
class Calibrate():
def __init__(self):
self.access_token = getenv("PARTICLE_ACCESS_TOKEN")
self.device_id = getenv("PARTICLE_DEVICE_ID")
def replace_env_var(self, pump_rate):
fh, abs_path = mkstemp()
with fdopen(fh,'w') as new_file:
with open('.env') as old_file:
for line in old_file:
new_file.write(re.sub(r'PUMP_RATE=(.*)', "PUMP_RATE={0}".format(pump_rate), line))
remove('.env')
move(abs_path, '.env')
def calculate_pump_rate(self, mL_dispensed):
rate = mL_dispensed / 30.0
return round(rate, 3)
def call_photon_pump_function(self, seconds):
particle_funtion= "Pump"
argument = seconds
address = 'https://api.particle.io/v1/devices/{0}/{1}'.format(self.device_id, particle_funtion)
data = {'args': argument, 'access_token': self.access_token}
post = requests.post(address, data=data)
def prime(self, input_prompt="Begin"):
if input_prompt == "Begin":
print("Priming process runs the pump for 10 seconds.")
print("Please place pump output hose into an empty vessel.")
user_ready = input("{0} pump priming? y/N: ".format(input_prompt))
if user_ready == "y":
self.call_photon_pump_function(10)
time.sleep(10)
self.prime("Continue")
def run(self):
self.prime()
print("Device will emit water for 30 seconds.")
print("Please place pump output hose into an EMPTY 50 mL graduated cylinder.")
user_ready = input("Ready to being calibration? y/N: ")
if user_ready == "y":
print("Pumping for: 30 seconds.")
self.call_photon_pump_function(30)
time.sleep(30)
mL_dispensed = float(input("Enter the number of mL dispensed: "))
pump_rate = self.calculate_pump_rate(mL_dispensed)
print("Current pump rate is {0} mL per second.".format(pump_rate))
self.replace_env_var(pump_rate)
else:
print("Calibration aborted")
Calibrate().run()
```
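The calibration script assumes the project's `.env` file already contains a `PUMP_RATE=` entry, which `replace_env_var` rewrites in place via `re.sub`. A minimal sketch of that substitution, using made-up values:

```python
# Illustrative only: how the PUMP_RATE line in .env is rewritten (values are hypothetical).
import re

line = "PUMP_RATE=1.100\n"                # example existing .env entry
new_rate = round(37.0 / 30.0, 3)          # e.g. 37 mL dispensed over 30 s -> 1.233
print(re.sub(r'PUMP_RATE=(.*)', "PUMP_RATE={0}".format(new_rate), line))
# PUMP_RATE=1.233
```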
#### File: cpap-filler/cpap_filler/pumper.py
```python
import requests
import logging
import os
from scraper import Scraper
class Pumper():
def __init__(self):
self.access_token = os.getenv("PARTICLE_ACCESS_TOKEN")
self.device_id = os.getenv("PARTICLE_DEVICE_ID")
self.pump_rate = float(os.getenv("PUMP_RATE"))
self.consumption_rate = float(os.getenv("CONSUMPTION_RATE"))
self.scraper = Scraper()
self.logger = logging.getLogger(__name__)
def time_to_float(self, time_str):
hours, minutes = time_str.split(':')
return (int(hours)*60 + int(minutes)) / 60.0
def calculate_pump_time(self):
score = self.scraper.find_most_recent_score()
self.logger.info(score)
usage_in_hours = score["UsageDisplay"]
self.logger.info("Slept for {0} hours.".format(usage_in_hours))
pump_rate_per_second_in_mL = self.pump_rate
cpap_water_usage_per_hour_in_mL = self.consumption_rate
pump_seconds_per_usage_hour = cpap_water_usage_per_hour_in_mL / pump_rate_per_second_in_mL
usage_float = self.time_to_float(usage_in_hours)
pump_run_time = usage_float * pump_seconds_per_usage_hour
return round(pump_run_time)
def get_device_status(self):
address = 'https://api.particle.io/v1/devices/{0}'.format(self.device_id)
headers = {'Authorization':'Bearer {0}'.format(self.access_token)}
get = requests.get(address, headers=headers)
device_response = get.json()
return device_response['connected']
def call_photon_pump_function(self, seconds):
particle_funtion= "Pump"
argument = seconds
address = 'https://api.particle.io/v1/devices/{0}/{1}'.format(self.device_id, particle_funtion)
data = {'args': argument, 'access_token': self.access_token}
post = requests.post(address, data=data)
def run(self):
pump_seconds = self.calculate_pump_time()
if self.get_device_status() and pump_seconds != 0:
self.logger.info("Pumping for: {0} seconds.".format(pump_seconds))
self.call_photon_pump_function(pump_seconds)
elif not self.get_device_status():
self.logger.warn("Device not responding")
else:
self.logger.warn("CPAP not used previous night.")
```
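For reference, a worked example of the run-time arithmetic in `calculate_pump_time` above, with illustrative numbers (the real values come from the `PUMP_RATE`/`CONSUMPTION_RATE` environment variables and the scraped `UsageDisplay` field):

```python
# Illustrative numbers only, not taken from the project.
pump_rate = 1.25              # mL pumped per second (PUMP_RATE)
consumption_rate = 30.0       # mL of water used per hour of CPAP usage (CONSUMPTION_RATE)
usage_display = "07:30"       # hours:minutes slept, as scraped from the score

hours, minutes = usage_display.split(":")
usage_hours = (int(hours) * 60 + int(minutes)) / 60.0        # 7.5 hours

pump_seconds_per_usage_hour = consumption_rate / pump_rate   # 24.0 seconds of pumping per hour slept
print(round(usage_hours * pump_seconds_per_usage_hour))      # 180
```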
{
"source": "jersey99/litedram",
"score": 2
}
#### File: litedram/phy/dfi.py
```python
from migen import *
from migen.genlib.record import *
from migen.genlib.cdc import PulseSynchronizer
from litedram.common import PhySettings
from litedram.phy.utils import Serializer, Deserializer
def phase_cmd_description(addressbits, bankbits, nranks):
return [
("address", addressbits, DIR_M_TO_S),
("bank", bankbits, DIR_M_TO_S),
("cas_n", 1, DIR_M_TO_S),
("cs_n", nranks, DIR_M_TO_S),
("ras_n", 1, DIR_M_TO_S),
("we_n", 1, DIR_M_TO_S),
("cke", nranks, DIR_M_TO_S),
("odt", nranks, DIR_M_TO_S),
("reset_n", 1, DIR_M_TO_S),
("act_n", 1, DIR_M_TO_S)
]
def phase_wrdata_description(databits):
return [
("wrdata", databits, DIR_M_TO_S),
("wrdata_en", 1, DIR_M_TO_S),
("wrdata_mask", databits//8, DIR_M_TO_S)
]
def phase_rddata_description(databits):
return [
("rddata_en", 1, DIR_M_TO_S),
("rddata", databits, DIR_S_TO_M),
("rddata_valid", 1, DIR_S_TO_M)
]
def phase_description(addressbits, bankbits, nranks, databits):
r = phase_cmd_description(addressbits, bankbits, nranks)
r += phase_wrdata_description(databits)
r += phase_rddata_description(databits)
return r
class Interface(Record):
def __init__(self, addressbits, bankbits, nranks, databits, nphases=1):
layout = [("p"+str(i), phase_description(addressbits, bankbits, nranks, databits)) for i in range(nphases)]
Record.__init__(self, layout)
self.phases = [getattr(self, "p"+str(i)) for i in range(nphases)]
for p in self.phases:
p.cas_n.reset = 1
p.cs_n.reset = (2**nranks-1)
p.ras_n.reset = 1
p.we_n.reset = 1
p.act_n.reset = 1
# Returns pairs (DFI-mandated signal name, Migen signal object)
def get_standard_names(self, m2s=True, s2m=True):
r = []
add_suffix = len(self.phases) > 1
for n, phase in enumerate(self.phases):
for field, size, direction in phase.layout:
if (m2s and direction == DIR_M_TO_S) or (s2m and direction == DIR_S_TO_M):
if add_suffix:
if direction == DIR_M_TO_S:
suffix = "_p" + str(n)
else:
suffix = "_w" + str(n)
else:
suffix = ""
r.append(("dfi_" + field + suffix, getattr(phase, field)))
return r
class Interconnect(Module):
def __init__(self, master, slave):
self.comb += master.connect(slave)
class DDR4DFIMux(Module):
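# Note added for clarity: in the classic DFI command encoding an ACTIVATE is signalled by
# ras_n low with cas_n and we_n high. DDR4 instead has a dedicated ACT_n pin and reuses the
# RAS_n/CAS_n/WE_n pins as row address bits A16/A15/A14, which is why the mux below copies
# address[16], address[15] and address[14] onto ras_n, cas_n and we_n when it sees an ACTIVATE.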
def __init__(self, dfi_i, dfi_o):
for i in range(len(dfi_i.phases)):
p_i = dfi_i.phases[i]
p_o = dfi_o.phases[i]
self.comb += [
p_i.connect(p_o),
If(~p_i.ras_n & p_i.cas_n & p_i.we_n,
p_o.act_n.eq(0),
p_o.we_n.eq(p_i.address[14]),
p_o.cas_n.eq(p_i.address[15]),
p_o.ras_n.eq(p_i.address[16])
).Else(
p_o.act_n.eq(1),
)
]
class DFIRateConverter(Module):
"""Converts between DFI interfaces running at different clock frequencies
This module converts the DFI interface `phy_dfi`, which runs at a higher clock frequency,
into a DFI interface running at a `ratio` times lower frequency. The new DFI has `ratio`
times more phases, and commands on consecutive phases of the new DFI are serialized onto
consecutive phases/clocks of `phy_dfi` (phases first, then clock cycles).
Data must be serialized/deserialized in such a way that a whole burst on `phy_dfi` is
sent in a single `clk` cycle. For this reason, the new DFI interface has `ratio` times
fewer databits. For example, with phy_dfi(nphases=2, databits=32) and ratio=4 the new
DFI will have nphases=8, databits=8. This results in 8*8=64 bits in `clkdiv` translating
into 2*32=64 bits in `clk`. This means that only a single `clk` cycle per `clkdiv`
cycle carries the data (cycle 0 by default). This can be changed by passing non-zero
values for `write_delay`/`read_delay`, which may be needed to properly align the
write/read latency of the original PHY and the wrapper.
"""
def __init__(self, phy_dfi, *, clkdiv, clk, ratio, serdes_reset_cnt=-1, write_delay=0, read_delay=0):
assert len(phy_dfi.p0.wrdata) % ratio == 0
assert 0 <= write_delay < ratio, f"Write data can be delayed by at most {ratio - 1} clk cycles"
assert 0 <= read_delay < ratio, f"Read data can be delayed by at most {ratio - 1} clk cycles"
self.ser_latency = Serializer.LATENCY
self.des_latency = Deserializer.LATENCY
phase_params = dict(
addressbits = len(phy_dfi.p0.address),
bankbits = len(phy_dfi.p0.bank),
nranks = len(phy_dfi.p0.cs_n),
databits = len(phy_dfi.p0.wrdata) // ratio,
)
self.dfi = Interface(nphases=ratio * len(phy_dfi.phases), **phase_params)
wr_delayed = ["wrdata", "wrdata_mask"]
rd_delayed = ["rddata", "rddata_valid"]
for name, width, dir in phase_description(**phase_params):
# all signals except write/read
if name in wr_delayed + rd_delayed:
continue
# on each clk phase
for pi, phase_s in enumerate(phy_dfi.phases):
sig_s = getattr(phase_s, name)
assert len(sig_s) == width
# data from each clkdiv phase
sigs_m = []
for j in range(ratio):
phase_m = self.dfi.phases[pi + len(phy_dfi.phases)*j]
sigs_m.append(getattr(phase_m, name))
ser = Serializer(
clkdiv = clkdiv,
clk = clk,
i_dw = ratio*width,
o_dw = width,
i = Cat(sigs_m),
o = sig_s,
reset_cnt = serdes_reset_cnt,
name = name,
)
self.submodules += ser
# wrdata
for name, width, dir in phase_description(**phase_params):
if name not in wr_delayed:
continue
for pi, phase_s in enumerate(phy_dfi.phases):
sig_s = getattr(phase_s, name)
sig_m = Signal(len(sig_s) * ratio)
sigs_m = []
for j in range(ratio):
phase_m = self.dfi.phases[pi*ratio + j]
sigs_m.append(getattr(phase_m, name))
width = len(Cat(sigs_m))
self.comb += sig_m[write_delay*width:(write_delay+1)*width].eq(Cat(sigs_m))
o = Signal.like(sig_s)
ser = Serializer(
clkdiv = clkdiv,
clk = clk,
i_dw = len(sig_m),
o_dw = len(sig_s),
i = sig_m,
o = o,
reset_cnt = serdes_reset_cnt,
name = name,
)
self.submodules += ser
self.comb += sig_s.eq(o)
# rddata
for name, width, dir in phase_description(**phase_params):
if name not in rd_delayed:
continue
for pi, phase_s in enumerate(phy_dfi.phases):
sig_s = getattr(phase_s, name)
sig_m = Signal(ratio * len(sig_s))
sigs_m = []
for j in range(ratio):
phase_m = self.dfi.phases[pi*ratio + j]
sigs_m.append(getattr(phase_m, name))
des = Deserializer(
clkdiv = clkdiv,
clk = clk,
i_dw = len(sig_s),
o_dw = len(sig_m),
i = sig_s,
o = sig_m,
reset_cnt = serdes_reset_cnt,
name = name,
)
self.submodules += des
if name == "rddata_valid":
self.comb += Cat(sigs_m).eq(Replicate(sig_m[read_delay], ratio))
else:
out_width = len(Cat(sigs_m))
sig_m_window = sig_m[read_delay*out_width:(read_delay + 1)*out_width]
self.comb += Cat(sigs_m).eq(sig_m_window)
@classmethod
def phy_wrapper(cls, phy_cls, ratio, phy_attrs=None, clock_mapping=None, **converter_kwargs):
"""Generate a wrapper class for given PHY
Given a PHY class `phy_cls`, a new Module is generated that instantiates `phy_cls` as a
submodule (self.submodules.phy) and uses DFIRateConverter to convert its DFI. The wrapper
also recalculates the `phy_cls` PhySettings so that the latency values are correct.
Parameters
----------
phy_cls : type
PHY class. It must support a `csr_cdc` argument (function: csr_cdc(Signal) -> Signal)
that it will use to wrap all CSR.re signals to avoid clock domain crossing problems.
ratio : int
Frequency ratio between the new DFI and the DFI of the wrapped PHY.
phy_attrs : list[str]
Names of PHY attributes to be copied to the wrapper (self.attr = self.phy.attr).
clock_mapping : dict[str, str]
Clock remapping for the PHY. Defaults to {"sys": f"sys{ratio}x"}.
converter_kwargs : Any
Keyword arguments forwarded to the DFIRateConverter instance.
"""
if ratio == 1:
return phy_cls
# Generate the wrapper class dynamically
name = f"{phy_cls.__name__}Wrapper"
bases = (Module, object, )
internal_cd = f"sys{ratio}x"
if clock_mapping is None:
clock_mapping = {"sys": internal_cd}
# Constructor
def __init__(self, *args, **kwargs):
# Add the PHY in the new clock domain
self.internal_cd = internal_cd
phy = phy_cls(*args, csr_cdc=self.csr_cdc, **kwargs)
# Remap clock domains in the PHY
# Workaround: do this in two steps to avoid errors due to the fact that renaming is done
# sequentially. Consider mapping {"sys": "sys2x", "sys2x": "sys4x"}, it would lead to:
# sys2x = sys
# sys4x = sys2x
# resulting in all sync operations in sys4x domain.
mapping = [tuple(i) for i in clock_mapping.items()]
map_tmp = {clk_from: f"tmp{i}" for i, (clk_from, clk_to) in enumerate(mapping)}
map_final = {f"tmp{i}": clk_to for i, (clk_from, clk_to) in enumerate(mapping)}
self.submodules.phy = ClockDomainsRenamer(map_final)(ClockDomainsRenamer(map_tmp)(phy))
# Copy some attributes of the PHY
for attr in phy_attrs or []:
setattr(self, attr, getattr(self.phy, attr))
# Insert the DFI rate converter between the exposed DFI and the PHY's DFI
self.submodules.dfi_converter = DFIRateConverter(phy.dfi,
clkdiv = "sys",
clk = self.internal_cd,
ratio = ratio,
write_delay = phy.settings.write_latency % ratio,
read_delay = phy.settings.read_latency % ratio,
**converter_kwargs,
)
self.dfi = self.dfi_converter.dfi
# Generate new PhySettings
converter_latency = self.dfi_converter.ser_latency + self.dfi_converter.des_latency
self.settings = PhySettings(
phytype = phy.settings.phytype,
memtype = phy.settings.memtype,
databits = phy.settings.databits,
dfi_databits = len(self.dfi.p0.wrdata),
nranks = phy.settings.nranks,
nphases = len(self.dfi.phases),
rdphase = phy.settings.rdphase,
wrphase = phy.settings.wrphase,
cl = phy.settings.cl,
cwl = phy.settings.cwl,
read_latency = phy.settings.read_latency//ratio + converter_latency,
write_latency = phy.settings.write_latency//ratio,
cmd_latency = phy.settings.cmd_latency,
cmd_delay = phy.settings.cmd_delay,
write_leveling = phy.settings.write_leveling,
write_dq_dqs_training = phy.settings.write_dq_dqs_training,
write_latency_calibration = phy.settings.write_latency_calibration,
read_leveling = phy.settings.read_leveling,
delays = phy.settings.delays,
bitslips = phy.settings.bitslips,
)
# Copy any non-default PhySettings (e.g. electrical settings)
for attr, value in vars(self.phy.settings).items():
if not hasattr(self.settings, attr):
setattr(self.settings, attr, value)
def csr_cdc(self, i):
o = Signal()
psync = PulseSynchronizer("sys", self.internal_cd)
self.submodules += psync
self.comb += [
psync.i.eq(i),
o.eq(psync.o),
]
return o
def get_csrs(self):
return self.phy.get_csrs()
# This creates a new class with given name, base classes and attributes/methods
namespace = dict(
__init__ = __init__,
csr_cdc = csr_cdc,
get_csrs = get_csrs,
)
return type(name, bases, namespace)
```
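A hypothetical usage sketch for `phy_wrapper`; `SomeDDR4PHY`, the pads object and the constructor arguments are placeholders, not names from litedram:

```python
# Hypothetical sketch only -- SomeDDR4PHY and its constructor arguments are placeholders.
#
#   WrappedPHY = DFIRateConverter.phy_wrapper(SomeDDR4PHY, ratio=4, phy_attrs=["pads"])
#   phy = WrappedPHY(pads, sys_clk_freq=100e6)   # constructed like the original PHY
#
# With ratio=4 the wrapped PHY is clock-renamed into the "sys4x" domain, the wrapper's
# `phy.dfi` exposes 4x more phases with 4x fewer databits in the "sys" domain, and
# `phy.settings` is recomputed with the converter's serializer/deserializer latencies.
```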
#### File: litedram/phy/sim_utils.py
```python
from migen import *
from litex.build.sim import SimPlatform
from litex.build.sim.config import SimConfig
from litex.build.generic_platform import Pins, Subsignal
from litex.soc.interconnect.csr import CSRStorage, AutoCSR
from litedram.common import Settings, tXXDController
from litedram.phy.utils import Serializer, Deserializer, edge
# PHY ----------------------------------------------------------------------------------------------
class SimSerDesMixin:
"""Helper class for easier (de-)serialization to simulation pads."""
def ser(self, *, i, o, clkdiv, clk, name="", **kwargs):
assert len(o) == 1
kwargs = dict(i=i, i_dw=len(i), o=o, o_dw=1, clk=clk, clkdiv=clkdiv,
name=f"ser_{name}".strip("_"), **kwargs)
self.submodules += Serializer(**kwargs)
def des(self, *, i, o, clkdiv, clk, name="", **kwargs):
assert len(i) == 1
kwargs = dict(i=i, i_dw=1, o=o, o_dw=len(o), clk=clk, clkdiv=clkdiv,
name=f"des_{name}".strip("_"), **kwargs)
self.submodules += Deserializer(**kwargs)
# Platform -----------------------------------------------------------------------------------------
class SimPad(Settings):
def __init__(self, name, width, io=False):
self.set_attributes(locals())
class SimulationPads(Module):
"""Pads for simulation purpose
Tristate pads are simulated as separate input/output pins (name_i, name_o) and
an output-enable pin (name_oe). Output pins are to be driven byt the PHY and
input pins are to be driven by the DRAM simulator. An additional pin without
a suffix is created and this module will include logic to set this pin to the
actual value depending on the output-enable signal.
"""
def layout(self, **kwargs):
raise NotImplementedError("Return the simulation pads layout as a list of SimPad objects")
def __init__(self, **kwargs):
for pad in self.layout(**kwargs):
if pad.io:
o, i, oe = (f"{pad.name}_{suffix}" for suffix in ["o", "i", "oe"])
setattr(self, pad.name, Signal(pad.width))
setattr(self, o, Signal(pad.width, name=o))
setattr(self, i, Signal(pad.width, name=i))
setattr(self, oe, Signal(name=oe))
self.comb += If(getattr(self, oe),
getattr(self, pad.name).eq(getattr(self, o))
).Else(
getattr(self, pad.name).eq(getattr(self, i))
)
else:
setattr(self, pad.name, Signal(pad.width, name=pad.name))
class Clocks(dict):
"""Helper for definiting simulation clocks
Dictionary format is `{name: {"freq_hz": _, "phase_deg": _}, ...}`.
"""
def names(self):
return list(self.keys())
def add_io(self, io):
for name in self.names():
io.append((name + "_clk", 0, Pins(1)))
def add_clockers(self, sim_config):
for name, desc in self.items():
sim_config.add_clocker(name + "_clk", **desc)
class CRG(Module):
"""Clock & Reset Generator for Verilator-based simulation"""
def __init__(self, platform, clock_domains=None):
if clock_domains is None:
clock_domains = ["sys"]
elif isinstance(clock_domains, Clocks):
clock_domains = list(clock_domains.names())
# request() before creating clock_domains to avoid signal renaming problem
clock_domains = {name: platform.request(name + "_clk") for name in clock_domains}
self.clock_domains.cd_por = ClockDomain(reset_less=True)
for name in clock_domains.keys():
setattr(self.clock_domains, "cd_" + name, ClockDomain(name=name))
int_rst = Signal(reset=1)
self.sync.por += int_rst.eq(0)
self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
for name, clk in clock_domains.items():
cd = getattr(self, "cd_" + name)
self.comb += cd.clk.eq(clk)
self.comb += cd.rst.eq(int_rst)
class Platform(SimPlatform):
def __init__(self, io, clocks: Clocks):
common_io = [
("sys_rst", 0, Pins(1)),
("serial", 0,
Subsignal("source_valid", Pins(1)),
Subsignal("source_ready", Pins(1)),
Subsignal("source_data", Pins(8)),
Subsignal("sink_valid", Pins(1)),
Subsignal("sink_ready", Pins(1)),
Subsignal("sink_data", Pins(8)),
),
]
clocks.add_io(common_io)
SimPlatform.__init__(self, "SIM", common_io + io)
# Logging ------------------------------------------------------------------------------------------
class SimLogger(Module, AutoCSR):
"""Logger for use in simulation
This module allows for easier message logging when running simulation designs.
The logger can be used from `comb` context, so its methods can be used directly
inside `FSM` code. It also provides logging levels that can be used to filter
messages, either by specifying the default `log_level` or at runtime by driving
the `level` signal or using the corresponding CSR.
"""
# Allows using Display inside an FSM and filtering log messages by level (statically or dynamically)
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
NONE = 4
def __init__(self, log_level=INFO, clk_freq=None):
self.ops = []
self.level = Signal(reset=log_level, max=self.NONE)
self.time_ps = None
if clk_freq is not None:
self.time_ps = Signal(64)
cnt = Signal(64)
self.sync += cnt.eq(cnt + 1)
self.comb += self.time_ps.eq(cnt * int(1e12/clk_freq))
def debug(self, fmt, *args, **kwargs):
return self.log("[DEBUG] " + fmt, *args, level=self.DEBUG, **kwargs)
def info(self, fmt, *args, **kwargs):
return self.log("[INFO] " + fmt, *args, level=self.INFO, **kwargs)
def warn(self, fmt, *args, **kwargs):
return self.log("[WARN] " + fmt, *args, level=self.WARN, **kwargs)
def error(self, fmt, *args, **kwargs):
return self.log("[ERROR] " + fmt, *args, level=self.ERROR, **kwargs)
def log(self, fmt, *args, level=DEBUG, once=True):
cond = Signal()
if once: # make the condition be triggered only on rising edge
condition = edge(self, cond)
else:
condition = cond
self.ops.append((level, condition, fmt, args))
return cond.eq(1)
def add_csrs(self):
self._level = CSRStorage(len(self.level), reset=self.level.reset.value)
self.comb += self.level.eq(self._level.storage)
def do_finalize(self):
for level, cond, fmt, args in self.ops:
if self.time_ps is not None:
fmt = f"[%16d ps] {fmt}"
args = (self.time_ps, *args)
self.sync += If((level >= self.level) & cond, Display(fmt, *args))
def log_level_getter(log_level):
"""Parse logging level description
Log level can be given in a simple form (e.g. `--log-level=DEBUG`) to specify
the same level for all modules, or it can set different levels for different modules,
e.g. `--log-level=all=INFO,data=DEBUG`.
"""
def get_level(name):
return getattr(SimLogger, name.upper())
if "=" not in log_level: # simple log_level, e.g. "INFO"
return lambda _: get_level(log_level)
# parse log_level in the per-module form, e.g. "--log-level=all=INFO,data=DEBUG"
per_module = dict(part.split("=") for part in log_level.strip().split(","))
return lambda module: get_level(per_module.get(module, per_module.get("all", None)))
# Simulator ----------------------------------------------------------------------------------------
class PulseTiming(Module):
"""Timing monitor with pulse input/output
This module works like `tXXDController` with the following differences:
* countdown triggered by a low to high pulse on `trigger`
* `ready` is initially low and can only become high after a trigger
* provides `ready_p` which is high only for 1 cycle when `ready` becomes high
"""
def __init__(self, t):
self.trigger = Signal()
self.ready = Signal()
self.ready_p = Signal()
ready_d = Signal()
triggered = Signal()
tctrl = tXXDController(t)
self.submodules += tctrl
self.sync += If(self.trigger, triggered.eq(1))
self.comb += [
self.ready.eq(triggered & tctrl.ready),
self.ready_p.eq(edge(self, self.ready)),
tctrl.valid.eq(edge(self, self.trigger)),
]
```
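As a small illustration of the `--log-level` parsing described in the `log_level_getter` docstring (the module names `data` and `phy` are arbitrary examples):

```python
# Illustrative sketch; "data" and "phy" are arbitrary module names.
from litedram.phy.sim_utils import SimLogger, log_level_getter

get_level = log_level_getter("all=INFO,data=DEBUG")
assert get_level("data") == SimLogger.DEBUG   # per-module override
assert get_level("phy") == SimLogger.INFO     # falls back to the "all" entry

get_level = log_level_getter("WARN")          # simple form: same level everywhere
assert get_level("anything") == SimLogger.WARN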
{
"source": "jerseycity17/team-5",
"score": 2
}
#### File: team-5/app/__init__.py
```python
from flask import Flask
from flask_bootstrap import Bootstrap
# from flask_mail import Mail
# from flask_login import LoginManager
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
bootstrap = Bootstrap()
# mail = Mail()
moment = Moment()
db = SQLAlchemy()
# login_manager = LoginManager()
# login_manager.session_protection = 'strong'
# login_manager.login_view = 'main.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
# mail.init_app(app)
moment.init_app(app)
db.init_app(app)
# login_manager.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
```
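A minimal sketch of how this application factory would typically be driven; the `'default'` configuration name is an assumption, since `config.py` is not shown here:

```python
# Hypothetical usage sketch -- the 'default' config name depends on config.py (not shown).
from app import create_app, db

app = create_app('default')
with app.app_context():
    db.create_all()    # create tables for the SQLAlchemy models
app.run()
```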
#### File: migrations/versions/48b852d102fd_.py
```python
revision = '<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('mission', sa.String(length=128), nullable=True),
sa.Column('email', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_organization_email'), 'organization', ['email'], unique=True)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('first_name', sa.String(length=64), nullable=True),
sa.Column('last_name', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('is_celebrity', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.drop_table('users')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('first_name', sa.VARCHAR(length=64), nullable=False),
sa.Column('last_name', sa.VARCHAR(length=64), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_index(op.f('ix_organization_email'), table_name='organization')
op.drop_table('organization')
# ### end Alembic commands ###
```
#### File: migrations/versions/819ae90311de_.py
```python
revision = '819ae90311de'
down_revision = '6<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('feed',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.Column('organization_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('feed')
op.drop_table('events')
# ### end Alembic commands ###
```
{
"source": "jerseyhokie/udacity-catalog",
"score": 2
}
#### File: jerseyhokie/udacity-catalog/application.py
```python
from models import Base, Categories, CategoryItems, User
from flask import Flask, render_template, request, redirect
from flask import jsonify, url_for, flash, abort, g
from flask import session as login_session
from flask import make_response
from flask_httpauth import HTTPBasicAuth
from sqlalchemy import create_engine, asc, desc
from sqlalchemy.orm import sessionmaker
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import random
import string
import requests
app = Flask(__name__)
auth = HTTPBasicAuth()
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog Application"
# Connect to Database and create database session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
# Setup Facebook Login
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
'web']['app_id']
app_secret = json.loads(
open('fb_client_secrets.json', 'r').read())['web']['app_secret']
url = ('https://graph.facebook.com/oauth/access_token?grant_type='
'fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
app_id, app_secret, access_token))
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
'''
Due to the formatting for the result from the server token exchange we
have to split the token first on commas and select the first index which
gives us the key : value for the server access token then we split it on
colons to pull out the actual token value and replace the remaining quotes
with nothing so that it can be used directly in the graph api calls
'''
token = result.split(',')[0].split(':')[1].replace('"', '')
url = "https://graph.facebook.com/v2.8/me?"
"access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# print "url sent for API access:%s"% url
# print "API JSON result: %s" % result
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
# The token must be stored in the login_session in order to properly logout
login_session['access_token'] = token
# Get user picture
url = ('https://graph.facebook.com/v2.8/me/picture?access_token='
'%s&redirect=0&height=200&width=200' % token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: '
'150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("Now logged in as %s" % login_session['username'])
return output
@app.route('/fbdisconnect')
def fbdisconnect():
facebook_id = login_session['facebook_id']
# The access token must be included to successfully log out
access_token = login_session['access_token']
url = ('https://graph.facebook.com/%s/permissions?access_token='
'%s' % (facebook_id, access_token))
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
return "you have been logged out"
# Create Google Login
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is '
'already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
login_session['provider'] = 'google'
# See if the user exists, make a new one if they do not
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;\
border-radius: 150px;-webkit-border-radius: 150px;\
-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
response = make_response(json.dumps('Current user '
'not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \
% login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
response = make_response(json.dumps('Failed to revoke token for '
'given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).first()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).first()
return user.id
except:
return None
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['access_token']
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash("You have successfully been logged out.")
return redirect(url_for('showCategories'))
else:
flash("You were not logged in")
return redirect(url_for('showCategories'))
# JSON Web Interface
# Establishes a way to read the catalog via JSON requests
# Each method is found by adding /JSON
# Entire catalog is read-only
@app.route('/catalog/JSON')
def showCatalogJSON():
catalog = session.query(Categories).all()
return jsonify(Catalog=[categories.serialize for categories in catalog])
# @app.route('/JSON')
# def showAllCatalogJSON():
# jsonCatalog = ""
# entireCatalog = []
# catalog = session.query(Categories).all()
# for c in catalog:
# items = session.query(CategoryItems).filter_by(categories_id=c.id)
# itemList = {}
# itemList["id"] = c.id
# itemList["name"] = c.name
# itemList["items"] = (i.serialize for i in items)
# entireCatalog.append(itemList)
# return jsonify (Catalog = entireCatalog)
@app.route('/catalog/<int:categories_id>/JSON')
def showCategoryJSON(categories_id):
categoryToShow = session.query(Categories).filter_by(
id=categories_id).one()
itemsToShow = session.query(CategoryItems).filter_by(
categories_id=categoryToShow.id)
return jsonify(Category=[categoryItems.serialize for
categoryItems in itemsToShow])
@app.route('/catalog/<string:categories_name>/JSON')
def showCategoryByNameJSON(categories_name):
categoryToShow = session.query(Categories).filter_by(
name=categories_name).one()
itemsToShow = session.query(CategoryItems).filter_by(
categories_id=categoryToShow.id)
return jsonify(Category=[categoryItems.serialize for
categoryItems in itemsToShow])
@app.route('/catalog/<int:categories_id>/<int:categoriesItems_id>/JSON')
def showCategoryItemJSON(categories_id, categoriesItems_id):
item = session.query(CategoryItems).filter_by(id=categoriesItems_id).one()
return jsonify(Item=[item.serialize])
@app.route('/catalog/<string:categories_name>/'
'<string:categoriesItems_name>/JSON')
def showCategoryItemByNameJSON(categories_name, categoriesItems_name):
item = session.query(CategoryItems).filter_by(
name=categoriesItems_name).one()
return jsonify(Item=[item.serialize])
# Catalog Web Interface
# Establishes all the CRUD operations via webpages
# Each method is listed in CRUD order,
# categories then categoryItems respectively
# All methods that edit the DB are protected
# All methods are available using the ID or name of the item as key
# CREATE - Add a new Category to the Catalog
@app.route('/catalog/new',
methods=['GET', 'POST'])
def newCategory():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newItem = Categories(name=request.form['name'],
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash("New Category created!")
return redirect(url_for('showCategories'))
else:
return render_template('newCategory.html')
# READ - Root of the web app - Defaults to show all categories
@app.route('/')
@app.route('/catalog/')
def showCategories():
categories = session.query(Categories).order_by(asc(Categories.name))
recentCategoriesItems = session.query(
CategoryItems).join(Categories).order_by(
desc(CategoryItems.id)).limit(5)
if 'username' not in login_session:
return render_template('publicCategories.html',
categories=categories,
recent=recentCategoriesItems)
else:
return render_template('categories.html',
categories=categories,
recent=recentCategoriesItems)
# UPDATE - Edit a Category
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/edit',
methods=['GET', 'POST'])
def editCategory(categories_id):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(Categories).filter_by(id=categories_id).one()
if editedItem.user_id != login_session['user_id']:
flash("Edit is only available to the owner of the item!")
return redirect(url_for('showCategories'))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
session.add(editedItem)
session.commit()
return redirect(url_for('showCategories'))
else:
return render_template('editCategory.html',
categories_id=categories_id,
item=editedItem)
@app.route('/catalog/<string:categories_name>/edit',
methods=['GET', 'POST'])
def editCategoryByName(categories_name):
if 'username' not in login_session:
return redirect('/login')
categoryNameID = session.query(Categories).filter_by(
name=categories_name).one()
categories_id = categoryNameID.id
editedItem = session.query(Categories).filter_by(
name=categories_name).one()
if editedItem.user_id != login_session['user_id']:
flash("Edit is only available to the owner of the item!")
return redirect(url_for('showCategories'))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
session.add(editedItem)
session.commit()
return redirect(url_for('showCategories'))
else:
return render_template('editCategory.html',
categories_name=categories_name,
item=editedItem)
# DELETE - Remove a Category
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/delete',
methods=['GET', 'POST'])
def deleteCategory(categories_id):
if 'username' not in login_session:
return redirect('/login')
categoryToDelete = session.query(Categories).filter_by(
id=categories_id).one()
itemsToDelete = session.query(CategoryItems).filter_by(
categories_id=categories_id)
if categoryToDelete.user_id != login_session['user_id']:
flash("Delete is only available to the owner of the item!")
return redirect(url_for('showCategories'))
if request.method == 'POST':
# Delete the CategoryItems belonging to this category before removing it
for i in itemsToDelete:
session.delete(i)
session.delete(categoryToDelete)
session.commit()
return redirect(url_for('showCategories'))
else:
return render_template('deleteCategory.html',
itemToDelete=categoryToDelete,
items=itemsToDelete,
categories_id=categories_id)
@app.route('/catalog/<string:categories_name>/delete',
methods=['GET', 'POST'])
def deleteCategoryByName(categories_name):
if 'username' not in login_session:
return redirect('/login')
categoryToDelete = session.query(Categories).filter_by(
name=categories_name).one()
itemsToDelete = session.query(CategoryItems).filter_by(
categories_id=categoryToDelete.id)
if categoryToDelete.user_id != login_session['user_id']:
flash("Delete is only available to the owner of the item!")
return redirect(url_for('showCategories'))
if request.method == 'POST':
# Code to delete items from CategoryItems with categories_id
# This cleans up the database if you delete a category,
# removing remnant items
for i in itemsToDelete:
session.delete(i)
session.delete(categoryToDelete)
session.commit()
return redirect(url_for('showCategories'))
else:
return render_template('deleteCategory.html',
itemToDelete=categoryToDelete,
items=itemsToDelete,
categories_name=categories_name)
# CREATE - Add a new Category Item to a specific Category
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/new',
methods=['GET', 'POST'])
def newCategoryItem(categories_id):
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newItem = CategoryItems(name=request.form['name'],
description=request.form['description'],
categories_id=categories_id,
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash("New Category Item added!")
return redirect(url_for('showCategory',
categories_id=categories_id))
else:
return render_template('newCategoryItem.html',
categories_id=categories_id)
@app.route('/catalog/<string:categories_name>/new',
methods=['GET', 'POST'])
def newCategoryItemByName(categories_name):
if 'username' not in login_session:
return redirect('/login')
categoryNameID = session.query(Categories).filter_by(
name=categories_name).one()
categories_id = categoryNameID.id
if request.method == 'POST':
newItem = CategoryItems(name=request.form['name'],
description=request.form['description'],
categories_id=categories_id,
user_id=login_session['user_id'])
session.add(newItem)
session.commit()
flash("New Category Item added!")
return redirect(url_for('showCategoryByName',
categories_name=categories_name))
else:
return render_template('newCategoryItem.html',
categories_name=categories_name)
# READ - Display all of the items in a single Category
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/')
def showCategory(categories_id):
categories = session.query(Categories).filter_by(id=categories_id).one()
creator = getUserInfo(categories.user_id)
items = session.query(CategoryItems).filter_by(
categories_id=categories_id)
if ('username' not in login_session or
creator.id != login_session['user_id']):
return render_template('showPublicCategory.html',
categories=categories,
items=items,
categories_id=categories_id,
creator=creator)
else:
return render_template('showCategory.html',
categories=categories,
items=items,
categories_id=categories_id,
creator=creator)
@app.route('/catalog/<string:categories_name>/')
def showCategoryByName(categories_name):
categories = session.query(Categories).filter_by(
name=categories_name).one()
creator = getUserInfo(categories.user_id)
items = session.query(CategoryItems).filter_by(
categories_id=categories.id)
if ('username' not in login_session or
creator.id != login_session['user_id']):
return render_template('showPublicCategory.html',
categories=categories,
items=items,
categories_name=categories_name,
creator=creator)
else:
return render_template('showCategory.html',
categories=categories,
items=items,
categories_name=categories_name,
creator=creator)
# READ - Display all details of a Category Item
# Function available with both Category Item ID and Category Item Name
@app.route('/catalog/<int:categories_id>/<int:categoriesItems_id>/')
def showCategoryItem(categories_id, categoriesItems_id):
item = session.query(CategoryItems).filter_by(id=categoriesItems_id).one()
creator = getUserInfo(item.user_id)
if ('username' not in login_session or
creator.id != login_session['user_id']):
return render_template('showPublicCategoryItem.html',
item=item,
categories_id=categories_id,
categoriesItems_id=categoriesItems_id,
creator=creator)
else:
return render_template('showCategoryItem.html',
item=item,
categories_id=categories_id,
categoriesItems_id=categoriesItems_id,
creator=creator)
@app.route('/catalog/<string:categories_name>/<string:categoriesItems_name>/')
def showCategoryItemByName(categories_name, categoriesItems_name):
item = session.query(CategoryItems).filter_by(
name=categoriesItems_name).one()
creator = getUserInfo(item.user_id)
if ('username' not in login_session or
creator.id != login_session['user_id']):
return render_template('showPublicCategoryItem.html',
item=item,
categories_name=categories_name,
categoriesItems_name=categoriesItems_name,
creator=creator)
else:
return render_template('showCategoryItem.html',
item=item,
categories_name=categories_name,
categoriesItems_name=categoriesItems_name,
creator=creator)
# UPDATE - Edit a category Item
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/<int:categoriesItems_id>/edit',
methods=['GET', 'POST'])
def editCategoryItem(categories_id, categoriesItems_id):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(CategoryItems).filter_by(
id=categoriesItems_id).one()
if editedItem.user_id != login_session['user_id']:
flash("Edit is only available to the owner of the item!")
return redirect(url_for('showCategory',
categories_id=categories_id))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
session.add(editedItem)
session.commit()
flash("Category Item Edited!")
return redirect(url_for('showCategory',
categories_id=categories_id))
else:
return render_template('editCategoryItem.html',
categories_id=categories_id,
categoriesItems_id=categoriesItems_id,
item=editedItem)
@app.route('/catalog/<string:categories_name>/'
'<string:categoriesItems_name>/edit',
methods=['GET', 'POST'])
def editCategoryItemByName(categories_name, categoriesItems_name):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(CategoryItems).filter_by(
name=categoriesItems_name).one()
if editedItem.user_id != login_session['user_id']:
flash("Edit is only available to the owner of the item!")
return redirect(url_for('showCategoryByName',
categories_name=categories_name))
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
session.add(editedItem)
session.commit()
flash("Category Item Edited!")
return redirect(url_for('showCategoryByName',
categories_name=categories_name))
else:
return render_template('editCategoryItem.html',
categories_name=categories_name,
categoriesItems_name=categoriesItems_name,
item=editedItem)
# DELETE - Remove a category Item
# Function available with both category ID and Category Name
@app.route('/catalog/<int:categories_id>/<int:categoriesItems_id>/delete',
methods=['GET', 'POST'])
def deleteCategoryItem(categories_id, categoriesItems_id):
if 'username' not in login_session:
return redirect('/login')
itemToDelete = session.query(CategoryItems).filter_by(
id=categoriesItems_id).one()
if itemToDelete.user_id != login_session['user_id']:
flash("Delete is only available to the owner of the item")
return redirect(url_for('showCategory',
categories_id=categories_id))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash("Category Item Deleted!")
return redirect(url_for('showCategory',
categories_id=categories_id))
else:
return render_template('deleteCategoryItem.html',
categories_id=categories_id,
categoriesItems_id=categoriesItems_id,
itemToDelete=itemToDelete)
@app.route('/catalog/<string:categories_name>/'
'<string:categoriesItems_name>/delete',
methods=['GET', 'POST'])
def deleteCategoryItemByName(categories_name, categoriesItems_name):
if 'username' not in login_session:
return redirect('/login')
categoryNameID = session.query(CategoryItems).filter_by(
name=categoriesItems_name).one()
categoriesItems_id = categoryNameID.id
itemToDelete = session.query(CategoryItems).filter_by(
id=categoriesItems_id).one()
if itemToDelete.user_id != login_session['user_id']:
flash("Delete is only available to the owner of the item")
return redirect(url_for('showCategoryByName',
categories_name=categories_name))
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash("Category Item Deleted!")
return redirect(url_for('showCategoryByName',
categories_name=categories_name))
else:
return render_template('deleteCategoryItem.html',
categories_name=categories_name,
categoriesItems_name=categoriesItems_name,
itemToDelete=itemToDelete)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
```
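The read-only JSON routes above can be exercised with plain HTTP requests once the server is running; a small sketch (the category id 1 is only an example):

```python
# Illustrative only: assumes the app is running locally on port 5000 (see the __main__ block)
# and that a category with id 1 exists.
import requests

catalog = requests.get('http://localhost:5000/catalog/JSON').json()
category = requests.get('http://localhost:5000/catalog/1/JSON').json()
print(catalog)
print(category)
```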
{
"source": "jerseyshawn/cf-python-client",
"score": 3
}
#### File: main/cloudfoundry_client/imported.py
```python
import sys
import requests
if sys.version_info.major == 2:
from httplib import UNAUTHORIZED, BAD_REQUEST, NOT_FOUND, OK
from urllib import quote
requests.packages.urllib3.disable_warnings()
from __builtin__ import reduce
def bufferize_string(content):
return content
elif sys.version_info.major == 3:
from http.client import UNAUTHORIZED, BAD_REQUEST, NOT_FOUND, OK
from urllib.parse import quote
from functools import reduce
def bufferize_string(content):
return bytes(content, 'UTF-8')
else:
raise ImportError('Invalid major version: %d' % sys.version_info.major)
```
#### File: main/cloudfoundry_client/main.py
```python
import argparse
import logging
import os
import re
import sys
import json
from requests.exceptions import ConnectionError
from cloudfoundry_client.imported import NOT_FOUND
from cloudfoundry_client import __version__
from cloudfoundry_client.client import CloudFoundryClient
from cloudfoundry_client.entities import InvalidStatusCode
__all__ = ['main', 'build_client_from_configuration']
_logger = logging.getLogger(__name__)
def _read_value_from_user(prompt, error_message=None, validator=None, default=''):
while True:
sys.stdout.write('%s [%s]: ' % (prompt, default))
sys.stdout.flush()
answer_value = sys.stdin.readline().rstrip(' \r\n')
if len(answer_value) == 0:
answer_value = default
if len(answer_value) > 0 and (validator is None or validator(answer_value)):
return answer_value
else:
if error_message is None:
sys.stderr.write('\"%s\": invalid value\n' % answer_value)
else:
sys.stderr.write('\"%s\": %s\n' % (answer_value, error_message))
def build_client_from_configuration(previous_configuration=None):
dir_conf = os.path.join(os.path.expanduser('~'))
if not os.path.isdir(dir_conf):
if os.path.exists(dir_conf):
raise IOError('%s exists but is not a directory' % dir_conf)
os.mkdir(dir_conf)
config_file = os.path.join(dir_conf, '.cf_client_python.json')
if not os.path.isfile(config_file):
target_endpoint = _read_value_from_user('Please enter a target endpoint',
'URL must start with http:// or https://',
lambda s: s.startswith('http://') or s.startswith('https://'),
default='' if previous_configuration is None else
previous_configuration.get('target_endpoint', ''))
skip_ssl_verification = _read_value_from_user('Skip ssl verification (true/false)',
'Enter either true or false',
lambda s: s == 'true' or s == 'false',
default='false' if previous_configuration is None else
json.dumps(
previous_configuration.get('skip_ssl_verification', False)))
login = _read_value_from_user('Please enter your login')
password = _read_value_from_user('Please enter your password')
client = CloudFoundryClient(target_endpoint, skip_verification=(skip_ssl_verification == 'true'))
client.init_with_user_credentials(login, password)
with open(config_file, 'w') as f:
f.write(json.dumps(dict(target_endpoint=target_endpoint,
skip_ssl_verification=(skip_ssl_verification == 'true'),
refresh_token=client.refresh_token), indent=2))
return client
else:
try:
configuration = None
with open(config_file, 'r') as f:
configuration = json.load(f)
client = CloudFoundryClient(configuration['target_endpoint'],
skip_verification=configuration['skip_ssl_verification'])
client.init_with_token(configuration['refresh_token'])
return client
except Exception as ex:
if type(ex) == ConnectionError:
raise
else:
_logger.exception("Could not restore configuration. Cleaning and recreating")
os.remove(config_file)
return build_client_from_configuration(configuration)
def is_guid(s):
return re.match(r'[\da-z]{8}-[\da-z]{4}-[\da-z]{4}-[\da-z]{4}-[\da-z]{12}', s.lower()) is not None
def resolve_id(argument, get_by_name, domain_name, allow_search_by_name):
if is_guid(argument):
return argument
elif allow_search_by_name:
result = get_by_name(argument)
if result is not None:
return result['metadata']['guid']
else:
raise InvalidStatusCode(NOT_FOUND, '%s with name %s' % (domain_name, argument))
else:
raise ValueError('id: %s: does not allow search by name' % domain_name)
def log_recent(client, application_guid):
for message in client.loggregator.get_recent(application_guid):
_logger.info(message.message)
def _get_client_domain(client, domain):
return getattr(client, '%ss' % domain)
def main():
logging.basicConfig(level=logging.INFO,
format='%(message)s')
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
commands = dict()
commands['organization'] = dict(list=(), name='name', allow_retrieve_by_name=True, allow_creation=True,
allow_deletion=True, display_name='Organizations')
commands['space'] = dict(list=('organization_guid',), name='name', allow_retrieve_by_name=True, allow_creation=True,
allow_deletion=True, display_name='Spaces')
commands['app'] = dict(list=('organization_guid', 'space_guid',), name='name',
allow_retrieve_by_name=True, allow_creation=True, allow_deletion=True,
display_name='Applications')
commands['service'] = dict(list=('service_broker_guid',), name='label', allow_retrieve_by_name=True,
allow_creation=True,
allow_deletion=True, display_name='Services')
commands['service_plan'] = dict(list=('service_guid', 'service_instance_guid', 'service_broker_guid'), name='name',
allow_retrieve_by_name=False, allow_creation=False, allow_deletion=False,
display_name='Service plans')
commands['service_instance'] = dict(list=('organization_guid', 'space_guid', 'service_plan_guid'), name='name',
allow_retrieve_by_name=False, allow_creation=True, allow_deletion=True,
display_name='Service instances')
commands['service_key'] = dict(list=('service_instance_guid',), name='name',
allow_retrieve_by_name=False, allow_creation=True, allow_deletion=True,
display_name='Service keys')
commands['service_binding'] = dict(list=('app_guid', 'service_instance_guid'), name=None,
allow_retrieve_by_name=False, allow_creation=True, allow_deletion=True,
display_name='Service bindings')
commands['service_broker'] = dict(list=('name', 'space_guid'), name='name',
allow_retrieve_by_name=True, allow_creation=True, allow_deletion=True,
display_name='Service brokers')
commands['buildpack'] = dict(list=(), name='name',
allow_retrieve_by_name=False, allow_creation=False, allow_deletion=False,
display_name='Buildpacks')
commands['route'] = dict(list=(), name='host',
allow_retrieve_by_name=False, allow_creation=False, allow_deletion=False,
display_name='Routes')
application_commands = dict(recent_logs=('get_recent_logs', 'Recent Logs',),
env=('get_env', 'Get the environment of an application',),
instances=('get_instances', 'Get the instances of an application',),
stats=('get_stats', 'Get the stats of an application',),
summary=('get_summary', 'Get the summary of an application',),
start=('start', 'Start an application',),
stop=('stop', 'Stop an application',))
application_extra_list_commands = dict(routes=('list_routes', 'List the routes(host) of an application', 'host'))
description = []
for domain, command_description in list(commands.items()):
description.append(' %s' % command_description['display_name'])
description.append(' list_%ss : List %ss' % (domain, domain))
description.append(' get_%s : Get a %s by %s' % (domain, domain,
'UUID or name (first found then)'
if command_description['allow_retrieve_by_name']
else 'UUID'))
if command_description['allow_creation']:
description.append(' create_%s : Create a %s' % (domain, domain))
if command_description['allow_deletion']:
description.append(' delete_%s : Delete a %s' % (domain, domain))
if domain == 'application':
for command, application_command_description in list(application_commands.items()):
description.append(' %s : %s' % (command, application_command_description[1]))
for command, application_command_description in list(application_extra_list_commands.items()):
description.append(' %s : %s' % (command, application_command_description[1]))
description.append('')
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-V', '--version', action='version', version=__version__)
subparsers = parser.add_subparsers(title='Commands', dest='action', description='\n'.join(description))
for domain, command_description in list(commands.items()):
list_parser = subparsers.add_parser('list_%ss' % domain)
for filter_parameter in command_description['list']:
list_parser.add_argument('-%s' % filter_parameter, action='store', dest=filter_parameter, type=str,
default=None, help='Filter with %s' % filter_parameter)
get_parser = subparsers.add_parser('get_%s' % domain)
get_parser.add_argument('id', metavar='ids', type=str, nargs=1,
help='The id. Can be UUID or name (first found then)'
if command_description['allow_retrieve_by_name'] else 'The id (UUID)')
if command_description['allow_creation']:
create_parser = subparsers.add_parser('create_%s' % domain)
create_parser.add_argument('entity', metavar='entities', type=str, nargs=1,
help='Either a path of the json file containing the %s or a json object or the json %s object' % (domain, domain))
if command_description['allow_deletion']:
delete_parser = subparsers.add_parser('delete_%s' % domain)
delete_parser.add_argument('id', metavar='ids', type=str, nargs=1,
help='The id. Can be UUID or name (first found then)'
if command_description['allow_retrieve_by_name'] else 'The id (UUID)')
if domain == 'app':
for command, application_command_description in list(application_commands.items()):
command_parser = subparsers.add_parser(command)
command_parser.add_argument('id', metavar='ids', type=str, nargs=1,
help='The id. Can be UUID or name (first found then)')
for command, application_command_description in list(application_extra_list_commands.items()):
command_parser = subparsers.add_parser(command)
command_parser.add_argument('id', metavar='ids', type=str, nargs=1,
help='The id. Can be UUID or name (first found then)')
arguments = parser.parse_args()
client = build_client_from_configuration()
if arguments.action == 'recent_logs':
resource_id = resolve_id(arguments.id[0], lambda x: client.apps.get_first(name=x), 'application', True)
log_recent(client, resource_id)
elif application_commands.get(arguments.action) is not None:
resource_id = resolve_id(arguments.id[0], lambda x: client.apps.get_first(name=x), 'application', True)
print(getattr(client.apps, application_commands[arguments.action][0])(resource_id).json(indent=1))
elif application_extra_list_commands.get(arguments.action) is not None:
resource_id = resolve_id(arguments.id[0], lambda x: client.apps.get_first(name=x), 'application', True)
name_property = application_extra_list_commands[arguments.action][2]
for entity in getattr(client.apps, application_extra_list_commands[arguments.action][0])(resource_id):
print('%s - %s' % (entity['metadata']['guid'], entity['entity'][name_property]))
elif arguments.action.find('list_') == 0:
domain = arguments.action[len('list_'): len(arguments.action) - 1]
filter_list = dict()
for filter_parameter in commands[domain]['list']:
filter_value = getattr(arguments, filter_parameter)
if filter_value is not None:
filter_list[filter_parameter] = filter_value
for entity in _get_client_domain(client, domain).list(**filter_list):
name_property = commands[domain]['name']
if name_property is not None:
print('%s - %s' % (entity['metadata']['guid'], entity['entity'][name_property]))
else:
print(entity['metadata']['guid'])
elif arguments.action.find('get_') == 0:
domain = arguments.action[len('get_'):]
resource_id = resolve_id(arguments.id[0],
lambda x: _get_client_domain(client, domain).get_first(
**{commands[domain]['name']: x}),
domain,
commands[domain]['allow_retrieve_by_name'])
print(_get_client_domain(client, domain).get(resource_id).json(indent=1))
elif arguments.action.find('create_') == 0:
domain = arguments.action[len('create_'):]
data = None
if os.path.isfile(arguments.entity[0]):
with open(arguments.entity[0], 'r') as f:
try:
data = json.load(f)
except ValueError:
raise ValueError('entity: file %s does not contain valid json data' % arguments.entity[0])
else:
try:
data = json.loads(arguments.entity[0])
except ValueError:
raise ValueError('entity: must be either a valid json file path or a json object')
print(_get_client_domain(client, domain)._create(data).json())
elif arguments.action.find('delete_') == 0:
domain = arguments.action[len('delete_'):]
if is_guid(arguments.id[0]):
_get_client_domain(client, domain)._remove(arguments.id[0])
elif commands[domain]['allow_retrieve_by_name']:
filter_get = dict()
filter_get[commands[domain]['name']] = arguments.id[0]
entity = _get_client_domain(client, domain).get_first(**filter_get)
if entity is None:
raise InvalidStatusCode(NOT_FOUND, '%s with name %s' % (domain, arguments.id[0]))
else:
_get_client_domain(client, domain)._remove(entity['metadata']['guid'])
else:
raise ValueError('id: %s: does not allow search by name' % domain)
if __name__ == "__main__":
main()
```
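A minimal sketch (not part of the original module) of driving the generated CLI programmatically, mirroring the pattern the test suite below uses: patch `sys.argv` with one of the generated sub-commands and call `main()`. It assumes a valid client configuration is available to `build_client_from_configuration()`.

```python
# Hypothetical driver; 'list_organizations' is one of the generated
# "list_<domain>s" sub-commands built by main() above.
import sys
from unittest.mock import patch

import cloudfoundry_client.main as main

with patch.object(sys, 'argv', ['main', 'list_organizations']):
    main.main()
```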
#### File: cloudfoundry_client/v2/service_brokers.py
```python
from cloudfoundry_client.entities import EntityManager
class ServiceBrokerManager(EntityManager):
def __init__(self, target_endpoint, client):
super(ServiceBrokerManager, self).__init__(target_endpoint, client, '/v2/service_brokers')
def create(self, broker_url, broker_name, auth_username, auth_password, space_guid=None):
request = dict(broker_url=broker_url,
name=broker_name,
auth_username=auth_username,
                       auth_password=auth_password)
if space_guid is not None:
request['space_guid'] = space_guid
return super(ServiceBrokerManager, self)._create(request)
    def update(self, broker_guid, broker_url=None, broker_name=None, auth_username=None, auth_password=None):
request = dict()
if broker_url is not None:
request['broker_url'] = broker_url
if broker_name is not None:
request['name'] = broker_name
if auth_username is not None:
request['auth_username'] = auth_username
if auth_password is not None:
            request['auth_password'] = auth_password
return super(ServiceBrokerManager, self)._update(broker_guid, request)
def remove(self, broker_guid):
super(ServiceBrokerManager, self)._remove(broker_guid)
```
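A hedged usage sketch of `ServiceBrokerManager` through a client object; the broker URL and credentials below are placeholders, and `build_client_from_configuration` is assumed to be importable from the CLI module shown earlier.

```python
from cloudfoundry_client.main import build_client_from_configuration

client = build_client_from_configuration()

# Register a broker, rename it, then remove it again (placeholder values).
broker = client.service_brokers.create(
    broker_url='https://broker.example.com',
    broker_name='my-broker',
    auth_username='admin',
    auth_password='admin-password',
)
client.service_brokers.update(broker['metadata']['guid'], broker_name='my-broker-renamed')
client.service_brokers.remove(broker['metadata']['guid'])
```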
#### File: cf-python-client/test/test_loggregator.py
```python
import unittest
from abstract_test_case import AbstractTestCase
from cloudfoundry_client.imported import OK, reduce
from fake_requests import mock_response
class TestLoggregator(unittest.TestCase, AbstractTestCase):
@classmethod
def setUpClass(cls):
cls.mock_client_class()
def setUp(self):
self.build_client()
def test_recents(self):
boundary = '7e061f8d6ec00677d6f6b17fcafec9eef2e3a2360e557f72e3e1116efcec'
self.client.get.return_value = mock_response('/recent?app=app_id',
OK,
{'content-type':
'multipart/x-protobuf; boundary=%s' % boundary},
'recents', 'GET_response.bin')
cpt = reduce(lambda increment, _: increment + 1, self.client.loggregator.get_recent('app_id'), 0)
self.client.get.assert_called_with(self.client.get.return_value.url, stream=True)
self.assertEqual(cpt, 5946)
```
#### File: cf-python-client/test/test_service_plans.py
```python
import sys
import unittest
import cloudfoundry_client.main as main
from abstract_test_case import AbstractTestCase
from cloudfoundry_client.imported import OK, reduce
from fake_requests import mock_response
from imported import patch, call
class TestServicePlans(unittest.TestCase, AbstractTestCase):
@classmethod
def setUpClass(cls):
cls.mock_client_class()
def setUp(self):
self.build_client()
def test_list(self):
self.client.get.return_value = mock_response(
'/v2/service_plans?q=service_guid%20IN%20service_id',
OK,
None,
'v2', 'service_plans', 'GET_response.json')
cpt = reduce(lambda increment, _: increment + 1, self.client.service_plans.list(service_guid='service_id'), 0)
self.client.get.assert_called_with(self.client.get.return_value.url)
self.assertEqual(cpt, 1)
def test_get(self):
self.client.get.return_value = mock_response(
'/v2/service_plans/plan_id',
OK,
None,
'v2', 'service_plans', 'GET_{id}_response.json')
result = self.client.service_plans.get('plan_id')
self.client.get.assert_called_with(self.client.get.return_value.url)
self.assertIsNotNone(result)
def test_list_instances(self):
self.client.get.return_value = mock_response(
'/v2/service_plans/plan_id/service_instances?q=space_guid%20IN%20space_id',
OK,
None,
'v2', 'apps', 'GET_{id}_routes_response.json')
cpt = reduce(lambda increment, _: increment + 1,
self.client.service_plans.list_instances('plan_id', space_guid='space_id'), 0)
self.client.get.assert_called_with(self.client.get.return_value.url)
self.assertEqual(cpt, 1)
def test_entity(self):
self.client.get.side_effect = [
mock_response(
'/v2/service_plans/plan_id',
OK,
None,
'v2', 'service_plans', 'GET_{id}_response.json'),
mock_response(
'/v2/services/6a4abae6-93e0-438b-aaa2-5ae67f3a069d',
OK,
None,
'v2', 'services', 'GET_{id}_response.json')
,
mock_response(
'/v2/service_plans/5d8f3b0f-6b5b-487f-8fed-4c2d9b812a72/service_instances',
OK,
None,
'v2', 'service_instances', 'GET_response.json')
]
service_plan = self.client.service_plans.get('plan_id')
self.assertIsNotNone(service_plan.service())
cpt = reduce(lambda increment, _: increment + 1, service_plan.service_instances(), 0)
self.assertEqual(cpt, 1)
self.client.get.assert_has_calls([call(side_effect.url) for side_effect in self.client.get.side_effect],
any_order=False)
@patch.object(sys, 'argv', ['main', 'list_service_plans'])
def test_main_list_service_plans(self):
with patch('cloudfoundry_client.main.build_client_from_configuration',
new=lambda: self.client):
self.client.get.return_value = mock_response('/v2/service_plans',
OK,
None,
'v2', 'service_plans', 'GET_response.json')
main.main()
self.client.get.assert_called_with(self.client.get.return_value.url)
@patch.object(sys, 'argv', ['main', 'get_service_plan', '5d8f3b0f-6b5b-487f-8fed-4c2d9b812a72'])
def test_main_get_service_plan(self):
with patch('cloudfoundry_client.main.build_client_from_configuration',
new=lambda: self.client):
self.client.get.return_value = mock_response('/v2/service_plans/5d8f3b0f-6b5b-487f-8fed-4c2d9b812a72',
OK,
None,
'v2', 'service_plans', 'GET_{id}_response.json')
main.main()
self.client.get.assert_called_with(self.client.get.return_value.url)
```
|
{
"source": "JERSHA20PW13/QUADTREE-IMAGE-COMPRESSION",
"score": 3
}
|
#### File: JERSHA20PW13/QUADTREE-IMAGE-COMPRESSION/qTreeAppl.py
```python
from os import error
import numpy as np
import cv2
from PIL import Image, ImageDraw
from numpy.lib.histograms import histogram
MAX = 8
THRESHOLD = 13
def avgclr(img):
imgArr = np.asarray(img)
#getting the average color from the image array
avg_by_row = np.average(imgArr, axis=0)
avg = np.average(avg_by_row, axis=0)
# avg (r,g,b)
return (int(avg[0]), int(avg[1]), int(avg[2]))
def deviation(histo):
total = sum(histo) #no of pixels
value = 0
error = 0
if total>0:
value = sum(x*i for i, x in enumerate(histo))/total; #average intensity
error = (sum(x*(value-i)**2 for i, x in enumerate(histo))/total)**0.5; #deviation
return error
def get_detail_level(histo):
#getting the detail_level for each color (r,g,b) from the histogram
r_detail_level = deviation(histo[0:256])
g_detail_level = deviation(histo[256:512])
b_detail_level = deviation(histo[512:768])
#getting the overall detail_level in terms of grayscale using the below formula
detail_level = r_detail_level*0.2989 + g_detail_level*0.5870 + b_detail_level*0.1140
return detail_level
# node in the quadtree
class Quadrant():
def __init__(self, img, borderbox, depth):
self.borderbox = borderbox
self.depth = depth
self.children = None
self.isLeaf = False
image = img.crop(borderbox)
histo = image.histogram()
self.detail_level = get_detail_level(histo)
self.colour = avgclr(image)
def split(self, img):
left, top, right, bottom = self.borderbox
mid_x = left + (right -left)/2
mid_y = top + (bottom-top )/2
# split root quadrant into 4 new quadrants
upper_left = Quadrant(img, (left, top, mid_x, mid_y), self.depth+1)
upper_right = Quadrant(img, (mid_x, top, right, mid_y), self.depth+1)
bottom_left = Quadrant(img, (left, mid_y, mid_x, bottom), self.depth+1)
bottom_right = Quadrant(img, (mid_x, mid_y, right, bottom), self.depth+1)
#add the new quadrants as children to the root
self.children = [upper_left, upper_right, bottom_left, bottom_right]
class QuadTree():
def __init__(self, img):
self.width, self.height = img.size
self.depth = 0 #max depth from the root
self.root = Quadrant(img, img.getbbox(), 0)
self.buildTree(self.root, img) #build the quadtree
def buildTree(self, root, img):
if root.depth >= MAX or root.detail_level <= THRESHOLD:
if root.depth > self.depth:
self.depth = root.depth
root.isLeaf = True #attained a leaf stop recursing
return
root.split(img)
for x in root.children:
self.buildTree(x, img)
def getLeaves(self, depth):
if depth > self.depth:
raise ValueError('Depth too large')
quadrants = []
self.search(self, self.root, depth, quadrants.append) #searching the tree for leaves
return quadrants #list of leaf quadrants
def search(self, tree, quad, max_depth, appendQuad):
if quad.isLeaf or quad.depth == max_depth:
appendQuad(quad)
elif quad.children != None:
for x in quad.children:
self.search(tree, x, max_depth, appendQuad)
def createImg(self, customDepth):
img = Image.new('RGB', (self.width, self.height))
draw = ImageDraw.Draw(img)
draw.rectangle((0, 0, self.width, self.height), (0,0,0)) # creating a black image to begin with
leafQuadrants = self.getLeaves(customDepth)
for x in leafQuadrants:
draw.rectangle(x.borderbox, x.colour) # colouring the particular Quadrant
return img
def processGIF(self, file_name):
imgarr = []
finalImg = self.createImg(self.depth)
for i in range(self.depth):
img = self.createImg(i)
imgarr.append(img)
for i in range(5):
imgarr.append(finalImg)
imgarr[0].save(file_name, save_all = True, append_images = imgarr[1:], duration = 1000, loop = 0)
if __name__ == '__main__':
img = Image.open("turtle.jpg") #load an image
qTree = QuadTree(img) #create a quadtree for that image
img = qTree.createImg(8) #create a compressed image from the quadtree
img.save("compressed.jpg") #save the compressed image
img.show()
qTree.processGIF("compressedGif.gif") #create the stages as gif and save
```
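A small sketch (assuming `qTreeAppl.py` is on the import path) of the detail metric on its own: a flat intensity histogram yields zero deviation, while a spread of intensities yields a large one, which is what `buildTree` compares against `THRESHOLD`.

```python
from qTreeAppl import deviation, get_detail_level

# 256-bin histogram with every pixel at intensity 128: no variation at all.
flat = [0] * 256
flat[128] = 100
print(deviation(flat))  # 0.0

# Histogram split between intensities 0 and 255: large deviation.
spread = [0] * 256
spread[0] = 50
spread[255] = 50
print(deviation(spread))  # 127.5

# get_detail_level expects the concatenated R, G and B histograms (768 bins).
print(get_detail_level(spread + flat + flat))  # ~38.1, only the red channel varies
```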
|
{
"source": "jershi425/NVTabular",
"score": 2
}
|
#### File: nvtabular/io/parquet.py
```python
import functools
import itertools
import logging
import math
import operator
import os
import threading
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from io import BytesIO
from uuid import uuid4
try:
import cudf
import dask_cudf
from cudf.io.parquet import ParquetWriter as pwriter_cudf
except ImportError:
cudf = None
import dask
import dask.dataframe as dd
import fsspec
import numpy as np
import pandas as pd
import pyarrow as pa
import toolz as tlz
from dask.base import tokenize
from dask.dataframe.core import _concat, new_dd_object
from dask.dataframe.io.parquet.utils import _analyze_paths
from dask.delayed import Delayed
from dask.highlevelgraph import HighLevelGraph
from dask.utils import natural_sort_key, parse_bytes
from fsspec.core import get_fs_token_paths
from pyarrow import parquet as pq
from pyarrow.parquet import ParquetWriter as pwriter_pyarrow
from .dataset_engine import DatasetEngine
from .shuffle import Shuffle, _shuffle_df
from .writer import ThreadedWriter
LOG = logging.getLogger("nvtabular")
class ParquetDatasetEngine(DatasetEngine):
"""ParquetDatasetEngine is a Dask-based version of cudf.read_parquet."""
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None, # Ignored
cpu=False,
):
super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)
self._pp_map = None
self._pp_nrows = None
if row_groups_per_part is None:
path0 = self._dataset.pieces[0].path
if cpu:
with self.fs.open(path0, "rb") as f0:
# Use pyarrow for CPU version.
# Pandas does not enable single-row-group access.
rg_byte_size_0 = _memory_usage(pq.ParquetFile(f0).read_row_group(0).to_pandas())
else:
if cudf.utils.ioutils._is_local_filesystem(self.fs):
# Allow cudf to open the file if this is a local file
# system (can be significantly faster in this case)
rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0))
else:
with self.fs.open(path0, "rb") as f0:
rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(f0, row_groups=0))
row_groups_per_part = self.part_size / rg_byte_size_0
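            # Illustrative numbers (hypothetical): a part_size of 1_000_000_000
            # bytes with a ~128_000_000-byte first row group gives about 7.8
            # here, which the int() cast below turns into 7 row groups per
            # output partition.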
if row_groups_per_part < 1.0:
                warnings.warn(
                    f"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger"
                    f" than requested part_size ({self.part_size}) for the NVTabular dataset."
                    f" A row group memory size of 128 MB is generally recommended. You can find"
                    f" info on how to set the row group size of parquet files in "
                    f"https://nvidia.github.io/NVTabular/main/resources/troubleshooting.html"
                    f"#setting-the-row-group-size-for-the-parquet-files"
                )
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
@property
@functools.lru_cache(1)
def _dataset(self):
paths = self.paths
fs = self.fs
if len(paths) > 1:
# This is a list of files
dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)
elif fs.isdir(paths[0]):
# This is a directory
dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)
else:
# This is a single file
dataset = pq.ParquetDataset(paths[0], filesystem=fs)
return dataset
@property
def _file_partition_map(self):
if self._pp_map is None:
self._process_parquet_metadata()
return self._pp_map
@property
def _partition_lens(self):
if self._pp_nrows is None:
self._process_parquet_metadata()
return self._pp_nrows
@property
def num_rows(self):
# TODO: Avoid parsing metadata once upstream dask
# can get the length efficiently (in all practical cases)
return sum(self._partition_lens)
def _process_parquet_metadata(self):
# Utility shared by `_file_partition_map` and `_partition_lens`
# to collect useful information from the parquet metadata
_pp_nrows = []
def _update_partition_lens(md, num_row_groups, rg_offset=None):
# Helper function to calculate the row count for each
# output partition (and add it to `_pp_nrows`)
rg_offset = rg_offset or 0
for rg_i in range(0, num_row_groups, self.row_groups_per_part):
rg_f = min(rg_i + self.row_groups_per_part, num_row_groups)
_pp_nrows.append(
sum([md.row_group(rg + rg_offset).num_rows for rg in range(rg_i, rg_f)])
)
return
dataset = self._dataset
if dataset.metadata:
# We have a metadata file.
            # Determining the row-group count per file.
_path_row_groups = defaultdict(int)
for rg in range(dataset.metadata.num_row_groups):
fn = dataset.metadata.row_group(rg).column(0).file_path
_path_row_groups[fn] += 1
# Convert the per-file row-group count to the
# file-to-partition mapping
ind, rg = 0, 0
_pp_map = defaultdict(list)
for fn, num_row_groups in _path_row_groups.items():
part_count = math.ceil(num_row_groups / self.row_groups_per_part)
_pp_map[fn] = np.arange(ind, ind + part_count)
_update_partition_lens(dataset.metadata, num_row_groups, rg_offset=rg)
ind += part_count
rg += num_row_groups
else:
# No metadata file. Construct file-to-partition map manually
ind = 0
_pp_map = {}
for piece in dataset.pieces:
md = piece.get_metadata()
num_row_groups = md.num_row_groups
part_count = math.ceil(num_row_groups / self.row_groups_per_part)
fn = piece.path.split(self.fs.sep)[-1]
_pp_map[fn] = np.arange(ind, ind + part_count)
_update_partition_lens(md, num_row_groups)
ind += part_count
self._pp_map = _pp_map
self._pp_nrows = _pp_nrows
def to_ddf(self, columns=None, cpu=None):
# Check if we are using cpu
cpu = self.cpu if cpu is None else cpu
if cpu:
# Return a Dask-Dataframe in CPU memory
for try_engine in ["pyarrow-dataset", "pyarrow"]:
# Try to use the "pyarrow-dataset" engine, if
# available, but fall back on vanilla "pyarrow"
# for older Dask versions.
try:
return dd.read_parquet(
self.paths,
engine=try_engine,
columns=columns,
index=None if columns is None else False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
except ValueError:
pass
raise RuntimeError("dask.dataframe.read_parquet failed.")
return dask_cudf.read_parquet(
self.paths,
columns=columns,
# can't omit reading the index in if we aren't being passed columns
index=None if columns is None else False,
gather_statistics=False,
split_row_groups=self.row_groups_per_part,
storage_options=self.storage_options,
)
def to_cpu(self):
self.cpu = True
def to_gpu(self):
self.cpu = False
def validate_dataset(
self,
add_metadata_file=False,
require_metadata_file=True,
row_group_max_size=None,
file_min_size=None,
):
"""Validate ParquetDatasetEngine object for efficient processing.
The purpose of this method is to validate that the raw dataset
meets the minimal requirements for efficient NVTabular processing.
Warnings are raised if any of the following conditions are not met:
- The raw dataset directory should contain a global "_metadata"
file. If this file is missing, ``add_metadata_file=True`` can
be passed to generate a new one.
- If there is no _metadata file, the parquet schema must be
consistent for all row-groups/files in the raw dataset.
Otherwise, a new _metadata file must be generated to avoid
errors at IO time.
- The row-groups should be no larger than the maximum size limit
(``row_group_max_size``).
- For multi-file datasets, the files should be no smaller than
the minimum size limit (``file_min_size``).
Parameters
-----------
add_metadata_file : bool, default False
Whether to add a global _metadata file to the dataset if one
is missing.
require_metadata_file : bool, default True
Whether to require the existence of a _metadata file to pass
the dataset validation.
row_group_max_size : int or str, default None
Maximum size (in bytes) of each parquet row-group in the
dataset. If None, the minimum of ``self.part_size`` and 500MB
will be used.
file_min_size : int or str, default None
Minimum size (in bytes) of each parquet file in the dataset. This
limit is only applied if there are >1 file in the dataset. If None,
``self.part_size`` will be used.
Returns
-------
valid : bool
Whether or not the input dataset is valid for efficient NVTabular
processing.
"""
meta_valid = True # Parquet format and _metadata exists
size_valid = False # Row-group sizes are appropriate
# Check for user-specified row-group size limit.
# Otherwise we use the smaller of the dataset partition
# size and 500MB.
if row_group_max_size is None:
row_group_max_size = min(self.part_size, 500_000_000)
else:
row_group_max_size = parse_bytes(row_group_max_size)
# Check for user-specified file size limit.
# Otherwise we use the smaller of the dataset partition
# size and 500MB.
if file_min_size is None:
file_min_size = self.part_size
else:
file_min_size = parse_bytes(file_min_size)
# Get dataset and path list
pa_dataset = self._dataset
paths = [p.path for p in pa_dataset.pieces]
root_dir, fns = _analyze_paths(paths, self.fs)
# Collect dataset metadata
metadata_file_exists = bool(pa_dataset.metadata)
schema_errors = defaultdict(set)
if metadata_file_exists:
# We have a metadata file
metadata = pa_dataset.metadata
else:
# No metadata file - Collect manually
metadata = None
for piece, fn in zip(pa_dataset.pieces, fns):
md = piece.get_metadata()
md.set_file_path(fn)
if metadata:
_append_row_groups(metadata, md, schema_errors, piece.path)
else:
metadata = md
# Check for inconsistent schemas.
# This is not a problem if a _metadata file exists
for field in schema_errors:
msg = f"Schema mismatch detected in column: '{field}'."
warnings.warn(msg)
for item in schema_errors[field]:
msg = f"[{item[0]}] Expected {item[1]}, got {item[2]}."
warnings.warn(msg)
# If there is schema mismatch, urge the user to add a _metadata file
if len(schema_errors):
meta_valid = False # There are schema-mismatch errors
# Check that the Dask version supports `create_metadata_file`
if LooseVersion(dask.__version__) <= "2.30.0":
msg = (
"\nThe installed version of Dask is too old to handle "
"schema mismatch. Try installing the latest version."
)
warnings.warn(msg)
return meta_valid and size_valid # Early return
# Collect the metadata with dask_cudf and then convert to pyarrow
metadata_bytes = dask_cudf.io.parquet.create_metadata_file(
paths,
out_dir=False,
)
with BytesIO() as myio:
myio.write(memoryview(metadata_bytes))
myio.seek(0)
metadata = pq.ParquetFile(myio).metadata
if not add_metadata_file:
msg = (
"\nPlease pass add_metadata_file=True to add a global "
"_metadata file, or use the regenerate_dataset utility to "
"rewrite your dataset. Without a _metadata file, the schema "
"mismatch may cause errors at read time."
)
warnings.warn(msg)
# Record the total byte size of all row groups and files
max_rg_size = 0
max_rg_size_path = None
file_sizes = defaultdict(int)
for rg in range(metadata.num_row_groups):
row_group = metadata.row_group(rg)
path = row_group.column(0).file_path
total_byte_size = row_group.total_byte_size
if total_byte_size > max_rg_size:
max_rg_size = total_byte_size
max_rg_size_path = path
file_sizes[path] += total_byte_size
# Check if any row groups are prohibitively large.
# Also check if any row groups are larger than recommended.
if max_rg_size > row_group_max_size:
# One or more row-groups are above the "required" limit
msg = (
f"Excessive row_group size ({max_rg_size}) detected in file "
f"{max_rg_size_path}. Please use the regenerate_dataset utility "
f"to rewrite your dataset."
)
warnings.warn(msg)
else:
# The only way size_valid==True is if we get here
size_valid = True
# Check if any files are smaller than the desired size.
# We only warn if there are >1 files in the dataset.
for path, size in file_sizes.items():
if size < file_min_size and len(pa_dataset.pieces) > 1:
msg = (
f"File {path} is smaller than the desired dataset "
f"partition size ({self.part_size}). Consider using the "
f"regenerate_dataset utility to rewrite your dataset with a smaller "
f"number of (larger) files."
)
warnings.warn(msg)
size_valid = False
# If the _metadata file is missing, we need to write
# it (or inform the user that it is missing)
if not metadata_file_exists:
if add_metadata_file:
# Write missing _metadata file
fs = self.fs
metadata_path = fs.sep.join([root_dir, "_metadata"])
with fs.open(metadata_path, "wb") as fil:
metadata.write_metadata_file(fil)
meta_valid = True
else:
# Inform user that the _metadata file is missing
msg = (
"For best performance with NVTabular, there should be a "
"global _metadata file located in the root directory of the "
"dataset. Please pass add_metadata_file=True to add the "
"missing file."
)
warnings.warn(msg)
if require_metadata_file:
meta_valid = False
# Return True if we have a parquet dataset with a _metadata file (meta_valid)
# and the row-groups and file are appropriate sizes (size_valid)
return meta_valid and size_valid
@classmethod
def regenerate_dataset(
cls,
dataset,
output_path,
columns=None,
file_size=None,
part_size=None,
cats=None,
conts=None,
labels=None,
storage_options=None,
):
"""Regenerate an NVTabular Dataset for efficient processing.
Example Usage::
dataset = Dataset("/path/to/data_pq", engine="parquet")
dataset.regenerate_dataset(
out_path, part_size="1MiB", file_size="10MiB"
)
Parameters
-----------
dataset : Dataset
Input `Dataset` object (to be regenerated).
output_path : string
Root directory path to use for the new (regenerated) dataset.
columns : list[string], optional
Subset of columns to include in the regenerated dataset.
file_size : int or string, optional
Desired size of each output file.
part_size : int or string, optional
Desired partition size to use within regeneration algorithm.
Note that this is effectively the size of each contiguous write
operation in cudf.
cats : list[string], optional
Categorical column list.
conts : list[string], optional
Continuous column list.
labels : list[string], optional
Label column list.
storage_options : dict, optional
Storage-option kwargs to pass through to the `fsspec` file-system
interface.
Returns
-------
result : int or Delayed
If `compute=True` (default), the return value will be an integer
corresponding to the number of generated data files. If `False`,
the returned value will be a `Delayed` object.
"""
# Specify ideal file size and partition size
row_group_size = 128_000_000
file_size = parse_bytes(file_size) or row_group_size * 100
part_size = parse_bytes(part_size) or row_group_size * 10
part_size = min(part_size, file_size)
fs, _, _ = get_fs_token_paths(output_path, mode="wb", storage_options=storage_options)
# Start by converting the original dataset to a Dask-Dataframe
# object in CPU memory. We avoid GPU memory in case the original
# dataset is prone to OOM errors.
_ddf = dataset.engine.to_ddf(columns=columns, cpu=True)
# Prepare general metadata (gmd)
gmd = {}
cats = cats or []
conts = conts or []
labels = labels or []
if not len(cats + conts + labels):
warnings.warn(
"General-metadata information not detected! "
"Please pass lists for `cats`, `conts`, and `labels` as"
"arguments to `regenerate_dataset` to ensure a complete "
"and correct _metadata.json file."
)
col_idx = {str(name): i for i, name in enumerate(_ddf.columns)}
gmd["cats"] = [{"col_name": c, "index": col_idx[c]} for c in cats]
gmd["conts"] = [{"col_name": c, "index": col_idx[c]} for c in conts]
gmd["labels"] = [{"col_name": c, "index": col_idx[c]} for c in labels]
# Get list of partition lengths
token = tokenize(
dataset,
output_path,
columns,
part_size,
file_size,
cats,
conts,
labels,
storage_options,
)
getlen_name = "getlen-" + token
name = "all-" + getlen_name
dsk = {(getlen_name, i): (len, (_ddf._name, i)) for i in range(_ddf.npartitions)}
dsk[name] = [(getlen_name, i) for i in range(_ddf.npartitions)]
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[_ddf])
size_list = Delayed(name, graph).compute()
# Get memory usage per row using first partition
p0_mem_size = _ddf.partitions[0].memory_usage(deep=True, index=True).sum().compute()
mem_per_row = int(float(p0_mem_size) / float(size_list[0]))
# Determine the number of rows to assign to each output partition
# and the number of output partitions to assign to each output file
rows_per_part = int(part_size / mem_per_row)
parts_per_file = int(file_size / part_size)
# Construct re-partition graph
dsk2 = {}
repartition_name = "repartition-" + token
split_name = "split-" + repartition_name
getitem_name = "getitem-" + repartition_name
gets = defaultdict(list)
out_parts = 0
remaining_out_part_rows = rows_per_part
for i, in_part_size in enumerate(size_list):
# The `split` dictionary will be passed to this input
# partition to dictate how that partition will be split
# into different output partitions/files. The "key" of
# this dict is the output partition, and the value is a
# tuple specifying the (start, end) row range.
split = {}
last = 0
while in_part_size >= remaining_out_part_rows:
gets[out_parts].append(i)
split[out_parts] = (last, last + remaining_out_part_rows)
last += remaining_out_part_rows
in_part_size = in_part_size - remaining_out_part_rows
remaining_out_part_rows = rows_per_part
out_parts += 1
if in_part_size:
gets[out_parts].append(i)
split[out_parts] = (last, last + in_part_size)
remaining_out_part_rows -= in_part_size
if remaining_out_part_rows == 0:
remaining_out_part_rows = rows_per_part
out_parts += 1
dsk2[(split_name, i)] = (_split_part, (_ddf._name, i), split)
npartitions = max(gets) + 1
for k, v_list in gets.items():
last = None
_concat_list = []
for v in v_list:
key = (getitem_name, v, k)
_concat_list.append(key)
dsk2[key] = (operator.getitem, (split_name, v), k)
ignore_index = True
dsk2[(repartition_name, k)] = (_concat, _concat_list, ignore_index)
graph2 = HighLevelGraph.from_collections(repartition_name, dsk2, dependencies=[_ddf])
divisions = [None] * (npartitions + 1)
_ddf2 = new_dd_object(graph2, repartition_name, _ddf._meta, divisions)
# Make sure the root directory exists
fs.mkdirs(output_path, exist_ok=True)
# Construct rewrite graph
dsk3 = {}
rewrite_name = "rewrite-" + token
write_data_name = "write-data-" + rewrite_name
write_metadata_name = "write-metadata-" + rewrite_name
inputs = []
final_inputs = []
for i in range(_ddf2.npartitions):
index = i // parts_per_file
nex_index = (i + 1) // parts_per_file
package_task = (index != nex_index) or (i == (_ddf2.npartitions - 1))
fn = f"part.{index}.parquet"
inputs.append((repartition_name, i))
if package_task:
final_inputs.append((write_data_name, i))
dsk3[(write_data_name, i)] = (
_write_data,
inputs,
output_path,
fs,
fn,
)
inputs = []
# Final task collects and writes all metadata
dsk3[write_metadata_name] = (
_write_metadata_file,
final_inputs,
fs,
output_path,
gmd,
)
graph3 = HighLevelGraph.from_collections(write_metadata_name, dsk3, dependencies=[_ddf2])
return Delayed(write_metadata_name, graph3)
def _write_metadata_file(md_list, fs, output_path, gmd_base):
# Prepare both "general" and parquet metadata
gmd = gmd_base.copy()
pmd = {}
data_paths = []
file_stats = []
for m in md_list:
for path in m.keys():
md = m[path]["md"]
rows = m[path]["rows"]
pmd[path] = md
data_paths.append(path)
fn = path.split(fs.sep)[-1]
file_stats.append({"file_name": fn, "num_rows": rows})
gmd["data_paths"] = data_paths
gmd["file_stats"] = file_stats
# Write general metadata file
GPUParquetWriter.write_general_metadata(gmd, fs, output_path)
# Write specialized parquet metadata file
GPUParquetWriter.write_special_metadata(pmd, fs, output_path)
# Return total file count (sanity check)
return len(data_paths)
def _write_data(data_list, output_path, fs, fn):
# Initialize chunked writer
path = fs.sep.join([output_path, fn])
writer = pwriter_cudf(path, compression=None)
rows = 0
# Loop over the data_list, convert to cudf,
# and append to the file
for data in data_list:
rows += len(data)
writer.write_table(cudf.from_pandas(data))
# Return metadata and row-count in dict
return {fn: {"md": writer.close(metadata_file_path=fn), "rows": rows}}
class BaseParquetWriter(ThreadedWriter):
def __init__(self, out_dir, suffix=".parquet", **kwargs):
super().__init__(out_dir, **kwargs)
self.data_paths = []
self.data_files = []
self.data_writers = []
self.data_bios = []
self._lock = threading.RLock()
self.pwriter = self._pwriter
self.pwriter_kwargs = {}
self.suffix = suffix
@property
def _pwriter(self):
"""Returns ParquetWriter Backend Class"""
raise (NotImplementedError)
def _read_parquet(self, source):
"""Read parquet data from source"""
raise (NotImplementedError)
def _to_parquet(self, df, sink):
"""Write data to parquet and return pq metadata"""
raise (NotImplementedError)
def _get_filename(self, i):
if self.fns:
fn = self.fns[i]
elif self.use_guid:
fn = f"{i}.{guid()}{self.suffix}"
else:
fn = f"{i}{self.suffix}"
return os.path.join(self.out_dir, fn)
def _append_writer(self, path, schema=None, add_args=None, add_kwargs=None):
# Add additional args and kwargs
_args = add_args or []
_kwargs = tlz.merge(self.pwriter_kwargs, add_kwargs or {})
if self.bytes_io:
bio = BytesIO()
self.data_bios.append(bio)
self.data_writers.append(self.pwriter(bio, *_args, **_kwargs))
else:
f = fsspec.open(path, mode="wb").open()
self.data_files.append(f)
self.data_writers.append(self.pwriter(f, *_args, **_kwargs))
def _get_or_create_writer(self, idx, schema=None):
# lazily initializes a writer for the given index
with self._lock:
while len(self.data_writers) <= idx:
# Append writer
path = self._get_filename(len(self.data_writers))
self.data_paths.append(path)
self._append_writer(path, schema=schema)
return self.data_writers[idx]
def _write_table(self, idx, data):
"""Write data"""
raise (NotImplementedError)
def _write_thread(self):
while True:
item = self.queue.get()
try:
if item is self._eod:
break
idx, data = item
with self.write_locks[idx]:
self._write_table(idx, data)
finally:
self.queue.task_done()
@classmethod
def write_special_metadata(cls, md, fs, out_dir):
"""Write global _metadata file"""
raise (NotImplementedError)
def _close_writers(self):
"""Close writers and return extracted metadata"""
raise (NotImplementedError)
def _bytesio_to_disk(self):
md = {}
for bio, path in zip(self.data_bios, self.data_paths):
df = self._read_parquet(bio)
bio.close()
if self.shuffle == Shuffle.PER_WORKER:
df = _shuffle_df(df)
md[path] = self._to_parquet(df, path)
return md
class GPUParquetWriter(BaseParquetWriter):
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
# Passing index=False when creating ParquetWriter
# to avoid bug: https://github.com/rapidsai/cudf/issues/7011
self.pwriter_kwargs = {"compression": None, "index": False}
@property
def _pwriter(self):
return pwriter_cudf
def _read_parquet(self, source):
return cudf.io.read_parquet(source)
def _to_parquet(self, df, sink):
fn = sink.split(self.fs.sep)[-1]
return df.to_parquet(sink, metadata_file_path=fn, compression=None, index=False)
def _write_table(self, idx, data):
writer = self._get_or_create_writer(idx)
writer.write_table(data)
@classmethod
def write_special_metadata(cls, md, fs, out_dir):
# Sort metadata by file name and convert list of
# tuples to a list of metadata byte-blobs
md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
# Aggregate metadata and write _metadata file
_write_pq_metadata_file_cudf(md_list, fs, out_dir)
def _close_writers(self):
md_dict = {}
_fns = self.fns or [path.split(self.fs.sep)[-1] for path in self.data_paths]
for writer, fn in zip(self.data_writers, _fns):
md_dict[fn] = writer.close(metadata_file_path=fn)
for f in self.data_files:
f.close()
return md_dict
class CPUParquetWriter(BaseParquetWriter):
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
self.md_collectors = {}
self.pwriter_kwargs = {"compression": None}
@property
def _pwriter(self):
return pwriter_pyarrow
def _read_parquet(self, source):
return pd.read_parquet(source, engine="pyarrow")
def _get_row_group_size(self, df):
# Make sure our `row_group_size` argument (which corresponds
# to the number of rows in each row-group) will produce
# row-groups ~128MB in size.
if not hasattr(self, "_row_group_size"):
row_size = df.memory_usage(deep=True).sum() / max(len(df), 1)
self._row_group_size = math.ceil(128_000_000 / row_size)
return self._row_group_size
def _to_parquet(self, df, sink):
md = []
df.to_parquet(
sink,
row_group_size=self._get_row_group_size(df),
metadata_collector=md,
compression=None,
index=False,
)
fn = sink.split(self.fs.sep)[-1]
md[0].set_file_path(fn)
return md
def _append_writer(self, path, schema=None):
# Define "metadata collector" for pyarrow
_md_collector = []
_args = [schema]
_kwargs = {"metadata_collector": _md_collector}
# Use `BaseParquetWriter` logic
super()._append_writer(path, add_args=_args, add_kwargs=_kwargs)
# Keep track of "metadata collector" for pyarrow
self.md_collectors[path] = _md_collector
def _write_table(self, idx, data):
table = pa.Table.from_pandas(data, preserve_index=False)
writer = self._get_or_create_writer(idx, schema=table.schema)
writer.write_table(table, row_group_size=self._get_row_group_size(data))
@classmethod
def write_special_metadata(cls, md, fs, out_dir):
# Sort metadata by file name and convert list of
# tuples to a list of metadata byte-blobs
md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
# Aggregate metadata and write _metadata file
_write_pq_metadata_file_pyarrow(md_list, fs, out_dir)
def _close_writers(self):
_fns = self.fns or [path.split(self.fs.sep)[-1] for path in self.data_paths]
for writer, fn in zip(self.data_writers, _fns):
writer.close()
_path = self.fs.sep.join([str(self.out_dir), fn])
self.md_collectors[_path][0].set_file_path(fn)
return self.md_collectors
def _write_pq_metadata_file_cudf(md_list, fs, path):
"""Converts list of parquet metadata objects into a single shared _metadata file."""
if md_list:
metadata_path = fs.sep.join([path, "_metadata"])
_meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
with fs.open(metadata_path, "wb") as fil:
fil.write(bytes(_meta))
return
def _write_pq_metadata_file_pyarrow(md_list, fs, path):
"""Converts list of parquet metadata objects into a single shared _metadata file."""
if md_list:
metadata_path = fs.sep.join([path, "_metadata"])
_meta = None
for md in itertools.chain(*md_list):
if _meta is None:
_meta = md
else:
_meta.append_row_groups(md)
with fs.open(metadata_path, "wb") as fil:
_meta.write_metadata_file(fil)
return
def guid():
"""Simple utility function to get random hex string"""
return uuid4().hex
def _memory_usage(df):
"""Return the total memory usage of a DataFrame"""
return df.memory_usage(deep=True).sum()
def _append_row_groups(metadata, md, err_collector, path):
"""Helper function to concatenate parquet metadata with
pyarrow, and catch relevant schema errors.
"""
try:
metadata.append_row_groups(md)
except RuntimeError as err:
if "requires equal schemas" in str(err):
schema = metadata.schema.to_arrow_schema()
schema_new = md.schema.to_arrow_schema()
for i, name in enumerate(schema.names):
if schema_new.types[i] != schema.types[i]:
err_collector[name].add((path, schema.types[i], schema_new.types[i]))
else:
raise err
def _split_part(x, split):
out = {}
for k, v in split.items():
out[k] = x.iloc[v[0] : v[1]]
return out
```
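A self-contained sketch (plain pandas; the import path is assumed from the file location above) of how `_split_part` carves one input partition into row ranges destined for different output partitions during `regenerate_dataset`.

```python
import pandas as pd

from nvtabular.io.parquet import _split_part  # assumed importable per the path above

df = pd.DataFrame({"x": range(10)})
# Output partition 0 takes rows [0, 4), output partition 1 takes rows [4, 10).
parts = _split_part(df, {0: (0, 4), 1: (4, 10)})
print(len(parts[0]), len(parts[1]))  # 4 6
```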
#### File: nvtabular/ops/difference_lag.py
```python
from nvtabular.dispatch import DataFrameType, _is_dataframe_object, annotate
from .operator import ColumnNames, Operator
class DifferenceLag(Operator):
"""Calculates the difference between two consecutive rows of the dataset. For instance, this
operator can calculate the time since a user last had another interaction.
This requires a dataset partitioned by one set of columns (userid) and sorted further by another
set (userid, timestamp). The dataset must already be partitioned and sorted before being passed
to the workflow. This can be easily done using dask-cudf::
# get a nvt dataset and convert to a dask dataframe
ddf = nvtabular.Dataset(PATHS).to_ddf()
# partition the dask dataframe by userid, then sort by userid/timestamp
ddf = ddf.shuffle("userid").sort_values(["userid", "timestamp"])
# create a new nvtabular dataset on the partitioned/sorted values
dataset = nvtabular.Dataset(ddf)
Once passed an appropriate dataset, this operator can be used to create a workflow to
compute the lagged difference within a partition::
# compute the delta in timestamp for each users session
diff_features = ["quantity"] >> ops.DifferenceLag(partition_cols=["userid"], shift=[1, -1])
processor = nvtabular.Workflow(diff_features)
Parameters
-----------
partition_cols : str or list of str
Column or Columns that are used to partition the data.
shift : int, default 1
The number of rows to look backwards when computing the difference lag. Negative values
indicate the number of rows to look forwards, making this compute the lead instead of lag.
"""
def __init__(self, partition_cols, shift=1):
super(DifferenceLag, self).__init__()
self.partition_cols = partition_cols
self.shifts = [shift] if isinstance(shift, int) else shift
@annotate("DifferenceLag_op", color="darkgreen", domain="nvt_python")
def transform(self, columns: ColumnNames, df: DataFrameType) -> DataFrameType:
# compute a mask indicating partition boundaries, handling multiple partition_cols
# represent partition boundaries by None values
output = {}
for shift in self.shifts:
mask = df[self.partition_cols] == df[self.partition_cols].shift(shift)
if _is_dataframe_object(mask):
mask = mask.fillna(False).all(axis=1)
mask[mask == False] = None # noqa pylint: disable=singleton-comparison
for col in columns:
output[self._column_name(col, shift)] = (df[col] - df[col].shift(shift)) * mask
return type(df)(output)
transform.__doc__ = Operator.transform.__doc__
def dependencies(self):
return self.partition_cols
def output_column_names(self, columns: ColumnNames) -> ColumnNames:
return [self._column_name(col, shift) for shift in self.shifts for col in columns]
def _column_name(self, col, shift):
return f"{col}_difference_lag_{shift}"
```
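A standalone sketch of the masking idea in `DifferenceLag.transform`, written with plain pandas instead of the NVTabular dispatch layer: rows whose shifted neighbour lies in a different `userid` partition get a null mask, so their difference comes out as NaN.

```python
import pandas as pd

df = pd.DataFrame({"userid": [1, 1, 1, 2, 2],
                   "timestamp": [10, 12, 15, 7, 9]})
shift = 1
# True only where the shifted row belongs to the same partition (userid).
mask = (df[["userid"]] == df[["userid"]].shift(shift)).fillna(False).all(axis=1)
mask[mask == False] = None  # noqa: E712 - partition boundaries become null
print(((df["timestamp"] - df["timestamp"].shift(shift)) * mask).tolist())
# [nan, 2.0, 3.0, nan, 2.0]
```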
|
{
"source": "jershmagersh/yegsecbot",
"score": 3
}
|
#### File: jershmagersh/yegsecbot/yegsecbot.py
```python
from slackclient import SlackClient
import sys, json, sqlite3, time, re, datetime
MENTION_REGEX = "^<@(|[WU][A-Z0-9]+?)>(.*)"
class ConfigException(Exception):
pass
class ConnectionException(Exception):
pass
class YegsecDatabase:
def __init__(self, db_path):
self.path = db_path
self.conn = sqlite3.connect(db_path)
self.cursor = self.conn.cursor()
def confirm_user(self, user, month, year, pref):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
if not result:
self.cursor.execute("INSERT INTO users (user_id) VALUES (?)", (user,))
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
veg_bool = 0
if pref:
veg_bool = 1
else:
veg_bool = 0
self.cursor.execute("SELECT * FROM confirmations WHERE meetup_id = ? AND user_id = ?", (meeting_id, user))
if(self.cursor.fetchone()):
return False
else:
self.cursor.execute("INSERT INTO confirmations (user_id, meetup_id, pizza_pref) VALUES (?, ?, ?)", (user, meeting_id, veg_bool))
self.yegsec_commit()
return True
else:
return False
def remove_confirm_user(self, user, month, year):
self.cursor.execute("SELECT * FROM users WHERE user_id = ?", (user,))
result = self.cursor.fetchone()
#A user cannot remove a confirmation if they don't exist in the database already.
if not result:
return False
else:
self.cursor.execute("SELECT meetup_id FROM meetups WHERE month_id = ? and year_id = ?", (month, year))
meeting_id_a = self.cursor.fetchone()
if meeting_id_a:
meeting_id = meeting_id_a[0]
self.cursor.execute("DELETE FROM confirmations WHERE user_id = ? AND meetup_id = ?", (user, meeting_id))
self.yegsec_commit()
else:
return False
def yegsec_commit(self):
self.conn.commit()
#self.conn.close()
def get_summary(self):
result = self.cursor.execute("SELECT meetup_id FROM meetups")
results = {}
meetup_ids = []
meetup_id = self.cursor.fetchone()
while(meetup_id):
meetup_ids.append(meetup_id)
meetup_id = self.cursor.fetchone()
for meetup_id_a in meetup_ids:
meetup_id = meetup_id_a[0]
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 1", (meetup_id,))
veg_count = self.cursor.fetchone()
self.cursor.execute("SELECT count(*) FROM confirmations WHERE meetup_id = ? AND pizza_pref = 0", (meetup_id,))
other_count = self.cursor.fetchone()
self.cursor.execute("SELECT day_id, month_id, year_id FROM meetups WHERE meetup_id = ?", (meetup_id,))
date_result = self.cursor.fetchone()
results[meetup_id] = { "veg": veg_count[0],
"other": other_count[0],
"day": date_result[0],
"month": date_result[1],
"year": date_result[2]
}
return results
class YegsecBot:
def __init__(self, config):
db, token, rtm_delay = self.read_config(config)
self.db = YegsecDatabase(db)
self.bot = SlackClient(token)
self.rtm_delay = rtm_delay
if self.bot.rtm_connect(with_team_state=False):
self.bot_id = self.bot.api_call("auth.test")["user_id"]
try:
self.start()
except KeyboardInterrupt:
self.db.yegsec_commit()
else:
raise ConnectionException("Connection to Slack failed.")
def read_config(self, config_path):
f = open(config_path)
try:
frj = json.loads(f.read())
except:
raise ConfigException("Unable to read provided configuration: {}".format(config_path))
return frj['database'], frj['token'], frj['rtm_delay']
#Source: https://www.fullstackpython.com/blog/build-first-slack-bot-python.html
def parse_bot_commands(self, slack_events):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
            If a bot command is found, this function returns a tuple of command, channel and user.
            If it's not found, then this function returns None, None, None.
"""
for event in slack_events:
if event["type"] == "message" and not "subtype" in event:
user_id, message = self.parse_direct_mention(event["text"])
if user_id == self.bot_id:
#print(event)
return message, event["channel"], event["user"]
return None, None, None
def parse_direct_mention(self, message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search(MENTION_REGEX, message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
def get_next_meet(self):
return 3,2019
def add_user(self, command, channel, user):
"""
        Main function of the bot: adds a user's pizza confirmation and preferred pizza option
        (vegetarian or any) for a given meetup to the database.
"""
rs = re.findall("add me for ([0-9]{1,2}), ?([0-9]{4}) (vegetarian|any)", command, re.IGNORECASE)
rsm = re.findall("add me next (vegetarian|any)", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
                # rs holds (month, year, pref) tuples from the dated form, while
                # the "add me next" form captures only the preference string, so
                # pull the preference out of whichever pattern matched.
                pref = rs[0][2] if isinstance(rs[0], tuple) else rs[0]
                if "VEG" in pref.upper():
                    vegetarian = True
                    resp_veg = "vegetarian"
                else:
                    vegetarian = False
                    resp_veg = "non-vegetarian"
result = self.db.confirm_user(user, month, year, vegetarian)
if result:
return(":pizza::pizza::pizza:Thank you <@{}>, I will add you to the pizza numbers for the month {} for the year {} as a {} option:pizza::pizza::pizza:".format(user, month_str, year, resp_veg))
else:
return(":pizza::pizza::pizza:Sorry, <@{}> it looks like you've already been added for that month.:pizza::pizza::pizza:".format(user))
except:
return("Sorry, I tried to add you with that command, but I couldn't quite understand it. Please try again.")
def remove_user(self, command, channel, user):
"""
        Another main function of the bot: removes a user's pizza confirmation for a given meetup
        from the database.
"""
rs = re.findall("remove me for ([0-9]{1,2}), ?([0-9]{4})", command, re.IGNORECASE)
rsm = re.findall("remove me next", command, re.IGNORECASE)
if(len(rs) == 1 or len(rsm) == 1):
try:
if len(rs) == 1:
month = int(rs[0][0])
year = int(rs[0][1])
elif len(rsm) == 1:
month, year = self.get_next_meet()
rs = rsm
month_str = datetime.datetime(year, month, 1).strftime("%B")
self.db.remove_confirm_user(user, month, year)
                return(":pizza::pizza::pizza:Thank you <@{}>, I will remove you from the pizza numbers for the month {} for the year {}:pizza::pizza::pizza:".format(user, month_str, year))
except:
return("Sorry, I tried to remove you with that command, but I couldn't quite understand it. Please try again.")
def get_summary(self):
result = self.db.get_summary()
response = ""
for meetup_id, meetup in result.items():
total_pizza_count = meetup['other'] + meetup['veg']
response += "*Summary*\nMeetup Date: `{}/{}/{}`\nTotal Pizza Count: `{}`\nNon-Vegetarian: `{}`\nVegetarian: `{}`\n\n".format(meetup['day'], meetup['month'], meetup['year'], total_pizza_count, meetup['other'], meetup['veg'])
return response
def get_help(self):
return "You can send me the following commands:\n\
To get added to the next meetup's pizza count do: `add me next [any|vegetarian]`\n\
To get added to a future meetup's pizza count do: `add me for [month],[year]`\n\
To get removed from the next meetup's pizza count do: `remove me next`\n\
To be removed from a future meetup's pizza count do: `remove me [month],[year]`"
def handle_command(self, command, channel, user):
"""
Executes bot command if the command is known
"""
print("Received command: {}".format(command))
# Default response is help text for the user
default_response = "Not sure what you mean. Try `{}`".format("help")
# Finds and executes the given command, filling in response
response = None
print("Command received: {}".format(command))
if command.startswith("add me for") or command.startswith("add me next"):
response = self.add_user(command, channel, user)
if command.startswith("remove me for") or command.startswith("remove me next"):
response = self.remove_user(command, channel, user)
if command.startswith("summary"):
response = self.get_summary()
if command.startswith("help"):
response = self.get_help()
# Sends the response back to the channel
# That only requested user can see
self.bot.api_call(
"chat.postEphemeral",
channel=channel,
user=user,
text=response or default_response,
as_user=True,
)
def start(self):
"""
self.bot.api_call(
"chat.postMessage",
channel="general",
text="I'm alive!",
as_user=True
)
"""
while True:
command, channel, user = self.parse_bot_commands(self.bot.rtm_read())
if command:
self.handle_command(command, channel, user)
time.sleep(self.rtm_delay)
if __name__ == "__main__":
bot = YegsecBot("config.json")
```
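A quick illustration (separate from the bot) of how `MENTION_REGEX` splits an incoming Slack message into the mentioned bot id and the remaining command text, which is what `parse_direct_mention` relies on.

```python
import re

MENTION_REGEX = "^<@(|[WU][A-Z0-9]+?)>(.*)"
text = "<@U012ABCDE> add me next vegetarian"
match = re.search(MENTION_REGEX, text)
print(match.group(1))          # U012ABCDE  (the mentioned user/bot id)
print(match.group(2).strip())  # add me next vegetarian
```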
|
{
"source": "Jerska/jakevdp.github.io-source",
"score": 3
}
|
#### File: downloads/code/minesweeper.py
```python
import numpy as np
from itertools import product
from scipy.signal import convolve2d
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon
class MineSweeper(object):
covered_color = '#DDDDDD'
uncovered_color = '#AAAAAA'
edge_color = '#888888'
count_colors = ['none', 'blue', 'green', 'red', 'darkblue',
'darkred', 'darkgreen', 'black', 'black']
flag_vertices = np.array([[0.25, 0.2], [0.25, 0.8],
[0.75, 0.65], [0.25, 0.5]])
@classmethod
def beginner(cls):
return cls(8, 8, 10)
@classmethod
def intermediate(cls):
return cls(16, 16, 40)
@classmethod
def expert(cls):
return cls(30, 16, 99)
def __init__(self, width, height, nmines):
self.width, self.height, self.nmines = width, height, nmines
# Create the figure and axes
self.fig = plt.figure(figsize=((width + 2) / 3., (height + 2) / 3.))
self.ax = self.fig.add_axes((0.05, 0.05, 0.9, 0.9),
aspect='equal', frameon=False,
xlim=(-0.05, width + 0.05),
ylim=(-0.05, height + 0.05))
for axis in (self.ax.xaxis, self.ax.yaxis):
axis.set_major_formatter(plt.NullFormatter())
axis.set_major_locator(plt.NullLocator())
# Create the grid of squares
self.squares = np.array([[RegularPolygon((i + 0.5, j + 0.5),
numVertices=4,
radius=0.5 * np.sqrt(2),
orientation=np.pi / 4,
ec=self.edge_color,
fc=self.covered_color)
for j in range(height)]
for i in range(width)])
[self.ax.add_patch(sq) for sq in self.squares.flat]
# define internal state variables
self.mines = None
self.counts = None
self.clicked = np.zeros((self.width, self.height), dtype=bool)
self.flags = np.zeros((self.width, self.height), dtype=object)
self.game_over = False
# Create event hook for mouse clicks
self.fig.canvas.mpl_connect('button_press_event', self._button_press)
def _draw_mine(self, i, j):
self.ax.add_patch(plt.Circle((i + 0.5, j + 0.5), radius=0.25,
ec='black', fc='black'))
def _draw_red_X(self, i, j):
self.ax.text(i + 0.5, j + 0.5, 'X', color='r', fontsize=20,
ha='center', va='center')
def _toggle_mine_flag(self, i, j):
if self.clicked[i, j]:
pass
elif self.flags[i, j]:
self.ax.patches.remove(self.flags[i, j])
self.flags[i, j] = None
else:
self.flags[i, j] = plt.Polygon(self.flag_vertices + [i, j],
fc='red', ec='black', lw=2)
self.ax.add_patch(self.flags[i, j])
def _reveal_unmarked_mines(self):
for (i, j) in zip(*np.where(self.mines & ~self.flags.astype(bool))):
self._draw_mine(i, j)
def _cross_out_wrong_flags(self):
for (i, j) in zip(*np.where(~self.mines & self.flags.astype(bool))):
self._draw_red_X(i, j)
def _mark_remaining_mines(self):
for (i, j) in zip(*np.where(self.mines & ~self.flags.astype(bool))):
self._toggle_mine_flag(i, j)
def _setup_mines(self, i, j):
# randomly place mines on a grid, but not on space (i, j)
idx = np.concatenate([np.arange(i * self.height + j),
np.arange(i * self.height + j + 1,
self.width * self.height)])
np.random.shuffle(idx)
self.mines = np.zeros((self.width, self.height), dtype=bool)
self.mines.flat[idx[:self.nmines]] = 1
# count the number of mines bordering each square
self.counts = convolve2d(self.mines.astype(complex), np.ones((3, 3)),
mode='same').real.astype(int)
def _click_square(self, i, j):
# if this is the first click, then set up the mines
if self.mines is None:
self._setup_mines(i, j)
# if there is a flag or square is already clicked, do nothing
if self.flags[i, j] or self.clicked[i, j]:
return
self.clicked[i, j] = True
# hit a mine: game over
if self.mines[i, j]:
self.game_over = True
self._reveal_unmarked_mines()
self._draw_red_X(i, j)
self._cross_out_wrong_flags()
# square with no surrounding mines: clear out all adjacent squares
elif self.counts[i, j] == 0:
self.squares[i, j].set_facecolor(self.uncovered_color)
for ii in range(max(0, i - 1), min(self.width, i + 2)):
for jj in range(max(0, j - 1), min(self.height, j + 2)):
self._click_square(ii, jj)
        # square with neighboring mines: reveal its count
else:
self.squares[i, j].set_facecolor(self.uncovered_color)
self.ax.text(i + 0.5, j + 0.5, str(self.counts[i, j]),
color=self.count_colors[self.counts[i, j]],
ha='center', va='center', fontsize=18,
fontweight='bold')
# if all remaining squares are mines, mark them and end game
if self.mines.sum() == (~self.clicked).sum():
self.game_over = True
self._mark_remaining_mines()
def _button_press(self, event):
if self.game_over or (event.xdata is None) or (event.ydata is None):
return
i, j = map(int, (event.xdata, event.ydata))
if (i < 0 or j < 0 or i >= self.width or j >= self.height):
return
# left mouse button: reveal square. If the square is already clicked
        # and the correct # of mines are marked, then clear surrounding squares
if event.button == 1:
if (self.clicked[i, j]):
flag_count = self.flags[max(0, i - 1):i + 2,
max(0, j - 1):j + 2].astype(bool).sum()
if self.counts[i, j] == flag_count:
for ii, jj in product(range(max(0, i - 1),
min(self.width, i + 2)),
range(max(0, j - 1),
min(self.height, j + 2))):
self._click_square(ii, jj)
else:
self._click_square(i, j)
# right mouse button: mark/unmark flag
elif (event.button == 3) and (not self.clicked[i, j]):
self._toggle_mine_flag(i, j)
self.fig.canvas.draw()
if __name__ == '__main__':
ms = MineSweeper.intermediate()
plt.show()
```
|
{
"source": "jersmith/crypto",
"score": 3
}
|
#### File: crypto/crypto/app.py
```python
import sys
from crypto.common import commando
from crypto.ciphers import caesar
from crypto.ciphers import vigenere
def format_block_text(text, width=24):
""" Print text in block format. """
i = 0
out = ''
while i < len(text):
if i > 0 and i % width == 0:
print(out)
out = ''
out += text[i] + ' '
i += 1
if len(out) > 0:
print(out)
def run():
""" Drive the ciphers from command line input. """
(err, value) = commando.parse('cipher [key|<width>] (decrypt|raw)', sys.argv[1:])
if err:
print(value)
return
data = sys.stdin.readlines()
cipher = None
if value['cipher'] == 'caesar':
cipher = caesar
elif value['cipher'] == 'vigenere':
cipher = vigenere
output_text = ''
for line in data:
if value['decrypt']:
output_text += cipher.decrypt(value['key'], line)
else:
output_text += cipher.encrypt(value['key'], line)
if value['raw']:
print(output_text)
elif 'width' in value:
format_block_text(output_text, int(value['width']))
else:
format_block_text(output_text)
```
#### File: crypto/ciphers/caesar.py
```python
ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def encrypt(key, plain_text):
""" Using the key as the replacement alphabet, 'encrypt' the plain_text. """
cipher_text = ''
for letter in plain_text.upper():
if letter in ALPHABET:
print(f'{letter} : {key[ALPHABET.index(letter)]}')
cipher_text += key[ALPHABET.index(letter)]
return cipher_text
def decrypt(key, cipher_text):
""" Using the key as the replacement alphabet, 'decrypt' the cipher_text. """
cipher_text = ''.join(cipher_text.strip().split(' '))
plain_text = ''
for letter in cipher_text:
plain_text += ALPHABET[key.index(letter)]
return plain_text
```
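A minimal usage sketch for the substitution helpers above, assuming the package layout used by app.py; the 26-letter key is chosen purely for illustration (the alphabet rotated by three, as in a classic Caesar shift), and note that `encrypt()` also echoes each substitution through its internal print call.
```python
# Illustrative key: the alphabet rotated by three positions.
from crypto.ciphers import caesar

KEY = 'DEFGHIJKLMNOPQRSTUVWXYZABC'

cipher_text = caesar.encrypt(KEY, 'attack at dawn')
print(cipher_text)                       # DWWDFNDWGDZQ
print(caesar.decrypt(KEY, cipher_text))  # ATTACKATDAWN
```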
#### File: crypto/ciphers/vigenere.py
```python
ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def encrypt(key, plain_text):
""" Use each letter of the key to find the rotation of the current alphabet. """
cipher_text = ''
key_index = 0
shifts = []
# Do this once, not for every iteration of the key
for letter in key:
shifts.append(ALPHABET.index(letter))
for letter in plain_text.upper():
if letter in ALPHABET:
shift_index = shifts[key_index % len(key)]
letter_index = ALPHABET.index(letter)
cipher_text += ALPHABET[(letter_index + shift_index) % 26]
key_index += 1
return cipher_text
def decrypt(key, cipher_text):
""" Use each letter of the key to unrotate the current alphabet. """
cipher_text = ''.join(cipher_text.strip().split(' '))
plain_text = ''
key_index = 0
shifts = []
# Do this once, not for every iteration of the key
for letter in key:
shifts.append(ALPHABET.index(letter))
for letter in cipher_text:
shift_index = shifts[key_index % len(key)]
letter_index = ALPHABET.index(letter)
plain_text += ALPHABET[(letter_index + (26 - shift_index)) % 26]
key_index += 1
return plain_text
```
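For illustration, the classic LEMON/ATTACKATDAWN test vector run through the functions above; the key and plaintext are example values only, and the key must be upper-case because it is looked up in ALPHABET directly.
```python
from crypto.ciphers import vigenere

cipher_text = vigenere.encrypt('LEMON', 'attack at dawn')
print(cipher_text)                             # LXFOPVEFRNHR
print(vigenere.decrypt('LEMON', cipher_text))  # ATTACKATDAWN
```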
|
{
"source": "jersmith/hitron-cable-cpe",
"score": 2
}
|
#### File: hitron-cable-cpe/hitron_cpe/app.py
```python
import sys
from hitron_cpe.common import commando
from hitron_cpe.router import commands
def _set_defaults(value):
""" Reasonable defaults for these save some typing. """
# should work unless you've gotten creative with your network
address = '192.168.0.1'
if 'address' not in value:
value['address'] = address
# not sure this can be changed
user = 'cusadmin'
if 'user' not in value:
value['user'] = user
# yes, some providers set this as a default.
password = 'password'
if 'password' not in value:
value['password'] = password
return value
def _load_config(value):
if 'config' in value:
config_file = value['config']
else:
config_file = '.hitronrc'
try:
with open(config_file) as config_file:
lines = config_file.readlines()
for line in lines:
pair = line.split('=')
if pair[0] in value:
kval = pair[1].strip()
if kval[0] == "'" or kval[0] == '"':
kval = kval[1:-1]
value[pair[0]] = kval
except OSError:
print(f'[+] {config_file} not found, using defaults')
return value
def run():
""" Execute the commands parsed from the command line. """
(err, value) = commando.parse(
'command [<address>|<user>|<password>|<toggle_ssid>|<config>] (verbose|help)',
sys.argv[1:])
if err:
print('Invalid command, try: ./hitron help')
return
value = _set_defaults(value)
value = _load_config(value)
commands.dispatch(value)
```
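A small sketch of how the two helpers above compose; the dict below stands in for what commando.parse would return (an assumption), and any matching key=value line in `.hitronrc` overrides the defaults.
```python
# Hypothetical driver: the input dict mimics a parsed command line.
from hitron_cpe.app import _set_defaults, _load_config

value = {'command': 'status'}
value = _set_defaults(value)   # fills in address/user/password defaults
value = _load_config(value)    # e.g. a line `password=secret` in .hitronrc wins
print(value['address'], value['user'])
```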
|
{
"source": "jersobh/aiowirecard",
"score": 2
}
|
#### File: aiowirecard/tests/test.py
```python
import asyncio
import json
import os
import random
import aiowirecard
async def main():
wirecard = aiowirecard.Wirecard(environment='sandbox', key=os.environ['WIRECARD_KEY'],
token=os.environ['WIRECARD_TOKEN'])
print('Creating customer...')
customer = {
"ownId": "%0.11d" % random.randint(0, 99999999999),
"fullname": "<NAME>",
"email": "<EMAIL>",
"birthDate": "1980-5-10",
"taxDocument": {
"type": "CPF",
"number": "%0.11d" % random.randint(0, 99999999999)
},
"phone": {
"countryCode": "55",
"areaCode": "11",
"number": "22226842"
},
"shippingAddress": {
"city": "Rio de Janeiro",
"district": "Ipanema",
"street": "Avenida Atlântica",
"streetNumber": "60",
"zipCode": "02446000",
"state": "RJ",
"country": "BRA"
},
"fundingInstrument": {
"method": "CREDIT_CARD",
"creditCard": {
"expirationMonth": "06",
"expirationYear": "22",
"number": "6362970000457013",
"cvc": "123",
"holder": {
"fullname": "<NAME>",
"birthdate": "1980-05-10",
"taxDocument": {
"type": "CPF",
"number": "10013390023"
},
"billingAddress": {
"city": "Rio de Janeiro",
"district": "Copacabana",
"street": "Rua Raimundo Corrêa",
"streetNumber": "1200",
"zipCode": "05246200",
"state": "RJ",
"country": "BRA"
},
"phone": {
"countryCode": "55",
"areaCode": "11",
"number": "22226842"
}
}
}
}
}
print('Customer data: ', customer)
create_user = await wirecard.post_customer(parameters=customer)
user_id = json.loads(create_user)['id']
print('Customer id: ', user_id)
get_user = await wirecard.get_customer(user_id)
print('Customer info:', get_user)
order = {
"ownId": "%0.11d" % random.randint(0, 99999999999),
"amount": {
"currency": "BRL",
"subtotals": {
"shipping": 1500
}
},
"items": [
{
"product": "Descrição do pedido",
"category": "CLOTHING",
"quantity": 1,
"detail": "Camiseta estampada branca",
"price": 9500
}
],
"customer": {
"id": user_id
}
}
new_order = await wirecard.post_order(order)
print('Creating an order... ')
order_id = json.loads(new_order)['id']
print('Order id: ', order_id)
order = await wirecard.get_order(order_id)
print('Getting order info: ', order)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
|
{
"source": "jersobh/docker-covidoff",
"score": 2
}
|
#### File: starter/announcements/views.py
```python
from django.views import View
from django.http import JsonResponse
from announcements.models import Announcement
from announcements.forms import AnnouncementForm
import json
class AnnouncementView(View):
def put(self, request):
try:
body = request.body.decode('utf-8')
body = json.loads(body)
except json.decoder.JSONDecodeError as ex:
return JsonResponse({ 'error': str(ex) }, status=400)
form = AnnouncementForm(body)
if not form.is_valid():
return JsonResponse(dict(form.errors.items()), status=422)
Announcement.objects.create(**{
'content': form.cleaned_data['content'],
})
return JsonResponse({})
```
#### File: starter/tracker/views.py
```python
from django.views import View
from django.http import JsonResponse
from tracker.models import Match
from tracker.forms import MatchForm
import json
class MatchView(View):
def put(self, request):
try:
body = request.body.decode('utf-8')
body = json.loads(body)
except json.decoder.JSONDecodeError as ex:
return JsonResponse({ 'error': str(ex) }, status=400)
form = MatchForm(body)
if not form.is_valid():
return JsonResponse(dict(form.errors.items()), status=422)
Match.objects.create(**{
'matcher': form.cleaned_data['matcher'],
'matchee': form.cleaned_data['matchee']
})
return JsonResponse({})
```
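For illustration, a client-side call against the MatchView handler above; the host and URL route are assumptions (they depend on the project's urls.py), and the view must be reachable without a CSRF token for a plain PUT like this to succeed.
```python
# Hypothetical client call; the payload fields mirror what MatchForm expects.
import requests

resp = requests.put(
    'http://localhost:8000/tracker/matches',
    json={'matcher': 'device-a', 'matchee': 'device-b'},
)
print(resp.status_code)  # 200 on success, 400 for malformed JSON, 422 for form errors
```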
|
{
"source": "jersobh/Kafka-Live-Dashboard",
"score": 2
}
|
#### File: Kafka-Live-Dashboard/app/app.py
```python
import asyncio
import aiohttp_jinja2
import jinja2
from aiohttp import web
from controllers import stream
from core.router import routes
async def start_background_tasks(app):
app['stream_producer'] = asyncio.ensure_future(stream.produce())
app['stream_consumer'] = asyncio.ensure_future(stream.consume())
async def cleanup_background_tasks(app):
app['stream_producer'].cancel()
await app['stream_producer']
app['stream_consumer'].cancel()
await app['stream_consumer']
async def factory():
loop = asyncio.get_event_loop()
app = web.Application(loop=loop)
aiohttp_jinja2.setup(app,
loader=jinja2.FileSystemLoader('views/templates/'))
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)
routes(app)
return app
# web.run_app(app, host='0.0.0.0', port=APP_PORT)
```
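The module above keeps its `web.run_app` call commented out; a minimal entry point, assuming it is appended to app.py and that port 8080 stands in for the undefined APP_PORT, could look like this (aiohttp's `run_app` accepts the `factory()` coroutine directly).
```python
# Hypothetical entry point appended to app.py; the port is an assumption.
if __name__ == '__main__':
    web.run_app(factory(), host='0.0.0.0', port=8080)
```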
#### File: app/controllers/stream.py
```python
import asyncio
import json
import random
import aiohttp
import motor.motor_asyncio
from aiohttp import web
from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
loop = asyncio.get_event_loop()
client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://mongodb:27017')
db = client['kafka_data']
clients = []
async def send_msg():
producer = AIOKafkaProducer(
loop=loop, bootstrap_servers=['kafka1:9092', 'kafka2:9093', 'kafka3:9094'])
await producer.start()
try:
# Produce message
sale_value = round(random.uniform(1.5, 99.99), 2)
random_message = json.dumps({'type': 'sale', 'value': sale_value})
await producer.send_and_wait("iot_messages", random_message.encode('utf-8'))
finally:
await producer.stop()
async def receive_msg():
consumer = AIOKafkaConsumer(
'iot_messages',
loop=loop, bootstrap_servers=['kafka1:9092', 'kafka2:9093', 'kafka3:9094'])
await consumer.start()
try:
msg = await consumer.getone()
return {'msg': msg.value.decode('utf-8'), 'time': msg.timestamp}
finally:
await consumer.stop()
async def produce():
try:
while True:
await asyncio.sleep(5)
await send_msg()
except asyncio.CancelledError:
print('Cancel consumer: close connections')
pass
async def consume():
try:
while True:
await asyncio.sleep(5)
data = await receive_msg()
for ws in clients:
await ws.send_str(json.dumps(data))
await db.kafka.insert_one(data)
except asyncio.CancelledError:
print('Cancel consumer: close connections')
pass
async def ws_handler(request):
ws = aiohttp.web.WebSocketResponse()
await ws.prepare(request)
if ws not in clients:
clients.append(ws)
try:
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await ws.close()
clients.remove(ws)
elif msg.type == aiohttp.WSMsgType.CLOSED:
break
elif msg.type == aiohttp.WSMsgType.ERROR:
break
    except asyncio.CancelledError:
        pass
    finally:
        # guard against double removal when the client sent 'close' itself
        if ws in clients:
            clients.remove(ws)
await ws.close()
return ws
```
|
{
"source": "jersobh/pybradesco",
"score": 2
}
|
#### File: pybradesco/pybradesco/__init__.py
```python
import json
import socket
import os
import OpenSSL
import requests
class BradescoBoleto(object):
def __init__(self, environment, cert_path, cert_passwd, sdk_ver="1.5.0"):
self.environment = environment
if os.path.isfile(cert_path):
            # a .pfx/.p12 bundle is binary, so read it in binary mode
            self.cert = OpenSSL.crypto.load_pkcs12(open(cert_path, 'rb').read(), passphrase=cert_passwd)
self.cert_info = self.cert.get_certificate()
else:
raise Exception("Error: Certificate file path not found")
self.cert_passwd = cert_passwd
self.sdk_ver = sdk_ver
self.host = socket.gethostname()
self.headers = {
'Content-Type': 'application/json',
'User-Agent': f'Bradesco-API-PHP/{self.sdk_ver};{self.host}'
}
if environment == 'production':
self.base_url = 'https://cobranca.bradesconetempresa.b.br/ibpjregistrotitulows/registrotitulo'
elif environment == 'sandbox':
self.base_url = 'https://cobranca.bradesconetempresa.b.br/ibpjregistrotitulows/registrotitulohomologacao'
self.session = requests.Session()
    def get_request_api(self):
        try:
            # The original called self.session.get() with no URL; use the configured base_url.
            return self.session.get(self.base_url, headers=self.headers)
        except requests.RequestException as error:
            raise Exception(f'Error: GET request failed ({error})')
    def post_request_api(self, payload=None):
        try:
            return self.session.post(self.base_url, json=payload, headers=self.headers)
        except requests.RequestException as error:
            raise Exception(f'Error: POST request failed ({error})')
    def response(self, res):
        # The original method was truncated after "try"; as a minimal completion,
        # decode a requests.Response body as JSON or raise a readable error.
        try:
            return res.json()
        except ValueError:
            raise Exception('Error: response body is not valid JSON')
```
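A minimal instantiation sketch for the class above; the certificate path and password are placeholders, and the constructor raises unless a real PKCS#12 (.pfx) file exists at that path.
```python
# Placeholder certificate path and password, for illustration only.
from pybradesco import BradescoBoleto

boleto = BradescoBoleto(
    environment='sandbox',
    cert_path='certs/bradesco.pfx',
    cert_passwd='changeit',
)
print(boleto.base_url)
```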
|
{
"source": "jersobh/pyconizer",
"score": 3
}
|
#### File: pyconizer/controllers/controller.py
```python
from views import render
from models import gen
from models import models
import face_recognition
def index(request):
img1 = "views/static/img/1.jpg"
img2 = "views/static/img/2.jpg"
img3 = "views/static/img/3.jpg"
known_image = face_recognition.load_image_file(img1)
unknown_image1 = face_recognition.load_image_file(img2)
unknown_image2 = face_recognition.load_image_file(img3)
biden_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image1)[0]
unknown_encoding2 = face_recognition.face_encodings(unknown_image2)[0]
results = face_recognition.compare_faces([biden_encoding], unknown_encoding)
results2 = face_recognition.compare_faces([biden_encoding], unknown_encoding2)
context = {'compare': results, 'compare2': results2, 'img1': img1, 'img2': img2}
template = 'index.jinja2'
#models.Users.create(username='Charlie') #my example has USERS table with username field)
return render.view(request, template, context)
def jsonExample(request):
data = {}
data['name'] = 'Jhonny'
data['surname'] = 'test'
status = 200
return render.json(data, status)
```
|
{
"source": "jersobh/WeParty",
"score": 2
}
|
#### File: WeParty/module/server.py
```python
import asyncio
import uvloop
from module.devices import ServerVirtualDevice
import threading
import json
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
class MyServerFactory(WebSocketServerFactory):
def __init__(self):
WebSocketServerFactory.__init__(self, "ws://127.0.0.1:9000")
self.clients = set()
self.virtual_device = ServerVirtualDevice()
self.device = self.virtual_device.get_device()
def listen_tap(self):
while True:
data = self.device.read(1500)
print(data)
self.broadcast(data, True)
def register(self, client):
if client not in self.clients:
print("registered client {}".format(client.peer))
self.clients.add(client)
def unregister(self, client):
if client in self.clients:
print("unregistered client {}".format(client.peer))
self.clients.remove(client)
def broadcast(self, payload, isBinary):
# if isBinary:
# print("Binary message received: {0} bytes".format(len(payload)))
# else:
# print("Text message received: {0}".format(payload.decode('utf8')))
for c in self.clients:
# c.sendMessage(msg.encode('utf8'))
c.sendMessage(payload, isBinary)
class MyServerProtocol(WebSocketServerProtocol):
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
self.factory.register(self)
print(self.factory.clients)
def onMessage(self, payload, isBinary):
if isBinary:
print(payload)
self.factory.device.write(payload)
self.factory.broadcast(payload, isBinary)
else:
data = json.loads(payload.decode('utf8'))
print("Text message received: {0}".format(payload.decode('utf8')))
def onClose(self, wasClean, code, reason):
self.factory.unregister(self)
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
factory = MyServerFactory()
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(coro)
try:
b = threading.Thread(name='background', target=factory.listen_tap)
b.start()
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
```
|
{
"source": "jersobh/zfs-resty",
"score": 3
}
|
#### File: zfs-resty/controllers/mainController.py
```python
import uuid
from datetime import datetime, timedelta
from controllers import zfsController
import jwt
import pam
import render
JWT_SECRET = "<KEY>"
JWT_ALGORITHM = "HS256"
JWT_EXP_DELTA_SECONDS = 4300
async def index(request):
return render.json({'error': 'nothing to see here...'}, 200)
async def auth(request):
try:
data = await request.json()
user = data['username']
password = data['password']
if pam.authenticate(user, password):
payload = {
'user': user,
'session_id': str(uuid.uuid4()),
'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
}
jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
return await render.json({'token': jwt_token.decode('utf-8')}, 200)
else:
return None
except Exception as e:
return await render.json({'error': str(e)}, 200)
async def check_token(request):
try:
jwt_token = request.headers.get('Authorization', None)
payload = jwt.decode(jwt_token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
return payload['session_id']
except (jwt.DecodeError, jwt.ExpiredSignatureError):
return False
async def create_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.create_pool(data['name'], data['raid'], data['devices'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def delete_pool(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.delete_pool(data['name'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def check_status(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_status()
return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_storage_info(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_disk_info()
return await render.json(res, 200)
except Exception as e:
print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_io_status(request):
check = await check_token(request)
if check:
try:
res = await zfsController.get_IO_stats()
return await render.json({'msg': res}, 200)
except Exception as e:
print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_new_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 500)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_spare_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.add_spare_disk(data['pool'], data['device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def replace_disk(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.replace_disk(data['pool'], data['old_device'], data['new_device'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
async def set_mountpoint(request):
check = await check_token(request)
if check:
try:
data = await request.json()
res = await zfsController.set_mountpoint(data['mountpoint'], data['pool'])
return await render.json({"success": res}, 200)
except Exception as e:
print(str(e))
return await render.raw({'error': str(e)}, 200)
else:
return await render.json({'error': 'Invalid or expired token'}, 403)
```
#### File: zfs-resty/controllers/zfsController.py
```python
import asyncio
# Create a Pool
async def create_pool(name='default-pool', mode='', devices=''):
cmd = f'zpool create {name} {mode} {devices}'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Delete Pool
async def delete_pool(name):
cmd = f'zpool destroy {name}'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Get Pool status
async def get_status():
cmd = 'zpool status'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Get HDD devices info
async def get_disk_info():
cmd = 'fdisk -l'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
result = {}
output = stdout.decode()
for line in output.split("\n"):
if not line.startswith("/"):
continue
parts = line.split()
inf = {}
if parts[1] == "*":
inf['bootable'] = True
del parts[1]
else:
inf['bootable'] = False
inf['start'] = int(parts[1])
inf['end'] = int(parts[2])
inf['blocks'] = int(parts[3].rstrip("+"))
inf['size'] = parts[4]
inf['type'] = " ".join(parts[6 :])
result[parts[0]] = inf
return result
if stderr:
return stderr.decode()
# Get pool I/O stats
async def get_IO_stats(pool=''):
cmd = f'zpool iostat -v {pool}'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Add a new disk to pool
async def add_new_disk(pool, device):
    cmd = f'zpool add -f {pool} {device}'  # the force flag belongs after the subcommand
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Add a spare disk
async def add_spare_disk(pool, device):
    cmd = f'zpool add -f {pool} spare {device}'  # spares are attached to an existing pool with "zpool add", not "create"
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# Replace a corrupted disk
async def replace_disk(pool, old_device, new_device):
cmd = f'zpool replace {pool} {old_device} {new_device}'
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
# set custom mountpoint
async def set_mountpoint(mountpoint, pool):
    cmd = f'zfs set mountpoint={mountpoint} {pool}'  # mountpoint is a dataset property, set with zfs rather than zpool
proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await proc.communicate()
if stdout:
return stdout.decode()
if stderr:
return stderr.decode()
```
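Since every helper above is a coroutine that shells out to the ZFS tools, a caller needs an event loop; a minimal read-only driver (requires the zfs utilities installed and sufficient privileges) might look like this.
```python
# Minimal driver: runs one read-only command from the module above.
import asyncio
from controllers import zfsController

async def main():
    print(await zfsController.get_status())

asyncio.run(main())
```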
|
{
"source": "jersson/learning-python-with-games",
"score": 4
}
|
#### File: learning-python-with-games/hangman/hangman.py
```python
import string
def isWordGuessed(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: boolean, True if all the letters of secretWord are in lettersGuessed;
False otherwise
'''
result = True
for letter in secretWord:
result = letter in lettersGuessed
if result == False:
break
return result
def getGuessedWord(secretWord, lettersGuessed):
'''
secretWord: string, the word the user is guessing
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters and underscores that represents
what letters in secretWord have been guessed so far.
'''
result = ''
secretLetters = list(secretWord)
for i in range(len(secretLetters)):
if (secretLetters[i] not in lettersGuessed):
secretLetters[i] = '_'
result = ''.join(secretLetters)
return result
def getAvailableLetters(lettersGuessed):
'''
lettersGuessed: list, what letters have been guessed so far
returns: string, comprised of letters that represents what letters have not
yet been guessed.
'''
result = ''
alphabet = string.ascii_lowercase
lettersAlphabet = list(alphabet)
for letter in lettersGuessed:
if letter in lettersAlphabet:
lettersAlphabet.remove(letter)
result = ''.join(lettersAlphabet)
return result
def hangman(secretWord):
'''
secretWord: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secretWord contains.
* Ask the user to supply one guess (i.e. letter) per round.
* The user should receive feedback immediately after each guess
about whether their guess appears in the computers word.
* After each round, you should also display to the user the
partially guessed word so far, as well as letters that the
user has not yet guessed.
Follows the other limitations detailed in the problem write-up.
'''
welcomeMessage = 'Welcome to the game, Hangman!'
guessingMessage = 'I am thinking of a word that is 1 letter long.'
splitterLine = '-------------'
numberAttempts = 8
attemptsLeftTemplate = 'You have {} guesses left.'
avaliableLetters = string.ascii_lowercase
availableLettersTemplate = 'Available letters: {}'
inputMessage = 'Please guess a letter: '
letters = list()
lettersGuessed = list()
if len(secretWord) > 1:
guessingMessage = 'I am thinking of a word that is {} letters long.'.format(
len(secretWord))
print(welcomeMessage)
print(guessingMessage)
print(splitterLine)
while numberAttempts > 0:
print(attemptsLeftTemplate.format(numberAttempts))
print(availableLettersTemplate.format(avaliableLetters))
letter = input(inputMessage).lower()
if letter in letters:
print("Oops! You've already guessed that letter: {}".format(
getGuessedWord(secretWord, letters)))
else:
letters.append(letter)
if letter in secretWord:
if letter in lettersGuessed:
print("Oops! You've already guessed that letter: {}".format(
getGuessedWord(secretWord, letters)))
else:
lettersGuessed.append(letter)
print('Good guess: {}'.format(
getGuessedWord(secretWord, letters)))
else:
print('Oops! That letter is not in my word: {}'.format(
getGuessedWord(secretWord, letters)))
numberAttempts -= 1
avaliableLetters = getAvailableLetters(letters)
if isWordGuessed(secretWord, lettersGuessed):
print(splitterLine)
print('Congratulations, you won!')
break
print(splitterLine)
if numberAttempts == 0:
print('Sorry, you ran out of guesses. The word was {}.'.format(secretWord))
```
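A quick, non-interactive check of the helper functions above (the hangman() loop itself needs stdin); the secret word and guesses are example values, and the import assumes the module file name shown in the header.
```python
from hangman import isWordGuessed, getGuessedWord, getAvailableLetters

secret = 'apple'
guessed = ['a', 'e', 'p', 'r']

print(isWordGuessed(secret, guessed))   # False ('l' has not been guessed)
print(getGuessedWord(secret, guessed))  # app_e
print(getAvailableLetters(guessed))     # bcdfghijklmnoqstuvwxyz
```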
|
{
"source": "jersson/longest-github-contribution",
"score": 3
}
|
#### File: jersson/longest-github-contribution/HtmlParser.py
```python
from requests import get
from requests import Response
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
from contextlib import closing
from utils import log_error
class HtmlParser():
def __is_valid_web_response(self, res: Response) -> bool:
'''
Returns True if the response seems to be HTML, False otherwise.
'''
content_type = res.headers['Content-Type'].lower()
return (res.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
def request(self, url: str) -> BeautifulSoup:
'''
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
'''
try:
result = None
with closing(get(url, stream=True)) as response:
if self.__is_valid_web_response(response):
result = BeautifulSoup(response.content, 'html.parser')
except RequestException as error:
log_error('Error making web requests to {}: {}'.format(url, error))
finally:
return result
```
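A usage sketch for the class above; the URL is an arbitrary public page, network access is required, and the import path assumes HtmlParser.py sits at the repository root as the header suggests.
```python
from HtmlParser import HtmlParser

parser = HtmlParser()
soup = parser.request('https://en.wikipedia.org/wiki/Mathematician')
if soup is not None:             # request() returns None on any failure
    print(soup.title.get_text())
```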
|
{
"source": "jersson/mit-intro-cs-python",
"score": 4
}
|
#### File: 04-functions/ex-06-is-in/isIn.py
```python
def isIn(char, aStr):
'''
char: a single character
aStr: an alphabetized string
returns: True if char is in aStr; False otherwise
'''
result = False
size = len(aStr)
if size == 0:
result = False
elif size == 1:
if char == aStr[0]:
result = True
else:
begin = 0
end = size - 1
point = int((begin + end) / 2)
if aStr[point - 1] == char:
result = True
else:
if aStr[point] > char:
end = point
else:
begin = point
result = isIn(char, aStr[begin: end])
return result
print(isIn('a', 'abcdefghijklmnopqrstuvwxyz')) #True
print(isIn('u', 'abcdefghijklmnopqrstuvwxyz')) #True
print(isIn('u', 'abcdefghijklmnopqrst')) #False
print(isIn('d', 'abbcdloqrruw')) #True
print(isIn('a', '')) #False
```
#### File: week-03-mid-term-exam/problem-05/program.py
```python
def uniqueValues(aDict):
'''
aDict: a dictionary
'''
# Your code here
result = dict()
repeated = list()
while len(aDict) > 0:
item = aDict.popitem()
value = item[1]
values = aDict.values()
if (value not in values and value not in repeated):
result[item[0]] = item[1]
else:
repeated.append(value)
return sorted(result)
# testDict = {1: 1, 2: 2, 3: 3}
# print(uniqueValues(testDict))
# testDict = {1: 1, 2: 1, 3: 1}
# print(uniqueValues(testDict))
# testDict = {1: 1}
# print(uniqueValues(testDict))
testDict = {}
print(uniqueValues(testDict))
```
#### File: 07-testind-and-debugging/ex-06-remainder/program.py
```python
def rem(x, a):
"""
x: a non-negative integer argument
a: a positive integer argument
returns: integer, the remainder when x is divided by a.
"""
if x == a:
return 0
elif x < a:
return x
else:
return rem(x-a, a)
print(rem(2, 5))
print(rem(5, 5))
print(rem(7, 5))
```
#### File: 10-an-extended-example/ex-02-genPrimes/program.py
```python
def genPrimes():
next = 2
while True:
if next == 2:
yield next
next += 1
else:
tmp = True
for i in range(2,next):
tmp = tmp and (next % i != 0)
if tmp:
yield next
next += 1
test = genPrimes()
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
print(test.__next__())
```
|
{
"source": "jersson/web-scraping-intro",
"score": 3
}
|
#### File: jersson/web-scraping-intro/test_Mathematicians.py
```python
from Mathematicians import Mathematicians
def test_get_names():
mathematicians = Mathematicians()
names = mathematicians.list_names(100)
assert names is not None
def test_get_names_with_default_parameter_value():
mathematicians = Mathematicians()
names = mathematicians.list_names()
assert len(names) == 100
def test_get_10_names():
mathematicians = Mathematicians()
names = mathematicians.list_names(10)
assert len(names) == 10
```
|
{
"source": "Jertusz/python-tf2-schema",
"score": 3
}
|
#### File: python-tf2-schema/tf2schema/webapi.py
```python
import requests
"""
:param {String} sub_api: Name of Api part being called
:param {String} version: Version of the api
:param {String} api_key: Api Key from https://steamcommunity.com/dev/apikey
:param {Int} input: Start index used to page through the full item list; required by /GetSchemaItems/
"""
def api_request(sub_api, version, api_key=None, input=None):
api = {
'url': 'http://api.steampowered.com/',
'interface': 'IEconItems_440',
'sub_api': sub_api,
'version': version,
'key': api_key,
}
if input is not None and input != 0:
result = requests.get(
f'{api["url"]}{api["interface"]}/{api["sub_api"]}/{api["version"]}/?key={api["key"]}&start={input}'
)
else:
result = requests.get(
f'{api["url"]}{api["interface"]}/{api["sub_api"]}/{api["version"]}/?key={api["key"]}'
)
code = result.status_code
if code == 200:
return result.json()
else:
print(f'Request failed with code: {code}, returned None')
return
```
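A call sketch for the wrapper above; the API key is a placeholder, GetSchemaOverview/v0001 is one of the IEconItems_440 endpoints this wrapper targets, and the 'result'/'status' keys follow Valve's usual Web API response envelope (an assumption worth verifying against the live API).
```python
# Placeholder key; the import path assumes the package layout shown above.
from tf2schema.webapi import api_request

overview = api_request('GetSchemaOverview', 'v0001', api_key='YOUR_STEAM_API_KEY')
if overview is not None:
    print(overview['result']['status'])
```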
|
{
"source": "jerubball/Dont-Starve-Together-Server-Tools",
"score": 2
}
|
#### File: Dont-Starve-Together-Server-Tools/Discord/discordbot.py
```python
import os
import subprocess
import types
import copy
import re
import json
import datetime
import asyncio
import discord
def findmatchlist(pattern, string):
matches = []
results = re.finditer(pattern, string)
for result in results:
matches.append(result)
return matches
def deleteallmatches(pattern, string):
pattern = pattern if pattern is not None else r'(<[@#][!&]?[0-9]+>|@[A-Za-z0-9]+)'
results = re.finditer(pattern, string)
index = 0
newstring = ''
for result in results:
newstring += string[index:result.start()]
index = result.end()
newstring += string[index:]
return newstring
class HimBotException(Exception):
'''Base class for Exceptions raised by HimBotClient module.'''
pass
class NameException(HimBotException):
'''Added subcommand to target command where command name key already exists.'''
def __init__(self, supercommandname, subcommandname):
super().__init__('Key ' + subcommandname + ' exists in ' + supercommandname + '.')
class PermissionException(HimBotException):
'''User invoked command with no applicable permission.'''
def __init__(self, commandname):
super().__init__('You do not have permission for command: `' + commandname + '`')
class ParameterException(HimBotException):
'''User invoked command with missing parameter.'''
def __init__(self, commandname):
super().__init__('Missing command after: `' + commandname + '`\nType `@HimBot help [command]` for more information.')
class UnknownException(HimBotException):
'''User invoked command with unknown parameter or extra parameter.'''
def __init__(self, commandname):
super().__init__('Unknown command: `' + commandname + '`\nType `@HimBot help [command]` for more information.')
class PermissionItem:
'''Data container for permission.
All id fields are of integer.
None field represent 'Any'.
Thus, PermissionItem() grants access to anyone in any channel in any server.
'''
def __init__(self, guildid = None, channelid = None, *, userid = None, roleid = None):
self.guildid = guildid
self.channelid = channelid
self.userid = userid
self.roleid = roleid
def check(self, textchannel, member):
'''Checks if permission is valid in given channel with given user.
textchannel must be type of discord.TextChannel and member must be type of discord.Member.'''
assert type(textchannel) is discord.TextChannel and type(member) is discord.Member
if self.guildid is None or self.guildid == textchannel.guild.id:
if self.channelid is None or self.channelid == textchannel.id:
if self.userid is None or self.userid == member.id:
if self.roleid is None:
return True
for role in member.roles:
if self.roleid == role.id:
return True
return False
class CommandItem:
'''Represents a command in single level.
name must contain single word with no whitespace.
aliases should be a list/tuple/set of strings or None.
description should be a string or None.
perm describes command permissiona and can be following types:
None to use parent permission,
True to grant all access,
False to deny all access,
a PeremissionItem or a list/tuple/set of PermissionItem for permission specs.
func should be a function or instance method, which can be coroutine, or None.
accepts describes function's parameter behavior and can be following types:
False to indicate no additional parameters (except that of subcommand),
True to indicate any additiona parameters,
or integer to indicate number of additional parameters.
This has no effect if func is None.
    supercommand should be a CommandItem or None.
subcommands should be a list/tuple/set of CommandItem or None.
'''
def __init__(self, name, *, aliases = None, description = None, perm = None, func = None, accepts = False, subcommands = None):
self.name = name
self.aliases = aliases
self.description = description
self.perm = perm
self.func = func
self.accepts = accepts
self.fullname = self.name
self.supercommand = None
self.subcommands = {}
self.subcommandaliases = {}
if subcommands is not None:
self.add(*subcommands)
def __str__(self):
return self.fullname
def __len__(self):
return len(self.subcommands)
def __contains__(self, subcommandname):
return subcommandname in self.subcommands or subcommandname in self.subcommandaliases
def __repr__(self):
return self.fullname
def __copy__(self):
item = CommandItem(self.name, aliases = self.aliases, description = self.description, perm = self.perm, func = self.func, accepts = self.accepts)
item.subcommands = self.subcommands
item.subcommandaliases = self.subcommandaliases
return item
def __deepcopy__(self, memo = None):
pass
def __deepcopy1__(self, memo = None):
memo[id(self)] = self
def setSuper(self, supercommand, memo = None):
        '''Sets supercommand and updates its own and its subcommands' fullname.
        This command must already be a subcommand of supercommand.'''
#if memo is None:
# memo = set()
#assert self not in memo
#memo.add(self)
self.supercommand = supercommand
assert self.supercommand.has(self.name)
self.fullname = self.name if self.supercommand is None else supercommand.fullname + ' ' + self.name
for command in self.subcommands.values():
command.setSuper(self, memo)
def add(self, *subcommands):
'''Adds subcommand and alias entry.
subcommand's supercommand field is also updated.'''
for subcommand in subcommands:
if self.has(subcommand.name):
raise NameException(self.name, subcommand.name)
else:
self.subcommands[subcommand.name] = subcommand
subcommand.setSuper(self)
if subcommand.aliases is not None:
for alias in subcommand.aliases:
if self.has(alias):
raise NameException(self.name, alias)
else:
self.subcommandaliases[alias] = subcommand.name
def get(self, subcommandname):
'''Get CommandItem that matches subcommandname'''
return self.subcommands[subcommandname] if subcommandname in self.subcommands else self.subcommands[self.subcommandaliases[subcommandname]]
def has(self, subcommandname):
        '''Check if subcommandname is a valid subcommand key of this command'''
return subcommandname in self.subcommands or subcommandname in self.subcommandaliases
def check(self, message):
'''Check if given message's information meets permisssion spec.
message must be that from guild; with discord.TextChannel as type of message.channel and discord.Member as type of message.author'''
if self.perm is None:
return False if self.supercommand is None else self.supercommand.check(message)
elif type(self.perm) is bool:
return self.perm
elif type(self.perm) is PermissionItem:
return self.perm.check(message.channel, message.author)
elif type(self.perm) is dict:
for permname in self.perm:
                if self.perm[permname].check(message.channel, message.author):
return True
return False
elif type(self.perm) in (list, set, tuple):
for perm in self.perm:
if perm.check(message.channel, message.author):
return True
return False
else:
return False
def getSubprocessSSHCommand(*remotecommand):
commands = ['ssh', '-i', '~/.ssh/id_rsa_nopasswd', '192.168.1.31']
for item in remotecommand:
commands.append(item)
return commands
class HimBotClient(discord.Client):
'''HimBotClient servers user command to automate remote command task.
commandlist is a reference to root CommandItem that functions as start point for tree search. The root item is expected to have no functionality.
All command set should be defined as child CommandItem of commandlist.
iddata contains integer id for guild, channel, user, and role. It is useful to have these information beforehand, as connection is not established when object is constructed.
permdata contains integer pair of guild, channel, user, and role. It is used to construct PermissionItems.
rootoverride is boolean that indicates that root, that is the application owner, overrides all permission structure.
owneroverride is boolean that indicates that the guild owner overrides all permission in that guild.
    adminoverride is boolean that indicates that the admins override all permission in that guild.
'''
def __init__(self, iddata=None, permdata=None):
super().__init__()
self.appinfo = None
self.iddata = iddata
self.rootoverride = True
self.owneroverride = False
self.adminoverride = False
self.memberperm = PermissionItem(self.iddata['guildid'], self.iddata['primarychannelid'], roleid = self.iddata['userroleid'])
self.subcommandgroup = {
'minecraft': [
CommandItem('spigot', aliases = ['plugin'], func = lambda *_, **__: '"StartMinecraftServerSpigot"'),
CommandItem('vanilla', aliases = ['original'], func = lambda *_, **__: '"StartMinecraftServerVanilla"') ]
}
self.commandlist = CommandItem('@HimBot', description = 'Type `@HimBot help [command]` for more information.', perm = True, subcommands = [
CommandItem('help', description = 'Get information about command.', aliases = [None], perm = True, accepts = True, func = self.cmdHelp),
CommandItem('status', description = 'Get HimBot status.', aliases = ['info', 'version'], perm = True, func = self.cmdStatus),
CommandItem('link', description = 'Get server invitation link.', perm = True, func = self.cmdLink, subcommands = [
CommandItem('raw', description = 'Format invitation as raw text.', func = lambda *_, **__: True) ]),
CommandItem('server', description = 'Controls server computer.', perm = self.memberperm, subcommands = [
CommandItem('wakeup', func = self.cmdServer31Wakeup),
CommandItem('shutdown', perm = False, func = self.cmdServer31Shutdown),
CommandItem('priority', func = self.cmdServer31Priority, subcommands = [
CommandItem('normal', aliases = [None], func = lambda *_, **__: '"SetServerProcessPriorityNormal"'),
CommandItem('minecraft', func = lambda *_, **__: '"SetServerProcessPriorityMinecraft"') ]),
CommandItem('status', func = self.cmdServer31Status) ]),
CommandItem('minecraftserver', description = 'Controls minecraft server.', aliases = ['mcserver', 'minecraft'], perm = self.memberperm, subcommands = [
CommandItem('start', func = self.cmdMinecraftServer31Start, subcommands = copy.copy(self.subcommandgroup['minecraft'])),
CommandItem('stop', func = self.cmdMinecraftServer31Stop, subcommands = copy.copy(self.subcommandgroup['minecraft'])),
                CommandItem('priority', func = lambda cmd, res: self.cmdServer31Priority(cmd, '"SetServerProcessPriorityMinecraft"')),
CommandItem('status', func = self.cmdMinecraftServer31Status) ])
])
self.versionstring = 'HimBot-DiscordClient v1.2.0'
self.starttime = datetime.datetime.now()
def checkRootPerm(self, user):
return self.rootoverride and user.id == self.appinfo.owner.id
def checkOwnerPerm(self, user):
return self.owneroverride and type(user) is discord.Member and user.guild.owner.id == user.id
def checkAdminPerm(self, user):
if self.adminoverride and type(user) is discord.Member:
            if user.guild_permissions.administrator:
return True
for role in user.roles:
if role.permissions.administrator:
return True
return False
def checkClientMentionString(self, string):
#return self.user.mention == string
return re.fullmatch('^<@!?' + str(self.user.id) + '>$', string)
async def on_ready(self):
self.commandlist.alises = [self.user.mention]
self.appinfo = await self.application_info()
print('Logged on as', self.user)
async def on_disconnect(self):
print('Disconnected!')
async def on_resumed(self):
print('Resumed!')
async def on_message(self, message):
'''Determines if sent message is command to HimBot.
If the message is command for HimBot, further analyze command.
If the command chain returns string or list of strings, it will be sent to the same text channel.
If the command chain raises exception, the exception message will be sent as the reply, mentioning the author.
'''
#print(message)
if message.author == self.user \
or type(message.channel) is not discord.TextChannel or type(message.author) is not discord.Member:
return
commands = message.content.lower().split()
print(commands)
if len(commands) > 0 and self.checkClientMentionString(commands[0]) \
and len(message.mentions) > 0 and self.user in message.mentions:
try:
# run down through command chain
result = await self.runCommand(message, commands, self.commandlist)
if type(result) in (list, tuple, set):
for item in result:
await message.channel.send(item)
else:
await message.channel.send(result)
#await message.reply(result)
print(' * Successful')
except HimBotException as exception:
await message.reply(str(exception))
print(' * Failed with ', type(exception))
except Exception as exception:
await message.reply('Internal error occurred.')
raise exception
async def runCommand(self, message, commands, commandlist):
'''Recursively analyze and run the given command list.
message must be from TextChannel with Member as author.
commands is list of string that contains the commands. This list cannot be empty, and each string is expected to be a single word.
commandlist is a reference to current root node to look up next subcommand.
'''
#print('\t', commands)
assert len(commands) > 0
if commandlist.check(message) \
or self.checkAdminPerm(message.author) or self.checkOwnerPerm(message.author) or self.checkRootPerm(message.author):
# a given parameter
if len(commands) > 1:
# a subcommand list
if len(commandlist) > 0:
# subcommand match
if commandlist.has(commands[1]):
result = await self.runCommand(message, commands[1:], commandlist.get(commands[1]))
# subcommand mismatch
else:
raise UnknownException(commands[1])
# no subcommand list
else:
# no function or no additional parameter
if commandlist.func is None or not commandlist.accepts:
raise UnknownException(commands[1])
# expected function with additional parameter
else:
result = None
# no given parameter
else:
# None subcommand
if len(commandlist) > 0 and commandlist.has(None):
result = await self.runCommand(message, [None], commandlist.get(None))
# no function
elif commandlist.func is None:
# subcommand exists
if len(commandlist) > 0:
raise ParameterException(commands[0])
# no function and no subcommand
else:
raise HimBotException('Invalid configuration for command: ' + commands[0])
else:
result = None
# execute function
if commandlist.func:
result = commandlist.func(commands[1:], result)
if type(result) is types.CoroutineType:
result = await result
# cascade result
return result
# no permission
else:
raise PermissionException(commands[0])
async def on_message_edit(self, messagebefore, messageafter):
pass
async def on_reaction_add(self, reaction, user):
pass
def cmdHelp(self, commands, result):
commandlist = self.commandlist
while len(commands) > 0 and commandlist.has(commands[0]):
commandlist = commandlist.get(commands[0])
commands.pop(0)
content = 'Help topic for `' + str(commandlist) + '`:'
if commandlist.description is not None:
content += '\n> ' + commandlist.description
if commandlist.aliases is not None:
content += '\n> Aliases: '
firstitem = True
for alias in commandlist.aliases:
if firstitem:
firstitem = False
else:
content += ', '
if alias is None:
content += '(default)'
else:
content += '`' + alias + '`'
if len(commandlist) > 0:
content += '\n> Available subcommands: '
firstitem = True
for command in commandlist.subcommands.values():
if firstitem:
firstitem = False
else:
content += ', '
content += '`' + str(command.name) + '`'
return content
def cmdStatus(self, *_, **__):
content = self.user.mention + ' is online.\nStart time: `' + str(self.starttime) + '`\n' + self.versionstring
return content
def cmdLink(self, commands, result):
content = '''```fix
Welcome to our Discord Server!
Server UnServer NoServer.
a.k.a. SUNday server.
https://discord.gg/BBcKJs3
```'''
if result:
return '\\' + content
else:
return content
def cmdServer31Wakeup(self, commands, result):
wakeup = subprocess.run(['./wakeup.sh'], capture_output=True, text=True)
if wakeup.returncode == 0:
content = 'Sent wakeup signal to server computer.'
else:
content = 'Failed to send wakeup signal.'
return content
def cmdServer31Shutdown(self, commands, result):
shutdown = subprocess.run(getSubprocessSSHCommand(r'powershell Stop-Computer -Force'), capture_output=True, text=True)
if shutdown.returncode == 0:
content = 'Sent shutdown signal to server computer.'
else:
content = 'Failed to send shutdown signal.'
if shutdown.stdout and len(shutdown.stdout) > 0:
content += '\n```' + shutdown.stdout + '```'
return content
def cmdServer31Priority(self, commands, result):
start = subprocess.run(getSubprocessSSHCommand(r'powershell Start-ScheduledTask -TaskName ' + result), capture_output=True, text=True)
if start.returncode == 0:
content = 'Sent command to server computer.'
else:
content = 'Failed to send signal.'
if start.stdout and len(start.stdout) > 0:
content += '\n```' + start.stdout + '```'
return content
def cmdServer31Status(self, commands, result):
ping = subprocess.run(['ping', '-c1', '192.168.1.31'], capture_output=True, text=True)
if ping.returncode == 0:
content = 'The server computer is online.'
else:
content = 'The server computer is **offline**.'
if ping.stdout and len(ping.stdout) > 0:
content += '\n```' + ping.stdout[ping.stdout.find('\n\n')+2:] + '```'
return content
def cmdServer31Check(self, commands, result):
if result is not None:
groupnames = [result]
else:
groupnames = self.subcommandgroup.keys()
for groupname in groupnames:
for command in self.subcommandgroup[groupname]:
taskname = command.func(self, commands, result)
check = subprocess.run(getSubprocessSSHCommand(r'powershell (Get-ScheduledTask -TaskName ' + taskname + r').State'), capture_output=True, text=True)
                if check.returncode == 0 and check.stdout and len(check.stdout) > 0 and check.stdout.strip() == 'Running':
return 'Running'
return 'Not Running'
def cmdMinecraftServer31Start(self, commands, result):
start = subprocess.run(getSubprocessSSHCommand(r'powershell Start-ScheduledTask -TaskName ' + result), capture_output=True, text=True)
if start.returncode == 0:
content = 'Sent start command to server computer.'
else:
content = 'Failed to send start signal.'
if start.stdout and len(start.stdout) > 0:
content += '\n```' + start.stdout + '```'
return content
def cmdMinecraftServer31Stop(self, commands, result):
pass
def cmdMinecraftServer31Status(self, commands, result):
contents = [self.cmdServer31Status(commands, result)]
status = subprocess.run(['/home/hasol/.local/bin/mcstatus', '192.168.1.31:25566', 'status'], capture_output=True, text=True)
if status.returncode == 0:
content = 'Minecraft server is running.'
else:
content = 'Minecraft server is **stopped**.'
if status.stdout and len(status.stdout) > 0:
content += '\n```' + status.stdout + '```'
contents.append(content)
return contents
if __name__ == "__main__": # main.
os.chdir('/home/hasol/Documents/Discord')
with open('.token') as tokenfile:
clienttoken = tokenfile.read().strip()
with open('idfile.json') as idfile:
iddata = json.loads(idfile.read())
client = HimBotClient(iddata)
client.run(clienttoken)
```
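For illustration, a Discord-free sketch of the command tree that CommandItem implements, assuming it runs in the same module as the classes above; no client connection or token is involved.
```python
# Standalone illustration of the CommandItem tree.
root = CommandItem('@HimBot', perm=True, subcommands=[
    CommandItem('ping', aliases=['p'], perm=True, func=lambda *_, **__: 'pong'),
])
print(root.has('ping'), root.has('p'))  # True True
print(root.get('p').fullname)           # @HimBot ping
```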
#### File: Minecraft/economy/recipes.py
```python
import os
import json
import re
#import sys
#os.chdir('recipes')
recipedir = 'recipes\\'
items = os.listdir(recipedir)
tagdir = 'tags\\items\\'
tags = os.listdir(tagdir)
def getItemName(item):
index = item.find('_from_')
if index != -1:
itemname = item[:index]
else:
itemname = item[:-5]
return itemname
def enumerateRecipeNames(items, outfilename='recipes.json', showentry=True):
names = {}
for item in items:
itemname = getItemName(item)
if itemname not in names:
names[itemname] = {'count': 1, 'entry': []}
else:
names[itemname]['count'] += 1
if showentry:
names[itemname]['entry'].append(item)
if outfilename is not None:
with open(outfilename, mode='w') as recipefile:
recipefile.write(json.dumps(names, sort_keys=True, indent=2))
return names
def enumerateRecipeTypes(items, itemprefix=recipedir, outfilename='types.json', showentry=True):
types = {}
for item in items:
with open(itemprefix + item) as recipe:
info = json.loads(recipe.read())
if info['type'] not in types:
types[info['type']] = {'count': 1, 'entry': []}
else:
types[info['type']]['count'] += 1
if showentry:
types[info['type']]['entry'].append(item)
if outfilename is not None:
with open(outfilename, mode='w') as outfile:
outfile.write(json.dumps(types, sort_keys=True, indent=2))
return types
def enumerateItemTagList(tags, tagprefix=tagdir, outfilename='itemtags.json'):
names = {}
for tag in tags:
with open(tagprefix + tag) as tagdata:
info = json.loads(tagdata.read())
tagname = 'minecraft:' + tag[:-5]
name = ''
for entry in info['values']:
if len(name) > 0:
name += '||'
name += entry
names[tagname] = name
for tagname in names:
while '#' in names[tagname]:
match = re.search('#([^|]+)', names[tagname])
searchname = match.group(1)
if searchname == tagname:
raise Exception('recursion detected')
else:
names[tagname] = names[tagname][:match.start()] + names[searchname] + names[tagname][match.end():]
if outfilename is not None:
with open(outfilename, mode='w') as outfile:
outfile.write(json.dumps(names, sort_keys=True, indent=2))
return names
def getIngredientName(data, exceptionmsg='unrecognized format'):
# data has list
if type(data) == list:
name = ''
for entry in data:
if len(name) > 0:
name += '||'
if 'item' in entry:
name += entry['item']
elif 'tag' in entry:
try:
                    name += itemtaglist[entry['tag']]
                except (NameError, KeyError):
                    name += 'tag:' + entry['tag']
else:
raise Exception(exceptionmsg)
# data has 'item'
elif 'item' in data:
name = data['item']
# data has 'tag'
elif 'tag' in data:
try:
name = itemtaglist[data['tag']]
except (NameError, KeyError):
name = 'tag:' + data['tag']
# data has none
else:
raise Exception(exceptionmsg)
return name
def getCraftingResult(info, ingredient):
result = {}
if type(info['result']) == dict and 'count' in info['result']:
result['count'] = info['result']['count']
else:
result['count'] = 1
result['ingredient'] = ingredient
return result
def addIngredient(ingredients, itemname, result):
if itemname not in ingredients:
ingredients[itemname] = []
ingredients[itemname].append(result)
def readBaselineIngredients(infilename='baseingredients.json'):
ingredients = None
with open(infilename) as ingredientfile:
ingredients = json.loads(ingredientfile.read())
return ingredients
def enumerateRecipeIngredients(ingredients, items, itemprefix=recipedir, outfilename='ingredients.json'):
for item in items:
#itemname = getItemName(item)
with open(itemprefix + item) as recipe:
info = json.loads(recipe.read())
if 'crafting_special_' in info['type']:
continue
# shaped crafting recipe
if info['type'] == 'minecraft:crafting_shaped':
counter = {}
for line in info['pattern']:
for char in line:
if char not in counter:
counter[char] = 1
else:
counter[char] += 1
countresult = {}
for key, value in counter.items():
if key in info['key']:
name = getIngredientName(info['key'][key], 'unrecognized format for shaped recipe key')
countresult[name] = value
addIngredient(ingredients, info['result']['item'], getCraftingResult(info, countresult))
# shapeless crafting recipe
if info['type'] == 'minecraft:crafting_shapeless':
countresult = {}
for key in info['ingredients']:
name = getIngredientName(key, 'unrecognized format for shapeless recipe ingredients')
if name not in countresult:
countresult[name] = 1
else:
countresult[name] += 1
addIngredient(ingredients, info['result']['item'], getCraftingResult(info, countresult))
# smelting crafting recipe
if info['type'] == 'minecraft:smelting':
countresult = {'minecraft:coal': 0.125}
name = getIngredientName(info['ingredient'], 'unrecognized format for smelting recipe ingredient')
countresult[name] = 1
addIngredient(ingredients, info['result'], getCraftingResult(info, countresult))
# smithing crafting recipe
if info['type'] == 'minecraft:smithing':
countresult = {}
name = getIngredientName(info['base'])
countresult[name] = 1
name = getIngredientName(info['addition'])
if name not in countresult:
countresult[name] = 1
else:
countresult[name] += 1
addIngredient(ingredients, info['result']['item'], getCraftingResult(info, countresult))
if outfilename is not None:
with open(outfilename, mode='w') as outfile:
outfile.write(json.dumps(ingredients, sort_keys=True, indent=2))
return ingredients
def readBaselineCosts(infilename='basecosts.json'):
costs = None
with open(infilename) as costfile:
costs = json.loads(costfile.read())
return costs
#stacktrace = []
#baseitemnames = ['minecraft:wheat', 'minecraft:honey_bottle', 'minecraft:slime_ball']
def calculateRelativeCost(costs, outfilename='costs.json', verbose=False):
def calculateItemCost(itemname):
#stacktrace.append(itemname)
if itemname not in costs:
if itemname in ingredientlist:
itemdata = ingredientlist[itemname]
if len(itemdata) > 1:
if 'minecraft:lapis_lazuli' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:redstone_block' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:honey_bottle' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:coal_block' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:diamond_block' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:dried_kelp_block' in itemdata[0]['ingredient']:
index = 1
elif 'minecraft:emerald_block' in itemdata[0]['ingredient']:
index = 1
else:
index = 0
#index = int(input('Enter recipe index to use in calculation: '))
if verbose:
print('Multiple recipes found for', itemname)
                        for i in range(len(itemdata)):
                            print(i, ':', itemdata[i])
print('using index', index)
recipe = itemdata[index]
else:
recipe = itemdata[0]
totalcost = 0.0
for key, value in recipe['ingredient'].items():
if '||' in key:
keyitems = key.split('||')
if keyitems[0] in ['minecraft:chiseled_quartz_block']:
index = 1
else:
index = 0
#index = int(input('Enter ingredient index to use in calculation: '))
if verbose:
print('Multiple ingredients found for', itemname)
                            for i in range(len(keyitems)):
                                print(i, ':', keyitems[i])
                            print('using index', index)
key = keyitems[index]
totalcost += calculateItemCost(key) * value
totalcost /= recipe['count']
if 'factor' in recipe:
factor = recipe['factor']
else:
factor = 1 + min(max(0.5 / totalcost, 0.01), 0.5)
costs[itemname] = round(totalcost * factor, 2)
else:
#print('Base price for', itemname, 'is assumed to be 10')
#costs[itemname] = 10.0
#baseitemnames.append(itemname)
costs[itemname] = float(input('Enter base price for ' + itemname + ': '))
#stacktrace.pop()
return costs[itemname]
for itemname in ingredientlist:
calculateItemCost(itemname)
if outfilename is not None:
with open(outfilename, mode='w') as outfile:
outfile.write(json.dumps(costs, sort_keys=True, indent=2))
return costs
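# Worked example of the markup factor used above (illustrative numbers, not taken from the data files):
# for a per-item ingredient cost of 4.0, factor = 1 + min(max(0.5 / 4.0, 0.01), 0.5) = 1.125,
# so the item is priced at round(4.0 * 1.125, 2) = 4.5. Very cheap items get at most a +50% markup,
# very expensive ones bottom out at +1%.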
def generatePotionRecipeIngredients(outfilename='potioningredients.json'):
effectlist = {
'water': {'long': False, 'strong': False, 'ingredient': None},
'awkward': {'long': False, 'strong': False, 'ingredient': ('minecraft:water_potion', 'minecraft:nether_wart')},
'mundane': {'long': False, 'strong': False, 'ingredient': ('minecraft:water_potion', 'minecraft:sugar')},
'thick': {'long': False, 'strong': False, 'ingredient': ('minecraft:water_potion', 'minecraft:glowstone_dust')},
'fire_resistance': {'long': True, 'strong': False, 'ingredient': ('minecraft:awkward_potion', 'minecraft:magma_cream')},
'harming': {'long': False, 'strong': True, 'ingredient': ('minecraft:poison_potion', 'minecraft:fermented_spider_eye')},
'healing': {'long': False, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:glistering_melon_slice')},
'invisibility': {'long': True, 'strong': False, 'ingredient': ('minecraft:night_vision_potion', 'minecraft:fermented_spider_eye')},
'leaping': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:rabbit_foot')},
'night_vision': {'long': True, 'strong': False, 'ingredient': ('minecraft:awkward_potion', 'minecraft:golden_carrot')},
'poison': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:spider_eye')},
'regeneration': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:ghast_tear')},
'slowness': {'long': True, 'strong': True, 'ingredient': ('minecraft:swiftness_potion', 'minecraft:fermented_spider_eye')},
'slow_falling': {'long': True, 'strong': False, 'ingredient': ('minecraft:awkward_potion', 'minecraft:phantom_membrane')},
'strength': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:blaze_powder')},
'swiftness': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:sugar')},
'turtle_master': {'long': True, 'strong': True, 'ingredient': ('minecraft:awkward_potion', 'minecraft:turtle_helmet')},
'water_breathing': {'long': True, 'strong': False, 'ingredient': ('minecraft:awkward_potion', 'minecraft:pufferfish')},
'weakness': {'long': True, 'strong': False, 'ingredient': ('minecraft:water_potion', 'minecraft:fermented_spider_eye')}
}
def addKeyResults(results, key, keyname):
keyname1 = 'minecraft:' + key + '_splash_potion'
results[keyname1] = [{
'count': 2, 'ingredient': {'minecraft:blaze_powder': 0.1, keyname: 2, 'minecraft:gunpowder': 1}
}]
keyname2 = 'minecraft:' + key + '_lingering_potion'
results[keyname2] = [{
'count': 2, 'ingredient': {'minecraft:blaze_powder': 0.1, keyname1: 2, 'minecraft:dragon_breath': 1}
}]
keyname3 = 'minecraft:' + key + '_tipped_arrow'
results[keyname3] = [{
'count': 8, 'ingredient': {keyname2: 1, 'minecraft:arrow': 8}
}]
results = {}
for key, value in effectlist.items():
keyname = 'minecraft:' + key + '_potion'
if value['ingredient'] is not None:
results[keyname] = [{
'count': 2, 'ingredient': {'minecraft:blaze_powder': 0.1, value['ingredient'][0]: 2, value['ingredient'][1]: 1}
}]
addKeyResults(results, key, keyname)
if value['long']:
newkey = 'long_' + key
newkeyname = 'minecraft:' + newkey + '_potion'
results[newkeyname] = [{
'count': 2, 'ingredient': {'minecraft:blaze_powder': 0.1, keyname: 2, 'minecraft:redstone': 1}
}]
addKeyResults(results, newkey, newkeyname)
if value['strong']:
newkey = 'strong_' + key
newkeyname = 'minecraft:' + newkey + '_potion'
results[newkeyname] = [{
'count': 2, 'ingredient': {'minecraft:blaze_powder': 0.1, keyname: 2, 'minecraft:glowstone_dust': 1}
}]
addKeyResults(results, newkey, newkeyname)
if outfilename is not None:
with open(outfilename, mode='w') as outfile:
outfile.write(json.dumps(results, sort_keys=True, indent=2))
return results
#enumerateRecipeNames(items)
#enumerateRecipeTypes(items, showentry=True)
itemtaglist = enumerateItemTagList(tags, outfilename=None)
ingredientlist = enumerateRecipeIngredients(readBaselineIngredients(), items, outfilename=None)
costs = calculateRelativeCost(readBaselineCosts())
#generatePotionRecipeIngredients()
```
#### File: Minecraft/python/RandomBox.py
```python
def getLootTableCommand():
LootTables = ("minecraft:chests/abandoned_mineshaft",
"minecraft:chests/bastion_bridge",
"minecraft:chests/bastion_hoglin_stable",
"minecraft:chests/bastion_other",
"minecraft:chests/bastion_treasure",
"minecraft:chests/buried_treasure",
"minecraft:chests/desert_pyramid",
"minecraft:chests/end_city_treasure",
"minecraft:chests/igloo_chest",
"minecraft:chests/jungle_temple",
"minecraft:chests/nether_bridge",
"minecraft:chests/pillager_outpost",
"minecraft:chests/ruined_portal",
"minecraft:chests/shipwreck_map",
"minecraft:chests/shipwreck_supply",
"minecraft:chests/shipwreck_treasure",
"minecraft:chests/simple_dungeon",
"minecraft:chests/spawn_bonus_chest",
"minecraft:chests/stronghold_corridor",
"minecraft:chests/stronghold_crossing",
"minecraft:chests/stronghold_library",
"minecraft:chests/underwater_ruin_big",
"minecraft:chests/underwater_ruin_small",
"minecraft:chests/woodland_mansion",
"minecraft:chests/village/village_armorer",
"minecraft:chests/village/village_butcher",
"minecraft:chests/village/village_cartographer",
"minecraft:chests/village/village_mason",
"minecraft:chests/village/village_shepherd",
"minecraft:chests/village/village_tannery",
"minecraft:chests/village/village_weaponsmith",
"minecraft:chests/village/village_desert_house",
"minecraft:chests/village/village_plains_house",
"minecraft:chests/village/village_savanna_house",
"minecraft:chests/village/village_snowy_house",
"minecraft:chests/village/village_taiga_house",
"minecraft:chests/village/village_fisher",
"minecraft:chests/village/village_fletcher",
"minecraft:chests/village/village_temple",
"minecraft:chests/village/village_toolsmith") # len=40
#print(len(LootTables))
lootTableCommand = []
for entry in LootTables:
lootTableCommand.append('execute at @e[type=armor_stand,tag=box_target,sort=nearest,limit=1] if block ~ ~ ~ trapped_chest run data modify block ~ ~ ~ LootTable set value "' + entry + '"')
return lootTableCommand
def getSingleCommand(lootTableCommand, size=8, direction='negative-z'):
#summon minecraft:armor_stand ~ ~1 ~ {Marker:1b,Invulnerable:1b,NoGravity:1b,Invisible:1b,Tags:["command_random","box_target"],DisabledSlots:16191,CustomNameVisible:1b,CustomName:"{\"text\":\"?\"}"}
#execute at @e[type=minecraft:armor_stand,tag=box_randomizer,sort=random,limit=1] if block ~ ~ ~ command_block run setblock ~1 ~ ~ redstone_block
import SingleCommandGenerator
armorStandCommand = ['#@ remove+', '#@ ' + direction, '#@ default impulse']
armorStand = 'summon minecraft:armor_stand ~ ~-1 ~ {Marker:1b,Invulnerable:1b,NoGravity:1b,Invisible:1b,Tags:["command_random","box_randomizer"],DisabledSlots:16191,CustomNameVisible:1b,CustomName:"{\\"text\\":\\"box\\"}"}'
for i in range(size):
armorStandCommand.append(armorStand)
armorStandSingle = SingleCommandGenerator.parse(armorStandCommand, outfile=False)
def new_commandlist():
return ['#@ ' + direction, '#@ skip 1', '#@ default impulse', '#@ default manual', '#@ auto', armorStandSingle]
singleCommands = []
commandlist = new_commandlist()
count = 0
for command in lootTableCommand:
commandlist.append(command)
count += 1
if count == size:
singleCommands.append(SingleCommandGenerator.parse(commandlist, outfile=False))
commandlist = new_commandlist()
count = 0
return singleCommands
def getEnchantbookCommand():
enchantments = {"aqua_affinity": [1], #
"bane_of_arthropods": [1,2,3,4,5], #
"binding_curse": [1],
"blast_protection": [1,2,3,4], #
"channeling": [1], #
"depth_strider": [1,2,3], #
"efficiency": [1,2,3,4,5], # ****
"feather_falling": [1,2,3,4], #
"fire_aspect": [1,2], # *
"fire_protection": [1,2,3,4], #
"flame": [1], # *
"fortune": [1,2,3], # ***
"frost_walker": [1,2], #
"impaling": [1,2,3,4,5], #
"infinity": [1], # **
"knockback": [1,2], #
"looting": [1,2,3], # ***
"loyalty": [1,2,3], #
"luck_of_the_sea": [1,2,3], #
"lure": [1,2,3], #
"mending": [1], # *****
"multishot": [1], #
"piercing": [1,2,3,4], #
"power": [1,2,3,4,5], # **
"projectile_protection": [1,2,3,4], #
"protection": [1,2,3,4], # ****
"punch": [1,2], #
"quick_charge": [1,2,3], #
"respiration": [1,2,3], #
"riptide": [1,2,3], #
"sharpness": [1,2,3,4,5], # ****
"silk_touch": [1], # **
"smite": [1,2,3,4,5], #
"soul_speed": [1,2,3], #
"sweeping": [1,2,3], # *
"thorns": [1,2,3], # **
"unbreaking": [1,2,3], # *****
"vanishing_curse": [1]} # total=110
enchantmentCommand = []
for key, levels in enchantments.items():
for level in levels:
enchantmentCommand.append('give @p[scores={Transaction=152}] enchanted_book{StoredEnchantments:[{lvl:' + str(level) + ',id:' + key + '}]}')
return enchantmentCommand
def getSingleCommand2(enchantmentCommand, size=11, direction='negative-z'):
#execute at @e[type=minecraft:armor_stand,tag=book_randomizer,sort=random,limit=1] if block ~ ~ ~ command_block run setblock ~1 ~ ~ redstone_block
import SingleCommandGenerator
armorStandCommand = ['#@ remove+', '#@ ' + direction, '#@ default impulse']
armorStand = 'summon minecraft:armor_stand ~ ~-1 ~ {Marker:1b,Invulnerable:1b,NoGravity:1b,Invisible:1b,Tags:["command_random","book_randomizer"],DisabledSlots:16191,CustomNameVisible:1b,CustomName:"{\\"text\\":\\"book\\"}"}'
for i in range(size):
armorStandCommand.append(armorStand)
armorStandSingle = SingleCommandGenerator.parse(armorStandCommand, outfile=False)
def new_commandlist():
return ['#@ ' + direction, '#@ skip 1', '#@ default impulse', '#@ default manual', '#@ auto', armorStandSingle]
singleCommands = []
commandlist = new_commandlist()
count = 0
for command in enchantmentCommand:
commandlist.append(command)
count += 1
if count == size:
singleCommands.append(SingleCommandGenerator.parse(commandlist, outfile=False))
commandlist = new_commandlist()
count = 0
return singleCommands
if __name__ == '__main__':
lootTableCommand = getLootTableCommand()
singleCommands = getSingleCommand(lootTableCommand)
#for item in singleCommands: print(item);
enchantmentCommand = getEnchantbookCommand()
singleCommands2 = getSingleCommand2(enchantmentCommand)
    for item in singleCommands2: print(item)
```
|
{
"source": "jerumanu/blogs",
"score": 3
}
|
#### File: blogs/app/request.py
```python
import urllib.request,json
base_url = None
def configure_request(app):
global base_url
base_url = app.config['QUOTE_API_BASE_URL']
def getquote():
getquote_url ='http://quotes.stormconsultancy.co.uk/random.json'
with urllib.request.urlopen(getquote_url) as url:
get_quote_data = url.read()
get_quote_response = json.loads(get_quote_data)
return get_quote_response
```
|
{
"source": "jerusalemmoore/CINS465-PROJECT",
"score": 2
}
|
#### File: mysite/myapp/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.contrib import messages
from django.contrib.auth import login, logout, authenticate, update_session_auth_hash
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from . import models
from . import forms
# import json
# Create your views here.
#TEMPLATE HOME PAGE
@login_required(login_url='/login/')
def homeView(request):
    if request.user.is_authenticated: # if user is logged in
        if request.method == 'POST':
            postForm = forms.PostForm(request.POST, request.FILES) # form can be submitted
            if postForm.is_valid():
                postModel = models.PostModel( # create a post model with the following
                    user = request.user,
                    content = postForm.cleaned_data["content"],
                    image = postForm.cleaned_data["image"]
                )
                postModel.save() # save the postModel
                postForm = forms.PostForm() # refresh the form
        else:
            postForm = forms.PostForm() # empty form for GET requests
        # li = models.PostModel.objects.filter(user=request.user) # list of the user's posts
        li = models.PostModel.objects.all()
        friend, created = models.Friend.objects.get_or_create(currentUser=request.user)
        friends = friend.users.all()
    else:
        return redirect('/login/') # return the redirect so the view does not fall through
context = {
"friends": friends,
"userPosts":li,
"hello_mesg":"CINS465 Hello World",
"title": "Hello World",
"userInfoForm": postForm,
}
return render(request, "index.html", context=context)
def postDelete(request, id):
post = get_object_or_404(models.PostModel, id=id)
if request.method == 'POST':
post.delete()
return redirect('/home/')
def changeFriendsUserView(request, operation, id):
li = models.PostModel.objects.filter(user=id)#list of posts from guest user
guestUser = models.User.objects.filter(id=id)[0]
friend, created = models.Friend.objects.get_or_create(currentUser=request.user)
friends = friend.users.all()
context = {
"userPosts":li,
"guestUser":guestUser,
"friends": friends
}
newFriend = User.objects.get(id=id)
if operation == 'add':
models.Friend.make_friend(request.user, newFriend)
elif operation == 'remove':
models.Friend.lose_friend(request.user, newFriend)
return render(request,"userPage.html", context=context)
def changeFriends(request,operation,id):
newFriend = User.objects.get(id=id)
if operation == 'add':
models.Friend.make_friend(request.user, newFriend)
elif operation == 'remove':
models.Friend.lose_friend(request.user, newFriend)
return redirect('/home/')
#LOGIN PAGE
# def loginView(request):
# if request.method == 'POST':
# form = AuthenticationForm(data=request.POST)
# return render(request,'login.html')
def loginView(request):
if request.method == 'POST':
form = AuthenticationForm(request.POST)
username=request.POST['username']
password=request.POST['password']
user = authenticate(username=username,password=password)
# form = AuthenticationForm(request.POST)
if user is not None:
if user.is_active:
login(request, user)
return redirect('/home/')
else:
messages.error(request, 'Invalid username/password, click the Sign Up button to create an account')
return redirect('/login/')
else:
form = AuthenticationForm()
return render(request, 'login.html', {'form':form})
#LOGOUT PAGE
def logoutView(request):
logout(request)
return redirect('/login/')
#SIGNUP PAGE
def signupView(request):
# include a login form
form = UserCreationForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
form.save()
return redirect('/login/')
else:
form = UserCreationForm()
return render(request, "signup.html",{'form':form})
def exploreView(request):
if request.user.is_authenticated:
li = models.PostModel.objects.all()
context={
"globalPosts":li,
"explorePage":"Explore Page"
}
return render(request,"explore.html", context=context)
    else:
        return redirect("/login/")
def userView(request, id):
    li = models.PostModel.objects.all() # all posts, shown on the guest user's page
guestUser = models.User.objects.filter(id=id)[0]
friend, created = models.Friend.objects.get_or_create(currentUser=request.user)
friends = friend.users.all()
guestFriend, created = models.Friend.objects.get_or_create(currentUser=guestUser)
guestFriends = guestFriend.users.all()
context = {
"userPosts":li,
"guestFriends": guestFriends,
"guestUser":guestUser,
"friends": friends,
}
if request.user.id == id:
return redirect("/home/")
else:
return render(request,"userPage.html", context=context)
def getUsers(request):
userModels = models.UserInfoModel.objects.all()
userList = {}
userList["users"] = []
for user in userModels:
tempUser = {}
tempUser["id"] = user.id
tempUser["username"] = user.username
tempUser["userpassword"] = <PASSWORD>
tempUser["email"] = user.email
userList["users"].append(tempUser)
return JsonResponse(userList)
def friendsView(request):
friend, created = models.Friend.objects.get_or_create(currentUser=request.user)
friends = friend.users.all()
context = {
"friends":friends
}
return render(request, 'friends.html', context=context)
def guestFriendsView(request, id):
guestUser = models.User.objects.get(id=id)
friend, created = models.Friend.objects.get_or_create(currentUser=guestUser)
friends = friend.users.all()
context = {
"friends":friends,
"guestUser":guestUser,
}
return render(request,'guestFriends.html',context=context)
def chat(request):
return render(request, 'chat.html')
def room(request, room_name):
context = {
'room_name':room_name,
}
return render(request, 'room.html', context=context)
def settingsView(request):
if request.method =='POST':
form = forms.EditAccountForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return redirect('/home/')
else:
messages.error(request, 'User already exists')
return redirect('/settings/')
else:
form = forms.EditAccountForm(instance=request.user)
context = {
"form":form
}
return render(request, 'settings.html', context=context)
def changePassword(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
messages.success(request, 'Your password was successfully updated!')
return redirect('/login/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'changePassword.html', {
'form': form
})
```
|
{
"source": "jerushaotieno/blog",
"score": 2
}
|
#### File: app/main/views.py
```python
from app import app
from flask import render_template,redirect,url_for, abort,request,flash
from app.auth.forms import LoginForm
from . import main
from flask_login import login_required
from ..models import Blog, Comments, Subscribe, User
from .forms import SubscriberForm, UpdateProfile, BlogForm, UpdateBlog
from .. import db
from ..email import mail_message
from app.requests import get_random_quote
@main.route('/', methods = ["GET","POST"])
def index():
quotes = get_random_quote()
form = SubscriberForm()
if form.validate_on_submit():
subscribe = Subscribe(email = form.email.data)
db.session.add(subscribe)
db.session.commit()
mail_message("Welcome to the Blog Alert community!","email/subscribe",subscribe.email,user=subscribe)
flash("Subscribed successfully. You'll never run short on inspiration!")
return redirect (url_for('auth.login'))
# return redirect(url_for('main.index'))
blogs = Blog.query.all()
return render_template('index.html', form = form, quotes=quotes, blogs=blogs)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
# Update profile view function
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
# upload picture
# @main.route('/user/<uname>/update/pic',methods= ['POST'])
# @login_required
# def update_pic(uname):
# user = User.query.filter_by(username = uname).first()
# if 'photo' in request.files:
# # filename = photos.save(request.files['photo'])
# # path = f'photos/{filename}'
# user.profile_pic_path = path
# db.session.commit()
# return redirect(url_for('main.profile',uname=uname))
# blog
@main.route('/blog/', methods = ["GET","POST"])
@login_required
def post():
blog_form = BlogForm()
if blog_form.validate_on_submit():
blog = Blog(title=blog_form.title.data, description=blog_form.description.data, author=blog_form.author.data)
db.session.add(blog)
db.session.commit()
return redirect(url_for('main.post'))
return render_template('blog.html', blog_form= blog_form)
# blog update
@main.route('/updateblog/', methods = ["GET","POST"])
@login_required
def update():
form = UpdateBlog()
if form.validate_on_submit():
description = form.description.data
blog = UpdateBlog(description=description)
db.session.add(blog)
db.session.commit()
return redirect(url_for('main.post'))
return render_template('updateblog.html',form =form)
# blog delete
@main.route('/deleteblog/', methods = ["GET","POST"])
@login_required
def delete():
form = UpdateBlog()
if form.validate_on_submit():
description = form.description.data
blog = UpdateBlog(description=description)
db.session.add(blog)
db.session.commit()
return redirect(url_for('.home'))
return render_template('updateblog.html',form =form)
```
#### File: blog/app/models.py
```python
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
# for role
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer,primary_key = True)
name = db.Column(db.String(255))
users = db.relationship('User',backref = 'role',lazy="dynamic")
    def __repr__(self):
        return f'Role {self.name}'
# for user
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
password_hash = db.Column(db.String(255))
blog = db.relationship('Blog',backref = 'user',lazy = "dynamic")
@property
def password(self):
        raise AttributeError('password is a write-only attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
    def __repr__(self):
        return f'User {self.username}'
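# A minimal usage sketch of the write-only password API (hypothetical values, not part of the original models):
#   user = User(username='demo', email='demo@example.com')
#   user.password = 'secret'         # stores only the generated hash in password_hash
#   user.verify_password('secret')   # -> True
#   user.password                    # -> raises AttributeError by design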
# for blog
class Blog(db.Model):
__tablename__ = 'blog'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String(255))
description = db.Column(db.String(2000))
author = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
comment = db.relationship('Comments', backref = 'blog', lazy = 'dynamic')
def save_blog(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_blog(cls, id):
blog = Blog.query.filter_by(id=id).all()
return blog
    def __repr__(self):
        return f'Blog {self.title}'
# for comments
class Comments(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
comment = db.Column(db.String(255))
blog_id = db.Column(db.Integer,db.ForeignKey("blog.id"))
def save_comments(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls, id):
comments= Comments.query.filter_by(id=id).all()
return comments
    def __repr__(self):
        return f'Comment by {self.username}'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# subscribers
class Subscribe(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255),unique = True,index = True)
def save_subscriber(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_subscriber(cls, id):
subscriber= Subscribe.query.filter_by(id=id).all()
return subscriber
    def __repr__(self):
        return f'Subscriber {self.email}'
# random quotes
class Quotes:
def __init__(self,author,quote):
self.author = author
self.quote = quote
```
|
{
"source": "jerushaotieno/news_api",
"score": 2
}
|
#### File: app/main/views.py
```python
from . import main
from flask import render_template
from ..requests import sources
@main.route('/')
def homepage():
general_news = sources('general')
return render_template('index.html', general = general_news)
@main.route('/sources/<source_id>')
def articles(source_id):
    general_news = sources('general')
    return render_template('index.html', general = general_news)
```
#### File: jerushaotieno/news_api/manage.py
```python
from app import create_app
from flask_script import Manager, Server
# app = Flask(__name__)
app = create_app('development')
manager = Manager(app)
manager.add_command('server', Server)
@manager.command
def test():
'''
Run unit tests
'''
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@app.route('/')
def index():
    return "News Homepage"
@app.route('/sources')
def sources():
    return "Source News Articles"
if __name__ == "__main__":
    manager.run()
|
{
"source": "jerushaotieno/photo-gallery",
"score": 3
}
|
#### File: photo-gallery/photos/tests.py
```python
from django.test import TestCase
from .models import Image, Category, Location
# Create your tests here
# Image Model Tests
class ImageTest(TestCase):
'''
testing image model
'''
def setUp(self):
'''
        Creates image instances; called before any tests are run
'''
self.new_category = Category(name='testing')
self.new_category.save_category()
self.new_location = Location(city='Nairobi', country='Kenya')
self.new_location.save_location()
self.new_picture = Image(image_link=' ', title=' ', description=' ', category=self.new_category, location=self.new_location)
self.new_picture.save_image()
self.another_picture = Image(image_link='', title=' ', description=' ', category=self.new_category, location=self.new_location)
self.another_picture.save_image()
def tearDown(self):
'''
test method for deleting image instances after running each test
'''
Image.objects.all().delete()
Location.objects.all().delete()
Category.objects.all().delete()
def test_instances(self):
'''
test method for asserting instances created during setUp
'''
self.assertTrue(isinstance(self.new_picture,Image))
self.assertTrue(isinstance(self.new_location, Location))
self.assertTrue(isinstance(self.new_category, Category))
def test_save_image(self):
'''
test method for correctly saving an image instance
'''
self.assertTrue(len(Image.objects.all()) == 2)
def test_delete_image(self):
'''
test method for correctly deleting an image instance
'''
self.new_picture.delete_image()
self.assertTrue(len(Image.objects.all()) == 1)
def test_update_image(self):
'''
test method for correctly updating an image instance
'''
update_test = self.new_picture.update_image(' ')
self.assertEqual(update_test.image_link, '')
def test_get_all(self):
'''
test method for retrieving all instances of the image class
'''
pictures = Image.get_all()
def test_get_image_by_id(self):
'''
test method for retrieving image instances by id
'''
obtained_image = Image.get_image_by_id(self.another_picture.id)
def test_search_image(self):
'''
test method for searching various image instances by category
'''
obtained_image = Image.search_image(self.new_picture.category)
def test_filter_by_location(self):
'''
test method for image instances by location
'''
obtained_image = Image.filter_by_location(self.another_picture.location)
print(obtained_image)
# Location Model Tests
class LocationTest(TestCase):
'''
test class for Locations model
'''
def setUp(self):
'''
test method for creating location instances before any tests are run
'''
self.new_location = Location(city='unknown city', country='unknown country')
self.new_location.save_location()
def test_save_location(self):
'''
test method for correctly saving a location instance
'''
self.assertTrue(len(Location.objects.all()) == 1)
def test_delete_location(self):
'''
test method for deleting a location instance correctly
'''
self.new_location.save_location()
self.new_location.delete_location()
self.assertTrue(len(Location.objects.all()) == 0)
def test_update_location(self):
'''
test method for updating a location instance correctly
'''
update_locale = Location.update_location('unknown location', 'new location')
self.assertEqual(update_locale.city, 'new location')
def test_get_all(self):
'''
test method for retrieving all instances of the location class
'''
locations = Location.get_all()
print(locations)
# Category Model Tests
class CategoryTest(TestCase):
'''
test class for categories model
'''
def setUp(self):
'''
test method for creating category instances before running any tests
'''
self.new_category = Category(name='newCategory')
self.new_category.save_category()
def tearDown(self):
'''
test method for deleting category instances after running each test
'''
Category.objects.all().delete()
def test_save_category(self):
'''
test method for saving a category instance correctly
'''
self.assertTrue(len(Category.objects.all()) == 1)
def test_delete_category(self):
'''
test method for deleting a category instance correctly
'''
self.new_category.save_category()
self.new_category.delete_category()
self.assertTrue(len(Category.objects.all()) == 0)
def test_update_category(self):
'''
        test method for updating a category instance correctly
'''
update_category = Category.update_category('newCategory', 'newCategory2')
self.assertEqual(update_category.name, 'newCategory2')
```
|
{
"source": "jerushaotieno/pitches",
"score": 2
}
|
#### File: app/main/views.py
```python
from app import app
from app import auth
from . import main
from flask_login import current_user, login_required
from flask import Flask, render_template,request,redirect,url_for,abort, flash
from ..models import User, Pitches, Comments
from .forms import UpdateProfile, PitchesForm, CommentsForm
from .. import db, photos
import os
from werkzeug.utils import secure_filename
from flask_sqlalchemy import SQLAlchemy
@main.route('/')
def index():
return render_template('index.html')
@main.route('/home/')
def home():
return render_template('home.html')
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@main.route('/user/<uname>/update/pic',methods= ['GET','POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
path = f'uploads/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
return 'add upload'
@app.route('/', methods=["GET", "POST"])
def pitches():
pitches= Pitches.query.all()
return render_template("home.html", pitches=pitches)
@main.route('/pitchesform/', methods=["GET", "POST"])
def pitches():
form = PitchesForm()
if form.validate_on_submit():
title = form.title.data
category = form.category.data
description = form.description.data
# Updated review instance
new_pitch = Pitches(title=title,category=category,description=description)
# save review method
new_pitch.save_pitches()
return redirect(url_for('main.index'))
return render_template("pitchesform.html", form=form, pitches=pitches)
# display pitches in one place
@main.route('/',methods = ['GET','POST'])
@login_required
def view():
pitches= Pitches.query.all()
return render_template('home.html', pitches=pitches)
@app.route('/comment/', methods=["GET", "POST"])
@login_required
def comments():
form = CommentsForm()
if form.validate_on_submit():
comment = form.comment.data
username = form.username.data
# Updated review instance
new_comment = Comments(comment=comment, username=username)
# save review method
new_comment.save_comments()
return redirect(url_for('main.index'))
comments= Comments.query.all()
return render_template("comment.html", form=form, comments=comments)
# display comments
@main.route('/displaycomment/',methods = ['GET','POST'])
@login_required
def views():
comments= Comments.query.all()
return render_template('displaycomment.html', comments=comments)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
```
#### File: migrations/versions/1365722e6925_initial_migration.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1365722e6925'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_pitches_category', table_name='pitches')
op.drop_index('ix_pitches_description', table_name='pitches')
op.drop_index('ix_pitches_title', table_name='pitches')
op.drop_index('ix_pitches_username', table_name='pitches')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_pitches_username', 'pitches', ['username'], unique=False)
op.create_index('ix_pitches_title', 'pitches', ['title'], unique=False)
op.create_index('ix_pitches_description', 'pitches', ['description'], unique=False)
op.create_index('ix_pitches_category', 'pitches', ['category'], unique=False)
# ### end Alembic commands ###
```
#### File: pitches/tests/test_pitch.py
```python
import unittest
# from models import Pitch
from app.models import Pitch
class Pitch_Test(unittest.TestCase):
'''
'''
def setUp(self):
'''
'''
        self.new_pitch = Pitch('test_pitch', 'sample_category', 'sample_author')
def test_init(self):
'''
'''
self.assertEqual(self.new_pitch.pitch_text,'test_pitch')
self.assertEqual(self.new_pitch.pitch_category,'sample_category')
self.assertEqual(self.new_pitch.pitch_author,'sample_author')
def test_instance(self):
self.assertIsInstance(self.new_pitch, Pitch)
# if __name__ == '__main__':
# unittest.main()
```
|
{
"source": "jeruu/VeroProveNutrAspect",
"score": 3
}
|
#### File: VeroProveNutrAspect/app/misc.py
```python
import datetime
import json
from bson import ObjectId
def yearToday(bDate):
    today = datetime.datetime.today()
    # a full year only counts once the birthday has already occurred this calendar year
    if (today.month, today.day) >= (bDate.month, bDate.day):
        return int(today.year - bDate.year)
    return int(today.year - bDate.year) - 1
def todayDate():
dt = datetime.datetime.today()
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
def isNumber(number):
try:
int(number)
return True
except:
return False
def watCal(sex, wSport, yearUser, weight, height, objective):
dWat = 0
dCal = 0
    if sex == 'male':
        dWat = 2800
        # Harris-Benedict basal metabolic rate for men
        dCal = int(66.5 + (13.75 * float(weight)) + (5 * int(height)) - (6.775 * int(yearUser)))
    else:
        dWat = 2000
        # Harris-Benedict basal metabolic rate for women
        dCal = int(655.1 + (9.563 * float(weight)) + (1.850 * int(height)) - (4.676 * int(yearUser)))
    if wSport == 0:
        dCal *= 1.2
    elif wSport == 1 or wSport == 2:
        dCal *= 1.375
    elif wSport == 3 or wSport == 4:
        dCal *= 1.50
    elif wSport == 5:
        dCal *= 1.725
    else:
        dCal *= 1.9
if objective == 'wLoss':
dCal -= (dCal * 17) / 100
if objective == 'wGain':
dCal += 500
return [dWat, dCal]
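# A minimal usage sketch (hypothetical values, not part of the original app):
# daily targets for a 30-year-old male, 75 kg, 180 cm, training 3 times a week, aiming to lose weight.
#   water_ml, calories = watCal('male', 3, 30, 75, 180, 'wLoss')
#   water_ml  -> 2800
#   calories  -> about 2230 kcal after the activity multiplier and the 17% deficit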
# fills the collection starting from a JSON dump
def fillCollection(collection, path, datename):
    # open the file in read mode; the context manager closes it for us
    with open(path, 'r') as tFile:
        # parse the file contents as JSON
        tData = json.load(tFile)
    # for every record in tData
    for u in tData:
        # convert the id back to an ObjectId, then handle the date before inserting
        u['_id'] = ObjectId(u['_id']['$oid'])
        if datename is not None:
            u[datename]['$date'] = u[datename]['$date'][:-14]
            u[datename] = datetime.datetime.strptime(u[datename]['$date'], '%Y-%m-%d')
        try:
            collection.insert_one(u)
        except:
            pass
# transfers the scaled nutrition data from today's meal record into an array
def foodArrDump(collection,dailyMeal, meal):
foodArr = []
try:
dailyMeal[meal]
except:
return foodArr
for food in dailyMeal[meal]:
qr = collection.find({'name': food[0]})
grCf = int(food[1]) / 100
for x in qr:
foodArr.append(
[x["name"], food[1], int((x["cal"] * grCf)), int((x["carb"] * grCf)), int((x["protein"] * grCf)),
int((x["fat"] * grCf))])
return foodArr
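# Illustrative sketch of the returned rows (hypothetical food document, assuming per-100 g values in the collection):
#   foodArrDump(foods, {'lunch': [['rice', '150']]}, 'lunch')
#     -> [['rice', '150', cal, carb, protein, fat]]   # macros scaled by 150/100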
```
|
{
"source": "Jervelund/VendingMachinePython",
"score": 3
}
|
#### File: Jervelund/VendingMachinePython/vending_tweet.py
```python
import sys
import time
import serial
from optparse import OptionParser, make_option
print "Loading twitter library"
execfile("twt.py")
from random import choice
sales = ['Sales are up 64% since last year!',
'With this rate, we\'re going to run out of stock soon!',
'All I do is - vend, vend, vend - no matter what!',
'Veni, Vidi, Vendi!',
'NASDAQ, here we come!',
'Say hello to my little vend!',
'I made them an offer they couldn\'t refuse.',
'Machine, Vending Machine.',
'HAL ain\'t got nothing on me!',
'A cold beverage. Shaken, not stirred.',
'Hasta la venda, Baby!',
'Madness? This is BEVERAGES!',
'To vend, or not to vend. That is the question.',
'A day without vending is like a day without sunshine.',
'Ah, excellent, another valued customer!',
'Feeling overburdened by money?',
'A fantastic day for capitalism!',
'Luke, I am your father!']
dry =['Well that\'s enough to drink for today!',
'Whew, let me cool off for a minute.',
'It\'s not you - it\'s me.',
'It\'s time to kick ass and vend beverages - and I\'m all out of beverages!',
'I find my lack of beverages disturbing.']
undry =['Bring the big wallet, I\'m restocked!',
'Back on track, and ready to vend!']
jam =['Ugh - I don\'t feel so good.',
'I\'ll be back!',
'I just can\'t do it captain - I don\'t have the power!']
unjam =['I feel better already!',
'It\'s alive! It\'s alive!',
'Good as new, I think. Am I leaking?']
def tweet(message):
    try:
        response = client.api.statuses.update.post(status=message)
        print "Sent tweet: " + message
    except:
        print "Could not tweet: " + message
def tweetStatus(type,i='',action=''):
if type == ord('B'):
tweet(choice(sales) + " (total vend count is now " + i + ")")
elif type == ord('D'):
if action == "add":
tweet(choice(dry) + " (slot " + i + " is empty) @CUnnerup")
else:
tweet(choice(undry) + " (slot " + i + " refilled)")
elif type == ord('J'):
if action == "add":
tweet(choice(jam) + " (slot " + i + " jammed) @CUnnerup")
else:
tweet(choice(unjam) + " (slot " + i + " is no longer jammed)")
elif type == ord('R'):
if action == "add":
#tweet("Out of coins " + i)
print "Out of coins " + i
else:
#tweet("Restocked coins " + i)
print "Restocked coins " + i
elif type == ord('C'):
#tweet("Card swiped - current credits: " + i)
print "Card swiped - current credits: " + i
elif type == ord('F'):
if action == 'deposit':
#tweet("Card swiped - deposited: " + str(i))
print "Card swiped - deposited: " + str(i)
else:
#tweet("Card swiped - withdrew: " + str(i))
print "Card swiped - withdrew: " + str(i)
elif type == ord('E'):
if action == 0:
tweet("Shoot! I'm really in trouble now - couldn't withdraw! :( (EEPROM Error) @Jervelund @Lauszus @CUnnerup")
else:
tweet("Shoot! I'm really in trouble now - couldn't deposit. :( (EEPROM Error) @Jervelund @Lauszus @CUnnerup")
elif type == ord('O'):
tweet("Why can't I hold all these card UIDs. :( (EEPROM full) @Jervelund @Lauszus @CUnnerup")
elif type == ord('N'):
#tweet("I ain't saying I'm a gold digger, but I ain't messing with no empty cards. (No credit)")
print "I ain't saying I'm a gold digger, but I ain't messing with no empty cards. (No credit)"
elif type == ord('c'):
#tweet("Added " + str(i) + " kr with coins")
print "Added " + str(i) + " kr with coins"
elif type == ord('r'):
#tweet("Returned a " + str(i) + " kr coin")
print "Returned a " + str(i) + " kr coin"
else:
tweet("Error! Unknown command: " + str(type) + " @Jervelund @Lauszus @CUnnerup")
def tweetDiff(type, str, old_str):
l_added = list(set(str) - set(old_str))
l_removed = list(set(old_str) - set(str))
for item in l_added:
tweetStatus(type, item, "add")
for item in l_removed:
tweetStatus(type, item, "rem")
oldBuffer = {}
parseBuffer = ''
if False: # Debug messages for all possible RFID transactions
# Withdraw
parseBuffer += 'CabababababN' # No credits
parseBuffer += 'CabababababSxyF' # withdrew xy credits
parseBuffer += 'CabababababE' # Bad EEPROM error
# Deposit
parseBuffer += 'CabababababZZZZZF' # Deposited ab credits
parseBuffer += 'CabababababZZZZZSabE' # Could not deposit credits, due to bad EEPROM
parseBuffer += 'CabababababZZZZZSabO' # Could not deposit credits, due to out of EEPROM
currentCreditsInMachine = 0
currentMode = 'withdraw'
setCredits = 0
def parseStatus(stat):
global parseBuffer, currentMode, setCredits, currentCreditsInMachine, oldBuffer
parseBuffer += stat
length = len(parseBuffer)
if length == 0:
return
cmd = ord(parseBuffer[0]) # Case sensitive matching
if cmd == ord('B') or cmd == ord('J') or cmd == ord('D') or cmd == ord('R'): # Beverages dispensed or jammed slots or empty beverage slots (dry) or empty coin return slots
if ',' in parseBuffer:
indx = parseBuffer.index(',')
if parseBuffer[1:indx].isdigit() or indx == 1:
value = parseBuffer[1:indx]
if cmd in oldBuffer and value != oldBuffer[cmd]:
if cmd == ord('B'):
tweetStatus(cmd, value, '')
else:
tweetDiff(cmd, value, oldBuffer[cmd])
oldBuffer[cmd] = value
parseBuffer = parseBuffer[indx + 1:]
else:
return
elif cmd == ord('C'): # Credits in machine
# 'Cxy_xy_xy_xy_xy_'
if len(parseBuffer) < 16:
return
value = str(ord(parseBuffer[1]) | (ord(parseBuffer[2]) << 8))
for i in range(4,16,3):
if value != str(ord(parseBuffer[i]) | (ord(parseBuffer[i + 1]) << 8)):
parseBuffer = parseBuffer[i:]
parseStatus('')
return # Stop if error detected with 'C' command
# Set/reset state variables
currentCreditsInMachine = value
currentMode = 'withdraw'
setCredits = 0
parseBuffer = parseBuffer[16:]
elif cmd == ord('S'): # Set current credits
# 'Sxy'
if len(parseBuffer) < 3:
return
value = str(ord(parseBuffer[1]) | (ord(parseBuffer[2]) << 8))
setCredits = value
parseBuffer = parseBuffer[3:]
elif cmd == ord('E'): # Error EEPROM bad
# 'E'
tweetStatus(cmd, '', setCredits)
elif cmd == ord('O'): # Out of memory
# 'O'
tweetStatus(cmd)
elif cmd == ord('N'): # No credit
# 'N'
tweetStatus(cmd)
elif cmd == ord('F'): # No credit
# 'F'
if currentMode == 'deposit':
tweetStatus(cmd, currentCreditsInMachine, currentMode);
else:
tweetStatus(cmd, setCredits, currentMode);
elif cmd == ord('Z'): # Credits zeroed - deposit mode
# 'Z'
currentMode = 'deposit'
elif cmd == ord('c'): # coins added
# 'cx' - byte encoded value
if len(parseBuffer) < 2:
return
tweetStatus(cmd , ord(parseBuffer[1]))
parseBuffer = parseBuffer[2:]
elif cmd == ord('r'): # return coins
# 'rx' - byte encoded value
if len(parseBuffer) < 2:
return
tweetStatus(cmd , ord(parseBuffer[1]))
parseBuffer = parseBuffer[2:]
if len(parseBuffer) == length:
parseBuffer = parseBuffer[1:]
parseStatus('')
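# Illustrative sketch of how the parser is fed (hypothetical serial payloads):
#   parseStatus('B12,')   # caches vend count 12; a later, different count triggers a sales tweet
#   parseStatus('D2,')    # caches the empty-slot list '2'; newly emptied or refilled slots are tweeted as a diff
# Partial payloads are buffered, so parseStatus('B1') followed by parseStatus('3,') also works.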
def main():
while True:
ser = ''
try:
print "Trying to establish Bluetooth connection."
ser = serial.Serial('/dev/rfcomm0') # Create serial port
except:
print "Could not initialize Bluetooth connection. Retrying."
time.sleep(10)
if ser:
if ser.isOpen():
print "Connection established."
while True:
sodaStatus = ''
try:
sodaStatus = ser.read(ser.inWaiting())
except:
print "Dropped Bluetooth connection unexpectedly."
break
if sodaStatus:
parseStatus(sodaStatus)
time.sleep(5)
print "Retrying..."
parseBuffer = ''
if ser:
ser.close()
main()
```
|
{
"source": "jerverme/infi.pyvmomi_wrapper",
"score": 2
}
|
#### File: infi/pyvmomi_wrapper/property_collector.py
```python
from pyVmomi import vim
from infi.pyutils.decorators import wraps
from infi.pyutils.lazy import cached_method
from logging import getLogger
from munch import Munch
from copy import deepcopy, copy
try:
from gevent.lock import Semaphore as Lock
except ImportError:
from threading import Lock
logger = getLogger(__name__)
INITIAL_VERSION = ''
# foo.bar
# foo.arProp["key val"]
# foo.arProp["key val"].baz
PROPERTY_NAME_PATTERN = r'\w+|\["[^"\]]+"\]'
def locking_decorator(wrapped):
@wraps(wrapped)
def wrapper(self, *args, **kwargs):
self._lock.acquire()
try:
return wrapped(self, *args, **kwargs)
finally:
self._lock.release()
return wrapper
class CachedPropertyCollector(object):
"""
Facade for using PropertyCollectors to fetch a list of properties from all instances of a specific object_type
:param client: :py:class:`Client` instance
:param managed_object_type: A managed object type, e.g. vim.HostSystem
:param properties_list: A list of properties to fetch, can be nested, e.g. config.storageDevice
"""
def __init__(self, client, managed_object_type, properties_list):
super(CachedPropertyCollector, self).__init__()
self._client = client
self._property_collector = None
self._managed_object_type = managed_object_type
self._properties_list = properties_list
self._version = INITIAL_VERSION
self._result = {}
self._lock = Lock()
def __del__(self):
if self._property_collector is not None:
try:
self._property_collector.Destroy()
except vim.ManagedObjectNotFound:
# in case session ended, property collector may already be destroyed
pass
self._property_collector = None
def __repr__(self):
args = (self.__class__.__name__, getattr(self, '_managed_object_type', None),
getattr(self, '_properties_list', []), getattr(self, '_version', repr('')))
return "<{}: object_type={!r}, properties={!r}, version={}>".format(*args)
def _create_traversal_spec(self, name, managed_object_type, property_name, next_selector_names=[]):
return self._client.create_traversal_spec(name, managed_object_type, property_name, next_selector_names)
@cached_method
def _get_container_view(self):
kwargs = dict(container=self._client.root, type=[self._managed_object_type], recursive=True)
return self._client.service_content.viewManager.CreateContainerView(**kwargs)
@cached_method
def _get_object_set(self):
return [vim.ObjectSpec(obj=self._get_container_view(), selectSet=self._get_select_set())]
@cached_method
def _get_prop_set(self):
return [vim.PropertySpec(type=self._managed_object_type, pathSet=self._properties_list)]
@cached_method
def _get_property_collector(self):
self._property_collector = self._client.service_content.propertyCollector.CreatePropertyCollector()
self._property_filter = self._property_collector.CreateFilter(self._get_property_filter_spec(), partialUpdates=True)
return self._property_collector
@cached_method
def _get_property_filter_spec(self):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.FilterSpec.html
return vim.PropertyFilterSpec(propSet=self._get_prop_set(), objectSet=self._get_object_set())
@cached_method
def _get_select_set(self):
"""This method returns a SelectSet that travels the entire heirarchy.
If you want to go over heirarchy in a more efficient way, overload this method"""
select_set = list(self._client._build_full_traversal())
select_set.append(self._create_traversal_spec('container', vim.ContainerView, "container",
[select.name for select in select_set]))
return select_set
def _get_changes(self, time_in_seconds=0, truncated_version=None):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.html#WaitForUpdatesEx
from pyVmomi import vim
property_collector = self._get_property_collector()
wait_options = vim.WaitOptions(maxWaitSeconds=time_in_seconds)
logger.debug("Checking for updates on property collector {!r}".format(self))
try:
update = property_collector.WaitForUpdatesEx(truncated_version or self._version, wait_options)
logger.debug("There is {} pending update".format('no' if update is None else 'indeed a'))
return update
except vim.InvalidCollectorVersion:
logger.error("caught InvalidCollectorVersion fault, collector version is out of date or invalid")
self._version = INITIAL_VERSION
return self._get_changes(time_in_seconds=time_in_seconds)
def _merge_object_update_into_cache__enter(self, object_ref_key, objectUpdate):
# Rebuild the properties dict
        properties = {propertyChange.name: propertyChange.val
                      for propertyChange in objectUpdate.changeSet
                      if propertyChange.op in ['add', 'assign']}
message = "Replacing cache for object_ref_key {} with a dictionary of the following keys {}"
logger.debug(message.format(object_ref_key, list(properties.keys())))
self._result = dict(self._result) # copy
self._result[object_ref_key] = properties
def _merge_object_update_into_cache__leave(self, object_ref_key, objectUpdate=None):
# the object no longer exists, we drop it from the result dictionary
logger.debug("Removing object_ref_key {} from cache".format(object_ref_key))
        self._result = dict(item for item in self._result.items() if item[0] != object_ref_key)
def _walk_on_property_path(self, path):
from re import findall
matches = [Munch(value=item) for item in findall(PROPERTY_NAME_PATTERN, path)]
for match in matches:
if match.value.startswith('['):
match.type = "key"
match.value = match.value[2:-2]
else:
match.type = "property"
return matches
def _get_list_or_object_to_update(self, object_ref_key, property_dict, path, value, last=False):
for key in property_dict.keys():
if path.startswith(key):
break
else:
raise Exception("HIPVM-665 property collector is trying to modify an empty dict")
# key is a prefix of path
if path == key:
# we want to return the top-level 'property_dict', but we need to clone it from and replace it in
# self._result, in order for the result to actually update (and without replacing the reference)
# for code that use it
new_dict = dict(self._result[object_ref_key])
self._result[object_ref_key] = new_dict
return new_dict
object_to_update = property_dict[key]
path = path.replace(key, '').lstrip('.')
walks = self._walk_on_property_path(path)
parent_object = property_dict
key_to_update = key
for item in walks if last else walks[:-1]:
key_to_update = item.value
parent_object = object_to_update
if item.type == "key":
object_to_update = [element for element in object_to_update if element.key == key_to_update][0]
else:
if isinstance(object_to_update, (dict, Munch)):
object_to_update = object_to_update.get(key_to_update)
else:
object_to_update = getattr(object_to_update, key_to_update)
new_object = copy(object_to_update)
if isinstance(parent_object, dict):
parent_object[key_to_update] = new_object
elif isinstance(parent_object, list):
parent_object[parent_object.index(object_to_update)] = new_object
else:
setattr(parent_object, key_to_update, new_object)
return new_object
def _get_property_name_to_update(self, property_dict, path):
for key in property_dict.keys():
if path == key:
return key
return self._walk_on_property_path(path)[-1].value
def _get_key_to_remove(self, key):
return self._walk_on_property_path(key)[-1].value
def _merge_property_change__add(self, object_ref_key, property_dict, key, value):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
list_to_update = self._get_list_or_object_to_update(object_ref_key, property_dict, key, value)
list_to_update.insert(-1, value)
def _merge_property_change__assign(self, object_ref_key, property_dict, key, value):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
object_to_update = self._get_list_or_object_to_update(object_ref_key, property_dict, key, value, key.endswith(']'))
name = self._get_property_name_to_update(property_dict, key)
assignment_method = getattr(object_to_update, "__setitem__", object_to_update.__setattr__)
assignment_method(name, value)
def _merge_property_change__remove(self, object_ref_key, property_dict, key, value):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
list_to_update = self._get_list_or_object_to_update(object_ref_key, property_dict, key, value)
key_to_remove = self._get_key_to_remove(key)
value_list = [item for item in list_to_update if item.key == key_to_remove]
if value_list:
value = value_list[0]
list_to_update.remove(value)
def _merge_object_update_into_cache__modify(self, object_ref_key, objectUpdate):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.ObjectUpdate.html
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.Change.html
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.MissingProperty.html
properties = self._result[object_ref_key]
logger.debug("Modifying cache for object_ref_key {}".format(object_ref_key))
updatemethods = dict(add=self._merge_property_change__add,
assign=self._merge_property_change__assign,
remove=self._merge_property_change__remove,
indirectRemove=self._merge_property_change__remove)
for propertyChange in objectUpdate.changeSet:
logger.debug("Modifying property {}, operation {}".format(propertyChange.name, propertyChange.op))
updatemethods[propertyChange.op](object_ref_key, properties, propertyChange.name, propertyChange.val)
for missingSet in objectUpdate.missingSet:
logger.debug("Removing from cache a property that has gone missing {}".format(missingSet.path))
self._merge_property_change__remove(object_ref_key, properties, missingSet.path, None)
def _merge_object_update_into_cache(self, objectUpdate):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.ObjectUpdate.html
updateMethods = dict(enter=self._merge_object_update_into_cache__enter,
leave=self._merge_object_update_into_cache__leave,
modify=self._merge_object_update_into_cache__modify)
object_ref_key = self._client.get_reference_to_managed_object(objectUpdate.obj)
logger.debug("Update kind {} on cache key {}".format(objectUpdate.kind, object_ref_key))
updateMethods[objectUpdate.kind](object_ref_key, objectUpdate)
def _remove_missing_object_from_cache(self, missingObject):
key = self._client.get_reference_to_managed_object(missingObject.obj)
logger.debug("Removing key {} from cache because it is missing in the filterSet".format(key))
        self._result = dict(item for item in self._result.items() if item[0] != key)
def _merge_changes_into_cache(self, update):
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.UpdateSet.html
# http://vijava.sourceforge.net/vSphereAPIDoc/ver5/ReferenceGuide/vmodl.query.PropertyCollector.FilterUpdate.html
for filterSet in update.filterSet:
for missingObject in filterSet.missingSet:
self._remove_missing_object_from_cache(missingObject)
for objectUpdate in filterSet.objectSet:
self._merge_object_update_into_cache(objectUpdate)
if update.truncated:
self._merge_changes_into_cache(self._get_changes(0, update.version))
else:
self._version = update.version
logger.debug("Cache of {!r} is updated for version {}".format(self, self._version))
def _reset_and_update(self):
self._version = INITIAL_VERSION
self._result = {}
update = self._get_changes()
self._merge_changes_into_cache(update)
def check_for_updates(self):
""":returns: True if the cached data is not up to date"""
return self.wait_for_updates(0)
@locking_decorator
def get_properties(self):
"""This method checks first if there are changes in the server.
If there are, the changes are merged into the cache and then returned from the cache.
If there are not, the data is returned from the cache.
:rtype: a dictionary with MoRefs as keys, and propertyName=propertyValue dictionary as values"""
update = self._get_changes()
if update is not None:
try:
self._merge_changes_into_cache(update)
except:
logger.exception("Caught unexpected exception during property collector update merge. Resetting.")
self._reset_and_update()
return self.get_properties_from_cache()
def get_properties_from_cache(self):
""":returns: the cached properties immediately from the cache.
:rtype: a dictionary with MoRefs as keys, and propertyName=propertyValue dictionary as values"""
return self._result
@locking_decorator
def wait_for_updates(self, time_in_seconds):
"""This method is blocking a maximum time of time_in_seconds, depending if there are changes on the server.
This method does not update the cache with the changes, if there are any.
:returns: True if there are updates on the server, False if there are not."""
update = self._get_changes(time_in_seconds)
return update is not None
class HostSystemCachedPropertyCollector(CachedPropertyCollector):
"""
    Facade for fetching host attributes by using a faster traversal (e.g. no need to traverse inside HostSystem)
"""
def __init__(self, client, host_properties):
super(HostSystemCachedPropertyCollector, self).__init__(client, vim.HostSystem, host_properties)
@cached_method
def _get_select_set(self):
crToH = self._create_traversal_spec("crToH", vim.ComputeResource, "host")
dcToHf = self._create_traversal_spec("dcToHf", vim.Datacenter, "hostFolder", ["visitFolders"])
visitFolders = self._create_traversal_spec("visitFolders", vim.Folder, "childEntity",
["visitFolders", "dcToHf", "crToH"])
container = self._create_traversal_spec("container", vim.ContainerView, "container", ["visitFolders"])
return [container, visitFolders, dcToHf, crToH]
class VirtualMachinePropertyCollector(CachedPropertyCollector):
def __init__(self, client, properties):
super(VirtualMachinePropertyCollector, self).__init__(client, vim.VirtualMachine, properties)
@cached_method
def _get_select_set(self):
rpToRp = self._create_traversal_spec("rpToRp", vim.ResourcePool, "resourcePool", ["rpToRp", "rpToVm"])
rpToVm = self._create_traversal_spec("rpToVm", vim.ResourcePool, "vm")
crToRp = self._create_traversal_spec("crToRp", vim.ComputeResource, "resourcePool", ["rpToRp", "rpToVm"])
dcToHf = self._create_traversal_spec("dcToHf", vim.Datacenter, "hostFolder", ["visitFolders"])
visitFolders = self._create_traversal_spec("visitFolders", vim.Folder, "childEntity",
["visitFolders", "dcToHf", "crToRp"])
container = self._create_traversal_spec("container", vim.ContainerView, "container", ["visitFolders"])
return [container, visitFolders, dcToHf, crToRp, rpToRp, rpToVm]
class TaskPropertyCollector(CachedPropertyCollector):
def __init__(self, client, tasks, properties=["info.state"]):
super(TaskPropertyCollector, self).__init__(client, vim.Task, properties)
self.tasks = tasks
def _get_object_set(self):
return [vim.ObjectSpec(obj=task) for task in self.tasks]
def iter_task_states_changes(self, timeout_in_seconds=None):
update = self._get_changes(time_in_seconds=timeout_in_seconds)
if update is None:
return
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
for change in obj_set.changeSet:
if change.name == 'info.state': # we don't look for any other changes so this should be true
yield task, change.val
self._version = update.version
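# Usage sketch (illustration only; assumes an already-connected `client` object of the
# kind the collectors above are constructed with):
#
#     collector = HostSystemCachedPropertyCollector(client, ["name", "summary.runtime.powerState"])
#     properties = collector.get_properties()      # {host_moref_key: {property_name: value, ...}, ...}
#     if collector.check_for_updates():
#         properties = collector.get_properties()  # merges the pending server-side changes first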
```
|
{
"source": "jervi/requirementslib",
"score": 2
}
|
#### File: src/environ/_environ_config.py
```python
from __future__ import absolute_import, division, print_function
import logging
import os
import attr
from .exceptions import MissingEnvValueError
log = logging.getLogger(__name__)
CNF_KEY = "environ_config"
@attr.s
class Raise(object):
pass
RAISE = Raise()
def config(maybe_cls=None, prefix="APP"):
def wrap(cls):
cls._prefix = prefix
return attr.s(cls, slots=True)
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
@attr.s(slots=True)
class _ConfigEntry(object):
name = attr.ib(default=None)
default = attr.ib(default=RAISE)
sub_cls = attr.ib(default=None)
callback = attr.ib(default=None)
def var(default=RAISE, converter=None, name=None, validator=None):
return attr.ib(
default=default,
metadata={CNF_KEY: _ConfigEntry(name, default, None)},
converter=converter,
validator=validator,
)
def _env_to_bool(val):
"""Convert *val* to a bool if it's not a bool in the first place."""
if isinstance(val, bool):
return val
val = val.strip().lower()
if val in ("1", "true", "yes"):
return True
return False
def bool_var(default=RAISE, name=None):
return var(
default=default,
name=name,
converter=_env_to_bool,
)
def group(cls):
return attr.ib(default=None, metadata={CNF_KEY: _ConfigEntry(None, None, cls, True)})
def to_config(config_cls, environ=os.environ):
if config_cls._prefix:
app_prefix = (config_cls._prefix,)
else:
app_prefix = ()
def default_get(environ, metadata, prefix, name):
ce = metadata[CNF_KEY]
if ce.name is not None:
var = ce.name
else:
var = ("_".join(app_prefix + prefix + (name,))).upper()
log.debug("looking for env var '%s'." % (var,))
val = environ.get(var, ce.default)
if val is RAISE:
raise MissingEnvValueError(var)
return val
return _to_config(config_cls, default_get, environ, ())
def _to_config(config_cls, default_get, environ, prefix):
vals = {}
for a in attr.fields(config_cls):
try:
ce = a.metadata[CNF_KEY]
except KeyError:
continue
if ce.sub_cls is None:
get = ce.callback or default_get
val = get(environ, a.metadata, prefix, a.name)
else:
val = _to_config(
ce.sub_cls,
default_get,
environ,
prefix + ((a.name if prefix else a.name),),
)
vals[a.name] = val
return config_cls(**vals)
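# Usage sketch (illustration, not part of the module): declare a config class and
# populate it from environment variables prefixed with APP_.
#
#     @config(prefix="APP")
#     class AppConfig(object):
#         host = var(default="127.0.0.1")
#         debug = bool_var(default=False)
#
#     cfg = to_config(AppConfig, environ={"APP_HOST": "0.0.0.0", "APP_DEBUG": "yes"})
#     # cfg.host == "0.0.0.0" and cfg.debug is True; a required value that is missing
#     # from the environment raises MissingEnvValueError.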
```
|
{
"source": "jervisfm/rebbr",
"score": 3
}
|
#### File: rebbr/mahimahi/client.py
```python
from bbr_logging import debug_print, debug_print_error, debug_print_verbose
import os
import random
import socket
import string
import time
def run_client(cong_control, size=1024, address=(os.environ.get("MAHIMAHI_BASE") or "127.0.0.1"), port=5050):
"""Run the client."""
TCP_CONGESTION = 13
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, TCP_CONGESTION, cong_control)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 6553600)
debug_print("Client Connecting to: " + str(address) + ":" + str(port))
try:
s.connect((address, port))
except socket.error as msg:
debug_print_error("Cannot Connect: " + str(msg))
return
debug_print("Connection Established.")
# Generate a random message of SIZE a single time. Send this over and over.
msg = ''.join(random.choice(string.ascii_lowercase) for _ in range(size))
debug_print_verbose(msg)
msg_count = 1
# It can take different amount of time to send message depending on network
# configurations. Thus, log progress based on time intervals.
last_log_time_secs = time.time()
log_interval_secs = 5
debug_print("Client Starting Sending Messages...")
while True:
time_now_secs = time.time()
delta_secs = time_now_secs - last_log_time_secs
if (delta_secs > log_interval_secs):
debug_print("Sending Message #%d" % msg_count)
last_log_time_secs = time_now_secs
try:
s.send(msg)
except Exception as e:
debug_print_error("Socket Send Exception: " + str(e))
return
msg_count += 1
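if __name__ == "__main__":
    # Example invocation (a sketch): stream to a local receiver on port 5050 using the
    # "cubic" congestion-control module; any module loaded on the host can be passed instead.
    run_client("cubic", size=1024, address="127.0.0.1", port=5050)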
```
|
{
"source": "jerwallace/aws-robomaker-sample-apps-gzweb",
"score": 3
}
|
#### File: jerwallace/aws-robomaker-sample-apps-gzweb/fixme.py
```python
import glob,os,sys
from xml.dom import minidom
def xml_to_pretty_xml(input_file_name):
print ("Fixing the formatting in file %s \n", input_file_name)
doc = minidom.parse(input_file_name)
xmlstr = doc.toprettyxml(encoding="utf-8")
temp_file_name = input_file_name + "_temp"
old_file_name = input_file_name + "_old"
with open(temp_file_name, "w") as f:
f.write(xmlstr)
os.rename(input_file_name, old_file_name)
os.rename(temp_file_name, input_file_name)
os.remove(old_file_name)
def prettify_all_dae_in_this_folder(input_folder_path):
os.chdir(input_folder_path)
result = [y for x in os.walk(input_folder_path) \
for y in glob.glob(os.path.join(x[0], '*.DAE'))]
print(result)
for filename in result:
if "visual" in filename:
try:
xml_to_pretty_xml(filename)
            except Exception:
                print("Could not fix %s." % filename)
if __name__=="__main__":
path = sys.argv[1]
print ("Main path is %s", path)
prettify_all_dae_in_this_folder(path)
```
|
{
"source": "jerwyk/CppUMLGenerator",
"score": 3
}
|
#### File: CppUMLGenerator/source/Cpp.py
```python
class CppVariable:
def __init__(self, varType, name):
self.variable_type = varType
self.name = name
def __str__(self):
return self.name + ': ' + self.variable_type
def __repr__(self):
return str(self)
class CppFunction:
def __init__(self, retType, name):
self.return_type = retType
self.name = name
self.params = []
def AddParameter(self, param):
split = param.split()
if(len(split) == 2):
self.params.append(CppVariable(split[0], split[1]))
class CppHeader:
def __init__(self, name):
self.name = name
self.dependencies = []
self.variables = []
self.functions = []
self.ClassName = ""
def AddDependency(self, dependency):
self.dependencies.append(dependency)
def AddVariavle(self, varType, name):
self.variables.append(CppVariable(varType, name))
def AddFunction(self, func):
split_str = func.strip().split(' ', 1)
left_bracket = split_str[1].find('(')
right_bracket = split_str[1].find(')')
name = split_str[1][:left_bracket]
params = split_str[1][left_bracket + 1:right_bracket].split(',')
f = CppFunction(split_str[0], name)
for p in params:
f.AddParameter(p)
self.functions.append(f)
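# Parsing sketch (illustration): given the declaration "int Add(int a, int b)",
# AddFunction stores a CppFunction with return_type "int", name "Add" and
# parameters rendered as [a: int, b: int] via CppVariable.__str__.
#
#     header = CppHeader("math_utils")
#     header.AddFunction("int Add(int a, int b)")
#     print(header.functions[0].params)  # -> [a: int, b: int]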
```
|
{
"source": "jerxdev/Python-Demo",
"score": 4
}
|
#### File: jerxdev/Python-Demo/JerwinAntivolaAs4PrettyPattern.py
```python
import turtle
def draw_square(some_turtle):
for i in range(0,4):
some_turtle.forward(100)
some_turtle.right(90)
def draw_art():
window = turtle.Screen()
window.bgcolor("lightgreen")
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("blue")
brad.speed(6)
for i in range (0, 20):
draw_square(brad)
brad.right(18)
window.exitonclick()
draw_art()
```
#### File: jerxdev/Python-Demo/JerwinAntivolaSW8Replace.py
```python
def replace(s, old, new):
return new.join(s.split(old))
print(replace('Mississippi', 'i', 'I'))
s = 'I love spom! Spom is my favorite food. Spom, spom, spom, yum!'
print(replace(s, 'om', 'am'))
print(replace(s, 'o', 'a'))
```
|
{
"source": "Jeryter/File2Image",
"score": 3
}
|
#### File: Jeryter/File2Image/img2file_demo.py
```python
from PIL import Image
import logging
import hashlib
logging.basicConfig(level=logging.INFO)
def check_hash(file_name, file_hash):
f = open(file_name, 'rb')
d = f.read()
# if hash == hashlib.sha256(d).hexdigest():
# return True
f_hash = hashlib.sha256(d).hexdigest()
logging.info(f'New file hash: \t{f_hash}')
logging.info(f'Old file hash: \t{file_hash}')
if file_hash == f_hash:
return True
return False
# These properties should be written inside the image file
# Note that this is just a demo and needs further development
# File length in Bytes
file_length = 458053
# It should be original file name
export_file = "dcx_re.pdf"
# SHA256
file_sha256 = '953ffebe8456f93ad2cf79e097f0de8b9883702646af3089790d34a5e8dedf07'
recovery_img = 'dcx2.png'
img = Image.open(recovery_img)
pixel = img.load()
recovery_file = open(export_file, 'wb')
# Logs
logs = open('img2dcx.txt', 'wb')
pos = 0
finished = False
for i in range(img.height):
for j in range(img.width):
recovery_file.write(pixel[j, i].to_bytes(1, 'big'))
if pos < 10:
logs.write(f'({j}, {i}) {pixel[j, i]}\n'.encode())
pos += 1
if pos == file_length:
finished = True
break
if finished:
break
logs.close()
logging.info(f'Pixels read: pos is {pos}')
img.close()
recovery_file.close()
if check_hash(export_file, file_sha256):
print("Done.")
else:
print("Error.")
```
|
{
"source": "jerzabek/mobilni-uredaju-dataset",
"score": 3
}
|
#### File: mobilni-uredaju-dataset/api/or_api_db.py
```python
import mysql.connector
from mysql.connector import errorcode
class OR_API_DB:
def __init__(self, config) -> None:
self.config = config
def connect(self) -> bool:
try:
db_conn = mysql.connector.connect(**self.config)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Could not connect to database: invalid username/password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
return False
else:
self.db_conn = db_conn
return True
def get_cursor(self, dictionary=True):
if not self.db_conn.is_connected():
self.db_conn.reconnect(attempts=3)
try:
cursor = self.db_conn.cursor(dictionary=dictionary)
return cursor
except mysql.connector.Error as err:
return False
def commit(self):
self.db_conn.commit()
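# Usage sketch (illustration; the config keys are whatever mysql.connector.connect accepts):
#
#     db = OR_API_DB({"user": "or_api", "password": "<PASSWORD>", "host": "localhost", "database": "or_api"})
#     if db.connect():
#         cursor = db.get_cursor()           # dictionary cursor by default
#         cursor.execute("SELECT 1 AS ok")
#         print(cursor.fetchall())           # -> [{'ok': 1}]
#         db.commit()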
```
|
{
"source": "Jerzha/rknn_facenet",
"score": 2
}
|
#### File: Jerzha/rknn_facenet/facenet_train_by_tripletloss.py
```python
import tensorflow as tf
import datasets.casia_webface as webface
import models.inception_resnet_v1 as facenet
BATCH_SIZE = 32
def triple_loss(y_true, y_pred, batch_size=BATCH_SIZE, alpha=0.2):
print('True Shape:' + str(y_true.get_shape()))
print('Pred Shape:' + str(y_pred.get_shape()))
anchor = y_pred[0:batch_size, :]
positive = y_pred[batch_size:batch_size+batch_size, :]
negative = y_pred[batch_size+batch_size:batch_size*3, :]
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
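# Shape note (inferred from the slicing above): y_pred is expected to hold the embeddings
# of a concatenated batch [anchors; positives; negatives], i.e. shape
# (3 * BATCH_SIZE, embedding_dim); y_true is not used beyond the debug print.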
def train_from_folder():
    tf.keras.backend.set_learning_phase(1)  # set to training mode (the default)
data_gen = webface.CASIAWebFaceSequence('/datasets/CASIA-WebFace_aligned', target_shape=[149, 149], batch_size=BATCH_SIZE, shuffle=True)
callbacks = [
tf.keras.callbacks.ModelCheckpoint(filepath='./checkpoints/facenet_rgb-{epoch:02d}.h5'),
tf.keras.callbacks.TensorBoard(log_dir='./logs')
]
model = facenet.InceptionResnetV1()
model.compile(optimizer=tf.keras.optimizers.SGD(),
loss=triple_loss,
metrics=[tf.keras.metrics.categorical_accuracy])
model.fit_generator(data_gen, epochs=20, max_queue_size=10, workers=8, callbacks=callbacks)
model.save_weights('./weights/facenet_rgb_weights.h5')
model.save('./facenet_rgb.h5')
if __name__ == '__main__':
    # TODO: flesh out argument handling; for now just run the folder-based training
    train_from_folder()
```
#### File: rknn_facenet/models/inception_resnet_v1.py
```python
import tensorflow as tf
import tensorflow.keras.backend as K
from models.base_model import BaseModel
class InceptionResnetV1(BaseModel):
def __init__(self, input_shape=(149, 149, 3), classes=128, dropout_keep_prob=0.8):
self.__classes = classes
self.__dropout_keep_prob = dropout_keep_prob
inputs = tf.keras.Input(shape=input_shape)
# input stem
x = self.InputStem(inputs)
# 5 * inception-resnet-a
for i in range(1, 6):
x = self.InceptionResnetA(x, scale=0.17, idx=i)
# reduction-a
x = self.Reduction_A(x)
# 10 * inception-resnet-b
for i in range(1, 11):
x = self.InceptionResnetB(x, scale=0.10, idx=i)
# reduction-b
x = self.Reduction_B(x)
# 5 * inception-resnet-c
for i in range(1, 6):
x = self.InceptionResnetC(x, scale=0.20, idx=i)
x = self.InceptionResnetC(x, scale=1, idx=6)
# Classification
x = tf.keras.layers.GlobalAveragePooling2D(name='AvgPool')(x)
x = tf.keras.layers.Dropout(rate=1-self.__dropout_keep_prob, name='Dropout')(x)
x = tf.keras.layers.Dense(self.__classes, use_bias=False, name='Bottleneck')(x)
x = tf.keras.layers.BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name='Bottleneck_bn')(x)
#x = tf.keras.layers.Dense(100, activation=tf.keras.activations.softmax)(x)
super(InceptionResnetV1, self).__init__(inputs, x, name='InceptionResnetV1')
def Conv2D_Bn_Relu(self, x, filters, kernel_size, strides, padding='same', name=''):
bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
bn_momentum = 0.995
bn_epsilon = 0.001
x = tf.keras.layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, name=name+'_conv2d')(x)
x = tf.keras.layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, epsilon=bn_epsilon, scale=False, name=name+'_bn')(x)
x = tf.keras.layers.Activation(tf.keras.activations.relu, name=name+'_relu')(x)
return x
def InputStem(self, x):
x = self.Conv2D_Bn_Relu(x, 32, 3, strides=2, name='input_stem1') # 149 x 149 x 32
x = self.Conv2D_Bn_Relu(x, 32, 3, strides=1, name='input_stem2') # 147 x 147 x 32
x = self.Conv2D_Bn_Relu(x, 64, 3, strides=1, name='input_stem3') # 147 x 147 x 64
x = tf.keras.layers.MaxPool2D(3, strides=2, name='input_stem4_maxpool')(x) # 73 x 73 x 64
x = self.Conv2D_Bn_Relu(x, 80, 1, strides=1, name='input_stem5') # 73 x 73 x 80
x = self.Conv2D_Bn_Relu(x, 192, 3, strides=1, name='input_stem6') # 71 x 71 x 192
x = self.Conv2D_Bn_Relu(x, 256, 3, strides=2, name='input_stem7') # 35 x 35 x 256
return x
# Inception-resnet-A / Block35
def InceptionResnetA(self, x, scale=0.17, idx=0):
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
branch0 = self.Conv2D_Bn_Relu(x, 32, 1, strides=1, name='irA'+str(idx)+'-b0')
branch1 = self.Conv2D_Bn_Relu(x, 32, 1, strides=1, name='irA'+str(idx)+'-b1a')
branch1 = self.Conv2D_Bn_Relu(branch1, 32, 3, strides=1, name='irA'+str(idx)+'-b1b')
branch2 = self.Conv2D_Bn_Relu(x, 32, 1, strides=1, name='irA'+str(idx)+'-b2a')
branch2 = self.Conv2D_Bn_Relu(branch2, 32, 3, strides=1, name='irA'+str(idx)+'-b2b')
branch2 = self.Conv2D_Bn_Relu(branch2, 32, 3, strides=1, name='irA'+str(idx)+'-b2c')
mixed = tf.keras.layers.Concatenate(axis=channel_axis, name='irA'+str(idx)+'-concat')([branch0, branch1, branch2])
up = tf.keras.layers.Conv2D(K.int_shape(x)[channel_axis], 1, strides=1, use_bias=True, name='irA'+str(idx)+'-conv2d')(mixed)
up = tf.keras.layers.Lambda(lambda x: x*scale)(up)
x = tf.keras.layers.Add()([x, up])
x = tf.keras.layers.Activation(tf.keras.activations.relu, name='irA'+str(idx)+'-relu')(x)
return x
# Inception-resnet-B / Block17
def InceptionResnetB(self, x, scale=0.10, idx=0):
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
branch0 = self.Conv2D_Bn_Relu(x, 128, 1, strides=1, name='irB'+str(idx)+'-b0')
branch1 = self.Conv2D_Bn_Relu(x, 128, 1, strides=1, name='irB'+str(idx)+'-b1a')
branch1 = self.Conv2D_Bn_Relu(branch1, 128, [1, 7], strides=1, name='irB'+str(idx)+'-b1b')
branch1 = self.Conv2D_Bn_Relu(branch1, 128, [7, 1], strides=1, name='irB'+str(idx)+'-b1c')
mixed = tf.keras.layers.Concatenate(axis=channel_axis, name='irB'+str(idx)+'-concat')([branch0, branch1])
up = tf.keras.layers.Conv2D(K.int_shape(x)[channel_axis], 1, strides=1, use_bias=True, name='irB'+str(idx)+'-conv2d')(mixed)
up = tf.keras.layers.Lambda(lambda x: x * scale)(up)
x = tf.keras.layers.Add()([x, up])
x = tf.keras.layers.Activation(tf.keras.activations.relu, name='irB' + str(idx) + '-relu')(x)
return x
# Inception-resnet-C / Block8
def InceptionResnetC(self, x, scale=0.20, idx=0):
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
branch0 = self.Conv2D_Bn_Relu(x, 192, 1, strides=1, name='irC'+str(idx)+'-b0')
branch1 = self.Conv2D_Bn_Relu(x, 192, 1, strides=1, name='irC'+str(idx)+'-b1a')
branch1 = self.Conv2D_Bn_Relu(branch1, 192, [1, 3], strides=1, name='irC'+str(idx)+'-b1b')
branch1 = self.Conv2D_Bn_Relu(branch1, 192, [3, 1], strides=1, name='irC'+str(idx)+'-b1c')
mixed = tf.keras.layers.Concatenate(axis=channel_axis, name='irC' + str(idx) + '-concat')([branch0, branch1])
up = tf.keras.layers.Conv2D(K.int_shape(x)[channel_axis], 1, strides=1, use_bias=True, name='irC'+str(idx)+'-conv2d')(mixed)
up = tf.keras.layers.Lambda(lambda x: x * scale)(up)
x = tf.keras.layers.Add()([x, up])
x = tf.keras.layers.Activation(tf.keras.activations.relu, name='irC' + str(idx) + '-relu')(x)
return x
def Reduction_A(self, x):
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
branch0 = self.Conv2D_Bn_Relu(x, 384, 3, strides=2, padding='valid', name='reduA_conv2d_0a')
branch1 = self.Conv2D_Bn_Relu(x, 192, 1, strides=1, name='reduA_conv2d_1a')
branch1 = self.Conv2D_Bn_Relu(branch1, 192, 3, strides=1, name='reduA_conv2d_1b')
branch1 = self.Conv2D_Bn_Relu(branch1, 256, 3, strides=2, padding='valid', name='reduA_conv2d_1c')
branch2 = tf.keras.layers.MaxPool2D(3, strides=2, padding='valid', name='reduA_maxpool_2a')(x)
net = tf.keras.layers.Concatenate(axis=channel_axis, name='reduA-concat')([branch0, branch1, branch2])
return net
def Reduction_B(self, x):
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
branch0 = self.Conv2D_Bn_Relu(x, 256, 1, strides=1, name='reduB_conv2d_0a')
branch0 = self.Conv2D_Bn_Relu(branch0, 384, 3, strides=2, padding='valid', name='reduB_conv2d_0b')
branch1 = self.Conv2D_Bn_Relu(x, 256, 1, strides=1, name='reduB_conv2d_1a')
branch1 = self.Conv2D_Bn_Relu(branch1, 256, 3, strides=2, padding='valid', name='reduB_conv2d_1b')
branch2 = self.Conv2D_Bn_Relu(x, 256, 1, strides=1, name='reduB_conv2d_2a')
branch2 = self.Conv2D_Bn_Relu(branch2, 256, 3, strides=1, name='reduB_conv2d_2b')
branch2 = self.Conv2D_Bn_Relu(branch2, 256, 3, strides=2, padding='valid', name='reduB_conv2d_2c')
branch3 = tf.keras.layers.MaxPool2D(3, strides=2, padding='valid', name='reduB_maxpool_3a')(x)
net = tf.keras.layers.Concatenate(axis=channel_axis, name='reduB-concat')([branch0, branch1, branch2, branch3])
return net
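# Usage sketch (illustration; BaseModel is assumed to behave like tf.keras.Model):
#
#     model = InceptionResnetV1(input_shape=(149, 149, 3), classes=128)
#     model.summary()  # the 'Bottleneck_bn' output is the 128-d face embedding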
```
|
{
"source": "jerzydziewierz/typobs",
"score": 3
}
|
#### File: jerzydziewierz/typobs/to_typora.py
```python
def run():
"""
parses the arguments and runs to_typora() function
:return: spews darnduff to the console.
"""
default_source_path = r'/home/mib07150/git/2020_portfolio/PMBOK templates/'
import argparse
import os
parser = argparse.ArgumentParser(
description='convert all links inside markdown files to the [text](link) style used by http://typora.io',
epilog='have a beautiful day!'
)
parser.add_argument('-d', '--default_path', action="store_true", dest='use_default_path', default=False, help=f'use the default hard-coded path of "{default_source_path}"')
parser.add_argument('-p', '--path', action="store", metavar='path', dest='path', type=str,
help='top-level source path (explored recursively)')
arguments = parser.parse_args()
if arguments.use_default_path:
source_path = default_source_path
print(f'using default path of "{source_path}"')
else:
if arguments.path is not None:
source_path = arguments.path
print(f'using supplied path of "{source_path}"')
else:
source_path = f'{os.getcwd()}/'
print(f'using current folder path (pwd) : "{source_path}"')
to_typora(source_path=source_path)
def to_typora(source_path=r'/home/mib07150/git/2020_portfolio/PMBOK templates/'):
"""
converts all the links inside the .md files to typora style, [text](link)
:param source_path: where to start
:return: spews darnduff to the console.
"""
source_folder_firstchar = len(source_path)
import os
# create the list of files of interest in the source_folder
fname_list = []
fpurename_list = []
for root, d_names, f_names in os.walk(source_path):
for f in f_names:
if ".md" in f[-3:] and not ".trash" in root:
# fname.append(os.path.join(root, f))
ths_fname = os.path.join(root, f)[source_folder_firstchar:]
print(f'"{ths_fname}" is "{f[:-3]}"')
fname_list.append(ths_fname)
fpurename_list.append(f[:-3])
import re
# This pattern matches [[something]]
# note that link descriptors are not supported here.
# convert [[x]] links to [x](path_to\x.md) ===============
pattern_basic_obslink_getter = r'\[\[ *(?P<link>.+?) *\]\]'
re_link = re.compile(pattern_basic_obslink_getter)
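    # e.g. re_link.search('see [[Project Charter]] for details').groupdict()['link']
    # evaluates to 'Project Charter' (the double brackets and padding spaces are dropped)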
file_idx = 0
for file_idx in range(len(fname_list)):
filename = os.path.join(source_path, fname_list[file_idx])
filename_short = fname_list[file_idx]
# prepare link_prefix so that the files being in subfolders get correct up-folder path prefix
link_prefix = ''
file_depth = filename_short.count('/')
for depth_idx in range(file_depth):
link_prefix = f'{link_prefix}../'
print(f'processing ({file_idx})[{file_depth}]: "{filename_short}"...')
content = open(filename).read()
last_pos = 0
while True:
            matched = re_link.search(content, last_pos)
            if not matched:
                print('no more matches...', end='')
                break
            link_string = matched.groupdict()['link']
            new_pos = matched.span()[1]
            matched_string = content[matched.span()[0]:matched.span()[1]]
            # check whether the link target is a file in the list
            f_candidate = None
            for candidate in fname_list:
                if link_string in candidate:
                    f_candidate = candidate
                    break
            if f_candidate is not None:
                # create the replacement, commonmark link
                link_file_string = re.sub(r" ", "%20", f_candidate)  # replace spaces with "%20"
                commonmark_link = f'[{link_string}]({link_prefix}{link_file_string})'
                print(f'replacing {matched.span()} [[{link_string}]] -> {commonmark_link}')
                # replace the old-style link in the source text with the new-style link
                content = content.replace(matched_string, commonmark_link)
            else:
                # no matching file found; leave the link as-is
                print(f'no target file found for [[{link_string}]], leaving it unchanged')
            last_pos = new_pos  # move past this match so an unmatched link cannot loop forever
print('writing...', end='')
f = open(filename, "w")
f.write(content)
f.close()
print('done.')
print('')
```
|
{
"source": "JerzyDziewierz-UH/picosdk-python-wrappers",
"score": 3
}
|
#### File: picosdk-python-wrappers/picosdk/functions.py
```python
from __future__ import division
import numpy as np
from picosdk.constants import PICO_STATUS, PICO_STATUS_LOOKUP
from picosdk.errors import PicoSDKCtypesError
def adc2mV(bufferADC, range, maxADC):
"""
    adc2mV(
c_short_Array bufferADC
int range
c_int32 maxADC
)
Takes a buffer of raw adc count values and converts it into millivolts
"""
channelInputRanges = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000]
vRange = channelInputRanges[range]
bufferV = [(x * vRange) / maxADC.value for x in bufferADC]
return bufferV
def mV2adc(millivolts, range, maxADC):
"""
mV2adc(
float millivolts
int range
c_int32 maxADC
)
Takes a voltage value and converts it into adc counts
"""
channelInputRanges = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000]
vRange = channelInputRanges[range]
adcValue = round((millivolts * maxADC.value)/vRange)
return adcValue
def splitMSOData(dataLength, data):
"""
    This method converts an array of values for a digital port into the binary equivalent, splitting the bits by
digital channel.
Returns a set of 8 variables, each of which corresponds to the binary data values over time of the different
digital channels from the lowest significant bit to the most significant bit. For PORT0 this will be in the order
(D0, D1, D2, ... D7) and for PORT1 this will be (D8, D9, D10, ... D15).
splitMSOData(
c_int32 dataLength
c_int16 array data
)
"""
# Makes an array for each digital channel
binaryBufferD0 = np.chararray((dataLength.value, 1))
binaryBufferD1 = np.chararray((dataLength.value, 1))
binaryBufferD2 = np.chararray((dataLength.value, 1))
binaryBufferD3 = np.chararray((dataLength.value, 1))
    binaryBufferD4 = np.chararray((dataLength.value, 1))
    binaryBufferD5 = np.chararray((dataLength.value, 1))
    binaryBufferD6 = np.chararray((dataLength.value, 1))
    binaryBufferD7 = np.chararray((dataLength.value, 1))
# Changes the data from int type to a binary type and then separates the data for each digital channel
for i in range(0, dataLength.value):
MSOData = data[i]
binaryMSOData = bin(MSOData)
binaryMSOData = binaryMSOData[2:]
binaryMSOData = binaryMSOData.zfill(8)
binaryBufferD0[i] = binaryMSOData[7]
binaryBufferD1[i] = binaryMSOData[6]
binaryBufferD2[i] = binaryMSOData[5]
binaryBufferD3[i] = binaryMSOData[4]
binaryBufferD4[i] = binaryMSOData[3]
binaryBufferD5[i] = binaryMSOData[2]
binaryBufferD6[i] = binaryMSOData[1]
binaryBufferD7[i] = binaryMSOData[0]
return binaryBufferD0, \
binaryBufferD1, \
binaryBufferD2, \
binaryBufferD3, \
binaryBufferD4, \
binaryBufferD5, \
binaryBufferD6, \
binaryBufferD7
def splitMSODataFast(dataLength, data):
"""
    This implementation works on either port in the same way as the splitMSOData method above, albeit in a
    more efficient manner.
Returns a tuple of 8 arrays, each of which is the values over time of a different digital channel.
The tuple contains the channels in order (D7, D6, D5, ... D0) or equivalently (D15, D14, D13, ... D8).
splitMSODataFast(
c_int32 dataLength
c_int16 array data
)
"""
# Makes an array for each digital channel
bufferBinaryDj = (
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
np.chararray(dataLength.value),
)
# Splits out the individual bits from the port into the binary values for each digital channel/pin.
for i in range(dataLength.value):
for j in range(8):
bufferBinaryDj[j][i] = 1 if (data[i] & (1 << (7-j))) else 0
return bufferBinaryDj
def assert_pico_ok(status):
"""
assert_pico_ok(
status
)
"""
# checks for PICO_OK status return
if status != PICO_STATUS['PICO_OK']:
raise PicoSDKCtypesError("PicoSDK returned '{}'".format(PICO_STATUS_LOOKUP[status]))
def assert_pico2000_ok(status):
"""
assert_pico_ok(
status
)
"""
    # ps2000-series calls return a positive value on success, so anything else is an error
    if status <= 0:
        raise PicoSDKCtypesError("Unsuccessful API call")
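if __name__ == "__main__":
    # Self-check sketch (not part of the PicoSDK API): exercise the pure-Python
    # conversion helpers with a hypothetical 16-bit device (maxADC = 32767) on the
    # +/-2 V input range (index 7 of channelInputRanges).
    import ctypes
    maxADC = ctypes.c_int32(32767)
    counts = mV2adc(1000, 7, maxADC)
    print("1000 mV ->", counts, "ADC counts")
    print(counts, "ADC counts ->", round(adc2mV([counts], 7, maxADC)[0], 2), "mV")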
```
|
{
"source": "jerzygajda/FlatQL",
"score": 3
}
|
#### File: FlatQL/tests/test_helpers.py
```python
import unittest
from flatql.helpers import convert_pseudo_list, simple_rewrite
class TestDocument(unittest.TestCase):
def test_simple_rewrite(self):
result = simple_rewrite('aaa', 'bbb', {'test': 1})
self.assertEqual(result, ('bbb', {'test': 1}))
def test_convert_pseudo_list(self):
input_data = {'items': {'#1': {'test': 1},
'#0': {'test': 0},
'#2': {'test':2},
'#3': {'test': 3}}}
expected = {'items': [{'test': 0}, {'test': 1}, {'test': 2}, {'test': 3}]}
result = convert_pseudo_list(input_data)
self.assertEqual(result, expected)
```
#### File: FlatQL/tests/test_tools.py
```python
import unittest
from flatql import get_in, set_in, find_in_path, find_in_paths, transform, rewrite_path, extract
class TestDocument(unittest.TestCase):
def test_get_in_list_items(self):
data = {'a': ['b', 'c']}
self.assertEqual(get_in(data, 'a.#0'), 'b')
self.assertEqual(get_in(data, 'a.#1'), 'c')
self.assertEqual(get_in(data, 'a.#2'), None)
self.assertEqual(get_in(data, 'a.#-1'), 'c')
self.assertEqual(get_in(data, 'a.#-2'), 'b')
self.assertEqual(get_in(data, 'a.#-3'), None)
def test_get_with_dict(self):
data = {'a': {'b': {'c': 'Value'}}}
self.assertEqual(get_in(data, 'a.b.c'), 'Value')
def test_get_in_without_data(self):
self.assertEqual(get_in({}, 'a.b.c.d'), None)
self.assertEqual(get_in({}, 'a.#0'), None)
def test_set_in_with_dict(self):
data = {'a': {'b': 'c'}}
expected = {'a': {'b': 'item'}}
result = set_in(data, 'a.b', 'item')
self.assertEqual(result, expected)
def test_set_in_with_list(self):
data = {'item': ['a', 'b', 'c']}
expected = {'item': ['d', 'e', 'f']}
result = set_in(data, 'item.#0', 'd')
result = set_in(result, 'item.#1', 'e')
result = set_in(result, 'item.#2', 'f')
self.assertEqual(result, expected)
def test_set_in_without_data(self):
data = {}
expected = {'a': {'b': 'c'}, 'b': [{'name': 'Item 1'},
{'name': 'Item 2'},
{'name': 'Item 3'}],
'c': ['Item 1', 'Item 2']}
result = set_in(data, 'a.b', 'c')
result = set_in(result, 'b.#0.name', 'Item 1')
result = set_in(result, 'b.#2.name', 'Item 2')
result = set_in(result, 'b.#3.name', 'Item 3')
result = set_in(result, 'c.#0', 'Item 1')
result = set_in(result, 'c.#1', 'Item 2')
self.assertEqual(result, expected)
def test_find_in_path(self):
data = {'res_uuid': '00000000-0000-0000-0000-000000000001',
'test': {'id': '00000000-0000-0000-0000-000000000002'},
'list': [{'id': '00000000-0000-0000-0000-000000000003'},
{'id': '00000000-0000-0000-0000-000000000004'}],
'deep_list': [
{'items': [{'item': {'id': '00000000-0000-0000-0000-000000000005'}}]}]}
self.assertEqual(find_in_path(data, 'res_uuid'),
['00000000-0000-0000-0000-000000000001'])
self.assertEqual(find_in_path(data, 'list.#0.id'),
['00000000-0000-0000-0000-000000000003'])
self.assertEqual(find_in_path(data, 'deep_list.*.items.*.item.id'),
['00000000-0000-0000-0000-000000000005'])
def test_find_in_paths(self):
data = {'res_uuid': '00000000-0000-0000-0000-000000000001',
'test': {'id': '00000000-0000-0000-0000-000000000002'},
'list': [{'id': '00000000-0000-0000-0000-000000000003'},
{'id': '00000000-0000-0000-0000-000000000004'}],
'deep_list': [
{'items': [{'item': {'id': '00000000-0000-0000-0000-000000000005'}}]}]}
expected = ['00000000-0000-0000-0000-000000000005',
'00000000-0000-0000-0000-000000000003',
'00000000-0000-0000-0000-000000000004']
result = find_in_paths(data, ['deep_list.*.items.*.item.id', 'list.*.id'])
self.assertEqual(result, expected)
def test_transform_result(self):
resource = {'res_uuid': '00000000-0000-0000-0000-000000000001',
'res_name': 'Test resource',
'test': {'id': '00000000-0000-0000-0000-000000000002'},
'list': [{'id': '00000000-0000-0000-0000-000000000003'},
{'id': '00000000-0000-0000-0000-000000000004'}]}
config = {'res_uuid': 'id', 'list.*.id': 'list.{1}', 'res_name': 'name'}
result = transform(resource, config)
expected = {'id': '00000000-0000-0000-0000-000000000001',
                    'name': 'Test resource',
'list': ['00000000-0000-0000-0000-000000000003',
'00000000-0000-0000-0000-000000000004']}
self.assertEqual(result, expected)
def test_transform_with_manifest(self):
def transform_item(item, source_path, target_path, manifest):
target_path = rewrite_path(source_path, target_path)
res_id = item.get('id')
item_from_manifest = manifest.get(res_id)
result = {'name': item_from_manifest.get('res_name'),
'id': item_from_manifest.get('res_uuid')}
return (target_path, result)
resource = {'res_uuid': '00000000-0000-0000-0000-000000000001',
'test': {'id': '00000000-0000-0000-0000-000000000002'},
'list': [{'id': '00000000-0000-0000-0000-000000000003'},
{'id': '00000000-0000-0000-0000-000000000004'}]}
manifest = {'00000000-0000-0000-0000-000000000003':
{'res_name': 'test 1', 'res_uuid': '00000000-0000-0000-0000-000000000003'},
'00000000-0000-0000-0000-000000000004':
{'res_name': 'test 2', 'res_uuid': '00000000-0000-0000-0000-000000000004'}}
config = {'res_uuid': 'id',
'list.*': (transform_item, 'list.{1}', manifest)}
result = transform(resource, config)
expected = {'id': '00000000-0000-0000-0000-000000000001',
'list': [
{'name': 'test 1', 'id': '00000000-0000-0000-0000-000000000003'},
{'name': 'test 2', 'id': '00000000-0000-0000-0000-000000000004'}]}
self.assertEqual(result, expected)
def test_simple_transform(self):
resource = {'a': {'aa': {'aaa': 'value'}}}
config = {'a.aa.aaa': 'b.bb.bbb'}
result = transform(resource, config)
expected = {'b': {'bb': {'bbb': 'value'}}}
self.assertEqual(result, expected)
def test_simple_transform_without_full_path(self):
resource = {'a': {'aa': {'aaa': 'value'}}}
config = {'a.aa': 'b.bb'}
result = transform(resource, config)
expected = {'b': {'bb': {'aaa': 'value'}}}
self.assertEqual(result, expected)
def test_simple_transform_list(self):
resource = {'a': {'aa': [{'item': 1}, {'item': 2}]}}
config = {'a.aa.*.item': 'b.bb.{2}.element'}
result = transform(resource, config)
expected = {'b': {'bb': [{'element': 1}, {'element': 2}]}}
self.assertEqual(result, expected)
def test_transform_with_list_index(self):
resource = {'a': {'aa': [{'item': 1}, {'item': 2}]}}
config = {'a.aa.#1.item': 'item'}
result = transform(resource, config)
expected = {'item': 2}
self.assertEqual(result, expected)
def test_transform_key_rename(self):
resource = {'test': {'a': 'aa', 'b': 'bb'}}
config = {'test': 'x'}
result = transform(resource, config)
expected = {'x': {'a': 'aa', 'b': 'bb'}}
self.assertEqual(result, expected)
def test_transform_with_path_parts_reorder(self):
resource = {
'a': {'aa': 'aaa'},
'b': {'bb': 'bbb'},
'c': {'cc': 'ccc'}
}
config = {'*.*': '{1}.{0}'}
result = transform(resource, config)
expected = {
'aa': {'a': 'aaa'},
'bb': {'b': 'bbb'},
'cc': {'c': 'ccc'}
}
self.assertEqual(result, expected)
def test_complex_transform(self):
resource = {'count': 2, 'entries': [
{'res_name': 'A', 'authors': [{'id': 1, 'res_name': 'Author 1'},
{'id': 2, 'res_name': 'Author 2'}]},
{'res_name': 'B', 'authors': [{'id': 4, 'res_name': 'Author 4'},
{'id': 1, 'res_name': 'Author 1'}]}]}
config = {'count': 'elements',
'entries.*.res_name': 'items.{1}.name',
'entries.*.authors.*.id': 'items.{1}.authors.{3}.ref'}
result = transform(resource, config)
expected = {'elements': 2, 'items': [
{'name': 'A', 'authors': [{'ref': 1}, {'ref': 2}]},
{'name': 'B', 'authors': [{'ref': 4}, {'ref': 1}]}]}
self.assertEqual(result, expected)
def test_transform_lists_with_index(self):
resource = {'count': 2, 'entries': [
{'res_name': 'A', 'authors': [{'id': 1, 'res_name': 'Author 1'},
{'id': 2, 'res_name': 'Author 2'}]},
{'res_name': 'B', 'authors': [{'id': 4, 'res_name': 'Author 4'},
{'id': 1, 'res_name': 'Author 1'}]}]}
config = {
'entries.#0.authors.#0.res_name': 'name',
'entries.#0.authors.#0.id': 'id'
}
result = transform(resource, config)
expected = {'name': 'Author 1', 'id': 1}
self.assertEqual(result, expected)
def test_transform_with_function(self):
def transform_item(item, path, template):
target_path = rewrite_path(path, template)
return (target_path, {'test': 1})
resource = {'authors': [{'id': 1, 'res_name': 'Author 1'},
{'id': 2, 'res_name': 'Author 2'}]}
config = {'authors.*': (transform_item, 'creator.{1}')}
result = transform(resource, config)
expected = {'creator': [{'test': 1}, {'test': 1}]}
self.assertEqual(result, expected)
def test_transform_with_deep_lists(self):
resource = {'list': [{'id': 1}, {'id': 2}],
'deep_list': [{'name': 'Item 1',
'authors': [{'name': 'Author 1'}, {'name': 'Author 2'}]},
{'name': 'Item 2',
'authors': [{'name': 'Author 3'}, {'name': 'Author 4'}]}]}
config = {'list.*.id': 'list.{1}.uuid',
'deep_list.*.name': 'deep_list.{1}.display_name',
'deep_list.*.authors.*.name': 'deep_list.{1}.authors.{3}.n'}
result = transform(resource, config)
expected = {'deep_list':
[{'authors': [{'n': 'Author 1'}, {'n': 'Author 2'}],
'display_name': 'Item 1'},
{'authors': [{'n': 'Author 3'}, {'n': 'Author 4'}],
'display_name': 'Item 2'}],
'list': [{'uuid': 1}, {'uuid': 2}]}
self.assertEqual(result, expected)
def test_transform_dict_with_list_to_list(self):
resource = {'list': [{'id': 1, 'name': 'Item 1'}, {'id': 2, 'name': 'Item 2'}]}
config = {'list.*.id': '{1}.id',
'list.*.name': '{1}.title'}
result = transform(resource, config)
expected = [{'id': 1, 'title': 'Item 1'}, {'id': 2, 'title': 'Item 2'}]
self.assertEqual(result, expected)
def test_rewrite_path(self):
self.assertEqual('item.#0.id', rewrite_path('list.#0.id', 'item.{1}.id'))
self.assertEqual('item.#0.#1.id', rewrite_path('list.#0.#1.id', 'item.{1}.{2}.id'))
self.assertEqual('#0.id', rewrite_path('list.#0.id', '{1}.id'))
self.assertEqual('#100.id', rewrite_path('list.#100.id', '{1}.id'))
self.assertEqual('items.item_1.uuid', rewrite_path('items.item_1.id', 'items.{1}.uuid'))
self.assertEqual('d.c.b.a', rewrite_path('a.b.c.d', '{3}.{2}.{1}.{0}'))
def test_transform_list_first_and_last_element(self):
resource = [{'id': 1, 'name': 'First'},
{'id': 2, 'name': 'Second'},
{'id': 3, 'name': 'Last'}]
config = {'#0.id': 'first.id',
'#0.name': 'first.title',
'#-1.id': 'last.id',
'#-1.name': 'last.title'}
result = transform(resource, config)
expected = {'first': {'id': 1, 'title': 'First'}, 'last': {'id': 3, 'title': 'Last'}}
self.assertEqual(result, expected)
def test_transform_with_dict_keys_wildcard(self):
resource = {
'items': {
'item_1': {'id': 1, 'name': 'Item 1'},
'item_2': {'id': 2, 'name': 'Item 2'},
'item_3': {'id': 3, 'name': 'Item 3'}
}
}
config = {'items.*.name': 'elements.{1}.title'}
result = transform(resource, config)
expected = {
'elements': {
'item_1': {'title': 'Item 1'},
'item_2': {'title': 'Item 2'},
'item_3': {'title': 'Item 3'}
}
}
self.assertEqual(result, expected)
def test_extract(self):
resource = {
'items': [
{'id': 1, 'name': 'Item 1', 'description': 'Item 1 description'},
{'id': 2, 'name': 'Item 2', 'description': 'Item 2 description'},
{'id': 3, 'name': 'Item 3', 'description': 'Item 3 description'}
]
}
paths = ['items.*.name', 'items.*.id']
result = extract(resource, paths)
expected = {
'items': [
{'id': 1, 'name': 'Item 1'},
{'id': 2, 'name': 'Item 2'},
{'id': 3, 'name': 'Item 3'}
]
}
self.assertEqual(result, expected)
def test_extract_with_dict_keys_wildcard(self):
resource = {
'items': {
'item_1': {'id': 1, 'name': 'Item 1'},
'item_2': {'id': 2, 'name': 'Item 2'},
'item_3': {'id': 3, 'name': 'Item 3'}
}
}
paths = ['items.*.name']
result = extract(resource, paths)
expected = {
'items': {
'item_1': {'name': 'Item 1'},
'item_2': {'name': 'Item 2'},
'item_3': {'name': 'Item 3'}
}
}
self.assertEqual(result, expected)
```
|
{
"source": "jerzyjerzy8/j-zelle",
"score": 3
}
|
#### File: jerzyjerzy8/j-zelle/ch13ex4_maxrec.py
```python
def max_rec(lst):
"""Return the largest number in lst."""
if len(lst) == 1:
return lst[0]
m = max_rec(lst[1:])
if lst[0] > m:
return lst[0]
else:
return m
def max_rec_tests():
lst = [9, 0, -1, 14, -4, 7, 3]
assert max_rec(lst) == 14
lst = [0, 3.14, 9.99, 0, 9.98, 9.99, 3.14]
assert max_rec(lst) == 9.99
lst = [10, 2, 8, 0, 3]
assert max_rec(lst) == 10
print("All tests passed!")
```
#### File: jerzyjerzy8/j-zelle/ch13ex6_numtoeng.py
```python
eng_numbers = ["Zero", "One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight", "Nine"]
def num_to_eng(num):
"""Return the digits of a number in English as string.
E.g. num_to_eng(153) == "One Five Three" """
if num // 10 == 0:
return eng_numbers[num]
return f"{num_to_eng(num // 10)} {num_to_eng(num % 10)}"
def num_to_eng_tests():
assert num_to_eng(153) == "One Five Three"
assert num_to_eng(12345) == "One Two Three Four Five"
assert num_to_eng(0) == "Zero"
assert num_to_eng(100) == "One Zero Zero"
print("All tests passed!")
```
#### File: jerzyjerzy8/j-zelle/ch7ex11_leapyear.py
```python
def is_leap_year(year):
"""Check whether a year is a leap year.
year - integer > 0
Return values: True if the year is a leap year, false otherwise."""
if year % 4 != 0:
return False
if year % 100 == 0 and year % 400 != 0:
return False
return True
def is_leap_year_test():
"""Test the is_leap_year(year) function."""
test_cases = [1800, 1900, 1600, 2000, 1324, 2020, 3001, 2029]
result_list = [False, False, True, True, True, True, False, False]
for i in range(len(test_cases)):
tested = is_leap_year(test_cases[i])
result = result_list[i]
assert tested == result, (
"for {} is {}, should be {}".format(test_cases[i], tested, result)
)
print("All tests have been passed.")
```
#### File: jerzyjerzy8/j-zelle/ch8ex7_goldbachc.py
```python
from math import sqrt, floor
def find_prime_summands(n):
"""Find two primes that add up to n.
Parameters:
n - even natural number"""
if n % 2 == 1:
return None, None
    prime = 2  # 1 is not prime, so start the search at the smallest prime
for i in range(n//2):
if is_prime(n-prime):
return prime, n-prime
prime = next_prime(prime)
print("An error occured. Couldn't find two primes adding to ", n)
def is_prime(n):
"""Returns True if n is a prime number, False otherwise."""
for divisor in range(2, floor(sqrt(n)) + 1):
if n % divisor == 0:
return False
return True
def next_prime(n):
"""Returns the next prime number after n."""
while True:
n += 1
if is_prime(n):
return n
```
#### File: jerzyjerzy8/j-zelle/ch9ex10_piest.py
```python
from random import random
def piest():
"""Return the estimation of pi after n Monte Carlo throws."""
welcome_user()
n = get_n()
hits = throw_n_times(n)
pi = calculate_pi(hits, n)
present_to_user(pi)
def welcome_user():
print("Program estimates the pi number based on the Monte Carlo method.")
def get_n():
"""Ask user number of throws and return it as a int type."""
n = int(input("Please provide number of throws: "))
print()
return n
def throw_n_times(n):
"""Simulate n throws and return the number of target hits.
For n >= 100 000, prints a progress bar. Without using more advanced
concepts (OOP I guess) it makes the function quite ugly, excuse me
reader!"""
hits = 0
bar_counter = 0
if n >= 100000:
start_bar()
next_5_perc = calc_next_5_perc(n, bar_counter)
print_bar = True
else:
print_bar = False
for i in range(n):
is_hit = throw()
if is_hit:
hits += 1
if print_bar and i >= next_5_perc:
update_bar()
bar_counter += 5
next_5_perc = calc_next_5_perc(n, bar_counter)
if print_bar:
finish_bar()
return hits
def calc_next_5_perc(n, bar_counter):
return bar_counter * n / 100
def start_bar():
print(" |start finish|")
print("Progress: ", end="")
def update_bar():
print("#", end="")
def finish_bar():
print()
print()
def throw():
"""Simulate a throw and return wheter target was hit."""
x = rand_coord()
y = rand_coord()
h = is_hit(x, y)
return h
def rand_coord():
"""Return a number in the range: <-1, 1)."""
return 2*random() - 1
def is_hit(x, y):
"""Return wheter given coords hit a circular target of r=1."""
return x*x + y*y <= 1
def calculate_pi(h, n):
return 4 * h / n
def present_to_user(pi):
print("The estimated value of pi, piest = {}".format(pi))
```
#### File: jerzyjerzy8/j-zelle/decoder.py
```python
def decode(message):
"""Decodes a message in Unicode."""
dec_chars = []
for num_str in message.split():
num_int = int(num_str)
dec_chars.append(chr(num_int))
return "".join(dec_chars)
def main():
print(decode("87 104 97 116 32 97 32 83 111 117 114 112 117 115 115 33"))
```
#### File: jerzyjerzy8/j-zelle/face.py
```python
from graphics import *
class Face:
"""Draw a face and control its expression with class' methods."""
def __init__(self, window, center, size):
self.window = window
eye_size = 0.15 * size
eye_off = size / 3.0
mouth_size = 0.8 * size
mouth_off = size / 2.0
# head
self.head = Circle(center, size)
# round eyes
self.r_left_eye = Circle(center, eye_size)
self.r_right_eye = self.r_left_eye.clone()
self.r_left_eye.move(eye_off, -eye_off)
self.r_right_eye.move(-eye_off, -eye_off)
# closed eyes
self.c_left_eye = Line(
Point(center.getX() - eye_size, center.getY()),
Point(center.getX() + eye_size, center.getY()))
self.c_right_eye = self.c_left_eye.clone()
self.c_left_eye.move(eye_off, -eye_off)
self.c_right_eye.move(-eye_off, -eye_off)
# grim mouth
p1 = center.clone()
p1.move(-mouth_size/2, mouth_off)
p2 = center.clone()
p2.move(mouth_size/2, mouth_off)
self.g_mouth = Line(p1, p2)
# smiley mouth
p3 = center.clone()
p3.move(0, 1.5 * mouth_off)
self.s_mouth = Polygon(p1, p2, p3)
# frowny mouth
p4 = center.clone()
p4.move(0, mouth_off/2)
self.f_mouth = Polygon(p1, p2, p4)
# draw default face
self.head.draw(window)
self.left_eye = self.r_left_eye
self.right_eye = self.r_right_eye
self.mouth = self.g_mouth
self.left_eye.draw(window)
self.right_eye.draw(window)
self.mouth.draw(window)
def _undraw(self):
self.left_eye.undraw()
self.right_eye.undraw()
self.mouth.undraw()
def _draw(self):
self.left_eye.draw(self.window)
self.right_eye.draw(self.window)
self.mouth.draw(self.window)
def smile(self):
self._undraw()
self.left_eye = self.r_left_eye
self.right_eye = self.r_right_eye
self.mouth = self.s_mouth
self._draw()
def wink(self):
self._undraw()
self.left_eye = self.r_left_eye
self.right_eye = self.c_right_eye
self.mouth = self.s_mouth
self._draw()
def frown(self):
self._undraw()
self.left_eye = self.r_left_eye
self.right_eye = self.r_right_eye
self.mouth = self.f_mouth
self._draw()
def grieve(self):
self._undraw()
self.left_eye = self.c_left_eye
self.right_eye = self.c_right_eye
self.mouth = self.g_mouth
self._draw()
def test_face():
win = GraphWin("face test", 640, 480)
face = Face(win, Point(320, 240), 150)
fn = face.frown
fn()
## win.getMouse()
## face.smile()
## win.getMouse()
## face.wink()
## win.getMouse()
## face.frown()
## win.getMouse()
## face.grieve()
## win.getMouse()
```
#### File: jerzyjerzy8/j-zelle/scorehistogram.py
```python
from graphics import GraphWin, Rectangle, Text, Point
def drawhistogram(scorefile):
"""Draw a histogram of the scores in scorefile.
The scorefile should contain the scores as numbers from 0 to 10,
each score in a new line. Function draws the histogram for given
scores.
Return value: None"""
################ OPENING THE FILE #########################################
scorefile = open(scorefile, "r")
################ COUNTING THE NUMBER OF EACH GRADE ########################
score_count = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for score in scorefile:
score_count[int(score)] += 1
################ SETTING THE WINDOW #######################################
win_width, win_height = 640, 480
# panel (space under and over the graph) height to win_height
panel_down, panel_up = 0.1667, 0.0833
win = GraphWin("Score histogram", win_width, win_height)
################ TRANSITION TO NEW COORDS #################################
max_count = max(score_count)
# window height in new coords
win_height_nc = max_count / (1 - panel_down - panel_up)
# upper and lower panel height in new coords
panel_down_nc = panel_down * win_height_nc
panel_up_nc = panel_up * win_height_nc
win.setCoords(-0.5, -panel_down_nc, 11, max_count + panel_up_nc)
################ DRAW GRAPH BARS AND LABELS ###############################
bar_width = 0.5 # in new coords
for i in range(len(score_count)):
if score_count[i]: # skip for empty records
# draw bars
Rectangle(Point(i, 0),
Point(i+bar_width, score_count[i])).draw(win)
# draw labels
Text(Point(i+0.25, -panel_down_nc/3),
i).draw(win).setSize(win_height//40)
```
#### File: jerzyjerzy8/j-zelle/theantsgomarching.py
```python
def printlyrics():
"""Print 5 verses of "The Ants Go Marching" song."""
for i in range(5):
print(getaverse(i))
def getaverse(verse_num):
"""Return a complete verse_num.
verse_num - 0-4"""
arg1 = ["one", "two", "three", "four", "five"]
arg2 = [
["suck", "thumb"],
["tie", "shoe"],
["scratch", "knee"],
["do", "chore"],
["eat", "chive"]
]
return start(arg1[verse_num]) + thelittleone(arg2[verse_num]) + end()
def start(arg):
"""Return the first 3 lines of a verse.
arg - "one", "two", "three" ..."""
return line1(arg)*2 + line3(arg)
def line1(arg):
h = hurrah()
return "{} {} {}\n".format(marching(arg), h, h)
def line3(arg):
return "{}\n".format(marching(arg))
def hurrah():
"""Return the "hurrah!" part of the song."""
return "hurrah!"
def marching(arg):
"""Return "The ants go marching..." part of the song.
arg - "one", "two", "three" ..."""
return "The ants go marching {} by {},".format(arg,arg)
def thelittleone(arg):
"""Return the 4th line of a verse.
arg - [verb, object]"""
return "The little one stops to {} his {},\n".format(arg[0], arg[1])
def end():
"""Return the last part of a verse."""
return (
"And they all go marching down...\n"
"In the ground...\n"
"To get out...\n"
"Of the rain.\n"
"{} {} {}\n"
).format(boom(), boom(), boom())
def boom():
"""Return the "Boom!" part of the song."""
return "Boom!"
```
|
{
"source": "jerzyk/django-constance",
"score": 2
}
|
#### File: django-constance/tests/test_database.py
```python
from django.test import TestCase
from constance import settings
from tests.storage import StorageTestsMixin
class TestDatabase(StorageTestsMixin, TestCase):
def setUp(self):
super(TestDatabase, self).setUp()
self.old_backend = settings.BACKEND
settings.BACKEND = 'constance.backends.database.DatabaseBackend'
def tearDown(self):
settings.BACKEND = self.old_backend
```
|
{
"source": "JerzySpendel/drf-nested-routers",
"score": 2
}
|
#### File: drf-nested-routers/rest_framework_nested/routers.py
```python
from __future__ import unicode_literals
import sys
import re
from rest_framework.routers import SimpleRouter, DefaultRouter # noqa: F401
if sys.version_info[0] < 3:
IDENTIFIER_REGEX = re.compile(r"^[^\d\W]\w*$")
else:
IDENTIFIER_REGEX = re.compile(r"^[^\d\W]\w*$", re.UNICODE)
class LookupMixin(object):
"""
Deprecated.
No method override is needed since Django Rest Framework 2.4.
"""
class NestedMixin(object):
def __init__(self, parent_router, parent_prefix, *args, **kwargs):
self.parent_router = parent_router
self.parent_prefix = parent_prefix
self.nest_count = getattr(parent_router, 'nest_count', 0) + 1
self.nest_prefix = kwargs.pop('lookup', 'nested_%i' % self.nest_count) + '_'
super(NestedMixin, self).__init__(*args, **kwargs)
if 'trailing_slash' not in kwargs:
# Inherit trailing_slash only when not specified explicitly.
#
# drf transposes the trailing_slash argument into the actual appended value
# within the route urls. This means that, on the parent class, trailing_slash
# is either '/' or '' for the expected kwarg values True or False, respectively.
# If, however, the trailing_slash property has been further customized beyond
# those two values (for example, to add an optional slash with '/?'), we won't
# be able to set it through the kwargs.
#
# By copying the value of trailing_slash directly, we ensure that our inherited
# behavior is ALWAYS consistent with the parent. If we didn't, we might create
# a situation where the parent's trailing slash is truthy (but not '/') and
# we set our trailing slash to just '/', leading to inconsistent behavior.
self.trailing_slash = parent_router.trailing_slash
parent_registry = [registered for registered
in self.parent_router.registry
if registered[0] == self.parent_prefix]
try:
parent_registry = parent_registry[0]
parent_prefix, parent_viewset, parent_basename = parent_registry
        except (IndexError, ValueError):
raise RuntimeError('parent registered resource not found')
self.check_valid_name(self.nest_prefix)
nested_routes = []
parent_lookup_regex = parent_router.get_lookup_regex(parent_viewset, self.nest_prefix)
self.parent_regex = '{parent_prefix}/{parent_lookup_regex}/'.format(
parent_prefix=parent_prefix,
parent_lookup_regex=parent_lookup_regex
)
# If there is no parent prefix, the first part of the url is probably
# controlled by the project's urls.py and the router is in an app,
# so a slash in the beginning will (A) cause Django to give warnings
# and (B) generate URLs that will require using `//`
if not self.parent_prefix and self.parent_regex[0] == '/':
self.parent_regex = self.parent_regex[1:]
if hasattr(parent_router, 'parent_regex'):
self.parent_regex = parent_router.parent_regex + self.parent_regex
for route in self.routes:
route_contents = route._asdict()
# This will get passed through .format in a little bit, so we need
# to escape it
escaped_parent_regex = self.parent_regex.replace('{', '{{').replace('}', '}}')
route_contents['url'] = route.url.replace('^', '^' + escaped_parent_regex)
nested_routes.append(type(route)(**route_contents))
self.routes = nested_routes
def check_valid_name(self, value):
if IDENTIFIER_REGEX.match(value) is None:
raise ValueError("lookup argument '{}' needs to be valid python identifier".format(value))
class NestedSimpleRouter(NestedMixin, SimpleRouter):
""" Create a NestedSimpleRouter nested within `parent_router`
Args:
parent_router: Parent router. Maybe be a SimpleRouter or another nested
router.
parent_prefix: The url prefix within parent_router under which the
routes from this router should be nested.
lookup:
The regex variable that matches an instance of the parent-resource
will be called '<lookup>_<parent-viewset.lookup_field>'
In the example above, lookup=domain and the parent viewset looks up
on 'pk' so the parent lookup regex will be 'domain_pk'.
Default: 'nested_<n>' where <n> is 1+parent_router.nest_count
"""
pass
class NestedDefaultRouter(NestedMixin, DefaultRouter):
""" Create a NestedDefaultRouter nested within `parent_router`
Args:
parent_router: Parent router. Maybe be a DefaultRouteror another nested
router.
parent_prefix: The url prefix within parent_router under which the
routes from this router should be nested.
lookup:
The regex variable that matches an instance of the parent-resource
will be called '<lookup>_<parent-viewset.lookup_field>'
In the example above, lookup=domain and the parent viewset looks up
on 'pk' so the parent lookup regex will be 'domain_pk'.
Default: 'nested_<n>' where <n> is 1+parent_router.nest_count
"""
pass
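# Usage sketch (mirrors the lookup='domain' example referenced in the docstrings above;
# DomainViewSet and NameserverViewSet are hypothetical viewsets):
#
#     router = SimpleRouter()
#     router.register(r'domains', DomainViewSet)
#     domains_router = NestedSimpleRouter(router, r'domains', lookup='domain')
#     domains_router.register(r'nameservers', NameserverViewSet, basename='domain-nameservers')
#     # yields URLs like /domains/{pk}/ and /domains/{domain_pk}/nameservers/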
```
|
{
"source": "jes1417/home-assistant-config",
"score": 2
}
|
#### File: custom_components/hacs/config_flow.py
```python
import voluptuous as vol
from aiogithubapi import (
AIOGitHubAPIAuthenticationException,
AIOGitHubAPIException,
GitHubDevice,
)
from aiogithubapi.common.const import OAUTH_USER_LOGIN
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from custom_components.hacs.const import DOMAIN
from custom_components.hacs.helpers.functions.configuration_schema import (
hacs_config_option_schema,
)
from custom_components.hacs.helpers.functions.logger import getLogger
from custom_components.hacs.share import get_hacs
from .base import HacsBase
_LOGGER = getLogger()
class HacsFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for HACS."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize."""
self._errors = {}
self.device = None
async def async_step_device(self, user_input):
"""Handle device steps"""
        ## Waiting for token
try:
activation = await self.device.async_device_activation()
return self.async_create_entry(
title="", data={"token": activation.access_token}
)
except (
AIOGitHubAPIException,
AIOGitHubAPIAuthenticationException,
) as exception:
_LOGGER.error(exception)
self._errors["base"] = "auth"
return await self._show_config_form(user_input)
async def async_step_user(self, user_input):
"""Handle a flow initialized by the user."""
self._errors = {}
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if self.hass.data.get(DOMAIN):
return self.async_abort(reason="single_instance_allowed")
if user_input:
            if not all(user_input.values()):
self._errors["base"] = "acc"
return await self._show_config_form(user_input)
## Get device key
if not self.device:
return await self._show_device_form()
## Initial form
return await self._show_config_form(user_input)
async def _show_device_form(self):
"""Device flow"""
self.device = GitHubDevice(
"395a8e669c5de9f7c6e8",
session=aiohttp_client.async_get_clientsession(self.hass),
)
device_data = await self.device.async_register_device()
return self.async_show_form(
step_id="device",
errors=self._errors,
description_placeholders={
"url": OAUTH_USER_LOGIN,
"code": device_data.user_code,
},
)
async def _show_config_form(self, user_input):
"""Show the configuration form to edit location data."""
if not user_input:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
"acc_logs", default=user_input.get("acc_logs", False)
): bool,
vol.Required(
"acc_addons", default=user_input.get("acc_addons", False)
): bool,
vol.Required(
"acc_untested", default=user_input.get("acc_untested", False)
): bool,
vol.Required(
"acc_disable", default=user_input.get("acc_disable", False)
): bool,
}
),
errors=self._errors,
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
return HacsOptionsFlowHandler(config_entry)
class HacsOptionsFlowHandler(config_entries.OptionsFlow):
"""HACS config flow options handler."""
def __init__(self, config_entry):
"""Initialize HACS options flow."""
self.config_entry = config_entry
async def async_step_init(self, _user_input=None):
"""Manage the options."""
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
hacs: HacsBase = get_hacs()
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
if hacs.configuration.config_type == "yaml":
schema = {vol.Optional("not_in_use", default=""): str}
else:
schema = hacs_config_option_schema(self.config_entry.options)
del schema["frontend_repo"]
del schema["frontend_repo_url"]
return self.async_show_form(step_id="user", data_schema=vol.Schema(schema))
```
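As a side note, the `del schema[...]` lines in the options handler work because the schema is still a plain dict at that point and is only wrapped in `vol.Schema` afterwards. A small sketch of that pruning pattern (key names here are illustrative, not HACS's real option keys):

```python
import voluptuous as vol

# Build the option schema as a plain dict first so it can still be edited.
schema = {
    vol.Optional("sidepanel_title", default="HACS"): str,
    vol.Optional("frontend_repo", default=""): str,
    vol.Optional("frontend_repo_url", default=""): str,
}

# Deleting by plain string relies on voluptuous markers comparing equal to the
# key they wrap, which is what the options handler above also depends on.
del schema["frontend_repo"]
del schema["frontend_repo_url"]

data_schema = vol.Schema(schema)  # freeze only after pruning
```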
|
{
"source": "jesa7955/context-translation",
"score": 3
}
|
#### File: data/token_indexers/pretrained_transformer_indexer.py
```python
from typing import Dict, List
import logging
from overrides import overrides
from transformers import AutoTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.tokenizers.token import Token
from allennlp.data.token_indexers.token_indexer import TokenIndexer, IndexedTokenList
logger = logging.getLogger(__name__)
@TokenIndexer.register("customized_pretrained_transformer")
class CustomizedPretrainedTransformerIndexer(TokenIndexer):
"""
This ``TokenIndexer`` assumes that Tokens already have their indexes in them (see ``text_id`` field).
We still require ``model_name`` because we want to form allennlp vocabulary from pretrained one.
This ``Indexer`` is only really appropriate to use if you've also used a
corresponding :class:`PretrainedTransformerTokenizer` to tokenize your input. Otherwise you'll
have a mismatch between your tokens and your vocabulary, and you'll get a lot of UNK tokens.
# Parameters
model_name : ``str``
The name of the ``transformers`` model to use.
namespace : ``str``, optional (default=``tags``)
We will add the tokens in the pytorch_transformer vocabulary to this vocabulary namespace.
We use a somewhat confusing default value of ``tags`` so that we do not add padding or UNK
tokens to this namespace, which would break on loading because we wouldn't find our default
OOV token.
"""
def __init__(
self,
model_name: str,
namespace: str = "tags",
token_min_padding_length: int = 0,
) -> None:
super().__init__(token_min_padding_length)
self._namespace = namespace
self._tokenizer = AutoTokenizer.from_pretrained(model_name)
self._padding_value = self._tokenizer.convert_tokens_to_ids(
[self._tokenizer.pad_token]
)[0]
logger.info(f"Using token indexer padding value of {self._padding_value}")
self._added_to_vocabulary = False
def _add_encoding_to_vocabulary(self, vocab: Vocabulary) -> None:
"""
Copies tokens from ```transformers``` model to the specified namespace.
Transformers vocab is taken from the <vocab>/<encoder> keys of the tokenizer object.
"""
vocab_field_name = None
if hasattr(self._tokenizer, "vocab"):
vocab_field_name = "vocab"
elif hasattr(self._tokenizer, "encoder"):
vocab_field_name = "encoder"
else:
logger.warning(
"""Wasn't able to fetch vocabulary from pretrained transformers lib.
Neither <vocab> nor <encoder> are the valid fields for vocab.
Your tokens will still be correctly indexed, but vocabulary file will not be saved."""
)
if vocab_field_name is not None:
pretrained_vocab = getattr(self._tokenizer, vocab_field_name)
for word, idx in pretrained_vocab.items():
vocab._token_to_index[self._namespace][word] = idx
vocab._index_to_token[self._namespace][idx] = word
@overrides
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
# If we only use pretrained models, we don't need to do anything here.
pass
@overrides
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[int]]:
if not self._added_to_vocabulary:
self._add_encoding_to_vocabulary(vocabulary)
self._added_to_vocabulary = True
indices: List[int] = []
type_ids: List[int] = []
for token in tokens:
if getattr(token, "text_id", None) is not None:
# `text_id` being set on the token means that we aren't using the vocab, we just use
# this id instead. Id comes from the pretrained vocab.
# It is computed in PretrainedTransformerTokenizer.
indices.append(token.text_id)
type_ids.append(token.type_id)
else:
raise KeyError(
"Using PretrainedTransformerIndexer but field text_id is not set"
f" for the following token: {token.text}"
)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1] * len(indices)
return {
"token_ids": indices,
"token_type_ids": type_ids,
"mask": attention_mask,
}
@overrides
def get_empty_token_list(self) -> IndexedTokenList:
return {"token_ids": [], "mask": []}
def __eq__(self, other):
        if isinstance(other, CustomizedPretrainedTransformerIndexer):
for key in self.__dict__:
if key == "tokenizer":
# This is a reference to a function in the huggingface code, which we can't
# really modify to make this clean. So we special-case it.
continue
if self.__dict__[key] != other.__dict__[key]:
return False
return True
return NotImplemented
```
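A minimal usage sketch of the indexer above (not from the original repo, and assuming an AllenNLP version whose `PretrainedTransformerTokenizer` sets `text_id`/`type_id` on its tokens):

```python
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.vocabulary import Vocabulary

model_name = "bert-base-uncased"  # illustrative model choice
tokenizer = PretrainedTransformerTokenizer(model_name)
indexer = CustomizedPretrainedTransformerIndexer(model_name=model_name)

tokens = tokenizer.tokenize("Context-aware translation needs document context.")
vocab = Vocabulary()  # the pretrained vocab is copied into the "tags" namespace on first use

indexed = indexer.tokens_to_indices(tokens, vocab)
print(indexed["token_ids"])
print(indexed["token_type_ids"])
print(indexed["mask"])
```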
#### File: context_nmt/pipelines/context_indicators_generator.py
```python
import collections
import logging
import json
import os
import luigi
import gokart
import tqdm
import torch
import sentencepiece as spm
import sacrebleu
import MeCab
from fairseq.models.transformer import TransformerModel
from fairseq.data import LanguagePairDataset
from context_nmt.pipelines.conversation_dataset_merger import (
MergeMultipleDataset,
CONCAT_TOKEN,
)
logger = logging.getLogger("luigi-interface")
class GenerateContextIndicator(gokart.TaskOnKart):
task_namespace = "context_nmt"
split_name = luigi.Parameter()
dataset_names = luigi.ListParameter()
source_paths = luigi.ListParameter()
source_lang = luigi.Parameter()
target_lang = luigi.Parameter()
context_aware_translation_models = luigi.DictParameter()
context_aware_sentencepiece_model = luigi.Parameter()
max_source_positions = luigi.IntParameter(default=128)
max_target_positions = luigi.IntParameter(default=128)
max_sentences = luigi.IntParameter(default=128)
sentence_translation_model_name = luigi.Parameter(default=None)
sentence_translation_models = luigi.DictParameter(default={})
sentence_sentencepiece_models = luigi.DictParameter(default={})
score_threhold = luigi.FloatParameter(default=0.3)
def requires(self):
return MergeMultipleDataset(
split_name=self.split_name,
dataset_names=self.dataset_names,
source_paths=self.source_paths,
translation_model_name=self.sentence_translation_model_name,
translation_models=self.sentence_translation_models,
sentencepiece_models=self.sentence_sentencepiece_models,
)
def output(self):
name_components = [
self.split_name,
self.source_lang,
self.target_lang,
self.sentence_translation_model_name,
]
return self.make_target("_".join(name_components) + "_context_indicators.pkl")
def run(self):
def tokenize_for_bleu(target):
target = tokenizer.decode_pieces(target.split())
if self.target_lang == "ja":
target = " ".join(
map(
lambda x: x.split("\t")[0],
tagger.parse(target).split("\n")[:-2],
)
)
return target
docs = self.load()
tagger = MeCab.Tagger()
tokenizer = spm.SentencePieceProcessor()
tokenizer.load(self.context_aware_sentencepiece_model)
translation_models = {}
for bias, path in self.context_aware_translation_models.items():
base_path, checkpoint_path = os.path.split(path)
model = (
TransformerModel.from_pretrained(
base_path, checkpoint_file=checkpoint_path
)
.half()
.cuda()
.eval()
)
model.args.max_source_positions = self.max_source_positions
model.args.max_target_positions = self.max_target_positions
translation_models[int(bias)] = model
args = translation_models[-1].args
task = translation_models[-1].task
criterion = task.build_criterion(args)
results = collections.defaultdict(dict)
for doc_id, doc in tqdm.tqdm(docs.items(), total=len(docs)):
parallel_doc = set(
[
sent_id
for sent_id, score in doc["pairs"]
if score >= self.score_threhold
]
)
batches = collections.defaultdict(dict)
targets = {}
for sent_id in parallel_doc:
source, target = [
tokenizer.encode_as_pieces(doc[lang][sent_id])
for lang in (self.source_lang, self.target_lang)
]
available_index = [
index for index in range(0, sent_id) if doc[self.source_lang][index]
]
# context_bias is the parameter which the model is trained with.
# context_sent_index is the index of the actual used contextual
# sentence.
targets[sent_id] = " ".join(target)
for context_bias, _ in translation_models.items():
context_sent_index = None
if context_bias != -1:
if len(available_index) < context_bias:
context_sent_index = -1
else:
context_sent_index = available_index[-context_bias]
source_context = tokenizer.encode_as_pieces(
docs[doc_id][self.source_lang][context_sent_index]
)
real_source = source_context + [CONCAT_TOKEN] + source
else:
real_source = source
if real_source and len(real_source) < self.max_source_positions:
source_sentence = " ".join(real_source)
else:
source_sentence = None
batches[context_bias][sent_id] = source_sentence
batch_results = collections.defaultdict(
lambda: collections.defaultdict(dict)
)
for context_bias, batch in batches.items():
data = [sentence for sentence in batch.values() if sentence]
if not data:
continue
real_targets = {
sent_id: targets[sent_id] for sent_id in batch if batch[sent_id]
}
model = translation_models[context_bias]
args.max_source_positions = self.max_source_positions
args.max_target_positions = self.max_target_positions
translated = model.translate(data)
# Compute BLEU score
                # Negate the BLEU score to ease the downstream results computation
for trans, (sent_id, target) in zip(translated, real_targets.items()):
batch_results[sent_id]["bleu"][
context_bias
] = -sacrebleu.corpus_bleu(
tokenize_for_bleu(trans), tokenize_for_bleu(target)
).score
# Compute loss
src_tokens = [
model.src_dict.encode_line(
real_source,
line_tokenizer=lambda x: x.split(),
add_if_not_exist=False,
).long()
for real_source in data
]
src_lengths = [tokens.numel() for tokens in src_tokens]
tgt_tokens = [
model.tgt_dict.encode_line(
target,
line_tokenizer=lambda x: x.split(),
add_if_not_exist=False,
).long()
for target in real_targets.values()
]
tgt_lengths = [tokens.numel() for tokens in tgt_tokens]
temp_dataset = LanguagePairDataset(
src_tokens,
src_lengths,
model.src_dict,
tgt_tokens,
tgt_lengths,
left_pad_source=args.left_pad_source,
left_pad_target=args.left_pad_target,
max_source_positions=self.max_source_positions,
max_target_positions=self.max_target_positions,
)
reports = collections.defaultdict(list)
iterator = task.get_batch_iterator(
dataset=temp_dataset, max_sentences=self.max_sentences,
)
for sample in iterator.next_epoch_itr(shuffle=False):
sample["net_input"]["src_tokens"] = sample["net_input"][
"src_tokens"
].cuda()
sample["net_input"]["src_lengths"] = sample["net_input"][
"src_lengths"
].cuda()
sample["net_input"]["prev_output_tokens"] = sample["net_input"][
"prev_output_tokens"
].cuda()
sample["target"] = sample["target"].cuda()
with torch.no_grad():
_, _, report = criterion(model.models[0], sample, False)
for key, value in report.items():
reports[key].append(value)
for key in ("loss", "nll_loss"):
for value, (sent_id, _) in zip(
torch.cat(reports[key]), real_targets.items()
):
batch_results[sent_id][key][context_bias] = float(value)
for sent_id, value in batch_results.items():
results[doc_id][sent_id] = value
self.dump(dict(results))
```
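The task above dumps a nested dict of the form `results[doc_id][sent_id] = {"bleu": {bias: -score}, "loss": {...}, "nll_loss": {...}}`. A sketch of how such a dump could be read back to pick the most helpful context bias per sentence (loading directly with `pickle` is an assumption for illustration; inside a gokart pipeline the task's `load()` would normally be used, and the file name is a placeholder):

```python
import pickle

with open("context_indicators.pkl", "rb") as f:  # placeholder path
    results = pickle.load(f)

for doc_id, sentences in results.items():
    for sent_id, indicators in sentences.items():
        # BLEU was stored negated above, so "smaller is better" holds for every
        # indicator (bleu, loss, nll_loss) and min() picks the best context bias.
        best_by_bleu = min(indicators["bleu"], key=indicators["bleu"].get)
        best_by_loss = min(indicators["loss"], key=indicators["loss"].get)
        print(doc_id, sent_id, best_by_bleu, best_by_loss)
```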
|
{
"source": "jesa7955/CycleGAN-PyTorch",
"score": 3
}
|
#### File: CycleGAN-PyTorch/simplified/models.py
```python
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
def deconv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
"""Creates a transposed-convolutional layer, with optional batch normalization.
"""
layers = []
layers.append(nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False))
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
return nn.Sequential(*layers)
def conv(in_channels, out_channels, kernel_size, stride=2, padding=1, batch_norm=True, init_zero_weights=False):
"""Creates a convolutional layer, with optional batch normalization.
"""
layers = []
conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
if init_zero_weights:
conv_layer.weight.data = torch.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.001
layers.append(conv_layer)
if batch_norm:
layers.append(nn.BatchNorm2d(out_channels))
return nn.Sequential(*layers)
class ResnetBlock(nn.Module):
def __init__(self, conv_dim):
super(ResnetBlock, self).__init__()
self.conv_layer = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1)
def forward(self, x):
out = x + self.conv_layer(x)
return out
class CycleGenerator(nn.Module):
"""Defines the architecture of the generator network.
Note: Both generators G_XtoY and G_YtoX have the same architecture in this assignment.
"""
def __init__(self, conv_dim=64, init_zero_weights=False):
super(CycleGenerator, self).__init__()
# 1. Define the encoder part of the generator
self.conv1 = conv(3, conv_dim, 4)
self.conv2 = conv(conv_dim, conv_dim*2, 4)
# 2. Define the transformation part of the generator
self.resnet_block = ResnetBlock(conv_dim*2)
# 3. Define the decoder part of the generator
self.deconv1 = deconv(conv_dim*2, conv_dim, 4)
self.deconv2 = deconv(conv_dim, 3, 4, batch_norm=False)
def forward(self, x):
"""Generates an image conditioned on an input image.
Input
-----
x: BS x 3 x 32 x 32
Output
------
out: BS x 3 x 32 x 32
"""
out = F.relu(self.conv1(x))
out = F.relu(self.conv2(out))
out = F.relu(self.resnet_block(out))
out = F.relu(self.deconv1(out))
        out = torch.tanh(self.deconv2(out))
return out
class DCDiscriminator(nn.Module):
"""Defines the architecture of the discriminator network.
Note: Both discriminators D_X and D_Y have the same architecture in this assignment.
"""
def __init__(self, conv_dim=64):
super(DCDiscriminator, self).__init__()
self.conv1 = conv(3, conv_dim, 4)
self.conv2 = conv(conv_dim, conv_dim*2, 4)
self.conv3 = conv(conv_dim*2, conv_dim*4, 4)
self.conv4 = conv(conv_dim*4, 1, 4, padding=0, batch_norm=False)
def forward(self, x):
out = F.relu(self.conv1(x))
out = F.relu(self.conv2(out))
out = F.relu(self.conv3(out))
out = self.conv4(out).squeeze()
        out = torch.sigmoid(out)
return out
```
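A quick smoke test of the two networks above on a random batch of 32x32 RGB images (shapes follow the docstrings; not part of the original assignment code):

```python
import torch

G = CycleGenerator(conv_dim=64)
D = DCDiscriminator(conv_dim=64)

x = torch.randn(8, 3, 32, 32)   # BS x 3 x 32 x 32
fake = G(x)                     # the generator preserves the input shape
score = D(fake)                 # one score per image after the final squeeze

print(fake.shape)   # torch.Size([8, 3, 32, 32])
print(score.shape)  # torch.Size([8])
```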
|
{
"source": "jesa7955/r2c",
"score": 3
}
|
#### File: models/selfatt/selfattn.py
```python
from typing import Dict, List, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedForward, \
InputVariationalDropout, TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from utils.detector import SimpleDetector
from allennlp.nn.util import masked_softmax, weighted_sum, \
add_positional_features, replace_masked_values, \
add_sentence_boundary_token_ids
from allennlp.nn import InitializerApplicator
@Model.register("MultiModalAttentionQA")
class MultiModalAttentionQA(Model):
def __init__(self,
vocab: Vocabulary,
fusion_encoder: Seq2SeqEncoder,
type_vocab_size: int = 3,
feature_dim: int = 768,
final_mlp_hidden_dim: int = 1024,
input_dropout: float = 0.3,
class_embs: bool=True,
reasoning_use_obj: bool=True,
reasoning_use_answer: bool=True,
reasoning_use_question: bool=True,
initializer: InitializerApplicator = InitializerApplicator(),
):
super(MultiModalAttentionQA, self).__init__(vocab)
self.detector = SimpleDetector(pretrained=True,
average_pool=True,
semantic=class_embs,
final_dim=feature_dim)
######################################################################
self.token_type_embeddings = nn.Embedding(type_vocab_size, feature_dim)
self.bos_token = torch.randn(feature_dim)
self.eos_token = torch.randn(feature_dim)
self.encoder_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
self.feature_dim = feature_dim
self.fusion_encoder = TimeDistributed(fusion_encoder)
self.reasoning_use_obj = reasoning_use_obj
self.reasoning_use_answer = reasoning_use_answer
self.reasoning_use_question = reasoning_use_question
final_mlp_dim = fusion_encoder.get_output_dim()
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(input_dropout, inplace=False),
torch.nn.Linear(final_mlp_dim, final_mlp_hidden_dim),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(input_dropout, inplace=False),
torch.nn.Linear(final_mlp_hidden_dim, 1),
)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(self,
images: torch.Tensor,
objects: torch.LongTensor,
segms: torch.Tensor,
boxes: torch.Tensor,
box_mask: torch.LongTensor,
question: Dict[str, torch.Tensor],
question_tags: torch.LongTensor,
question_mask: torch.LongTensor,
answers: Dict[str, torch.Tensor],
answer_tags: torch.LongTensor,
answer_mask: torch.LongTensor,
metadata: List[Dict[str, Any]] = None,
label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
"""
:param images: [batch_size, 3, im_height, im_width]
:param objects: [batch_size, max_num_objects] Padded objects
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:param question: AllenNLP representation of the question. [batch_size, num_answers, seq_length]
:param question_tags: A detection label for each item in the Q [batch_size, num_answers, seq_length]
:param question_mask: Mask for the Q [batch_size, num_answers, seq_length]
:param answers: AllenNLP representation of the answer. [batch_size, num_answers, seq_length]
:param answer_tags: A detection label for each item in the A [batch_size, num_answers, seq_length]
:param answer_mask: Mask for the As [batch_size, num_answers, seq_length]
:param metadata: Ignore, this is about which dataset item we're on
:param label: Optional, which item is valid
        :return: a dict with the answer logits and probabilities, plus auxiliary losses
"""
        # Trim off boxes that are too long. This is an issue because DataParallel
        # will pad more zeros than are needed.
max_len = int(box_mask.sum(1).max().item())
objects = objects[:, :max_len]
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
segms = segms[:, :max_len]
for tag_type, the_tags in (('question', question_tags), ('answer', answer_tags)):
if int(the_tags.max()) > max_len:
raise ValueError("Oh no! {}_tags has maximum of {} but objects is of dim {}. Values are\n{}".format(
tag_type, int(the_tags.max()), objects.shape, the_tags
))
obj_reps = self.detector(images=images, boxes=boxes, box_mask=box_mask, classes=objects, segms=segms)
##################################################
# Concatenate words features and object features #
# at the dim of sequence #
##################################################
obj_features = obj_reps['obj_reps']
obj_bs, obj_len, obj_dim = obj_features.shape
que_bs, a_num, que_len, que_dim = question['bert'].shape
ans_bs, a_num, ans_len, ans_dim = answers['bert'].shape
        # Add [SEP] and [CLS]. What is really done here is wrapping the question,
        # the answers, and the image objects with <S> </S>, then removing the last
        # two <S> and treating the first one as [CLS]
question_bert, question_mask = add_sentence_boundary_token_ids(
question['bert'].view(-1, que_len, que_dim),
question_mask,
self.bos_token.to(question_mask.device),
self.eos_token.to(question_mask.device))
question_bert = question_bert.view(que_bs, a_num, que_len+2, que_dim)
question_mask = question_mask.view(que_bs, a_num, que_len+2)
answers_bert, answer_mask = add_sentence_boundary_token_ids(
answers['bert'].view(-1, ans_len, ans_dim),
answer_mask,
self.bos_token.to(answer_mask.device),
self.eos_token.to(answer_mask.device))
answers_bert = answers_bert.view(ans_bs, a_num, ans_len+2, ans_dim)[:, :, 1:, :]
answer_mask = answer_mask.view(ans_bs, a_num, ans_len+2)[:, :, 1:]
obj_features, obj_mask = add_sentence_boundary_token_ids(
obj_features,
box_mask,
self.bos_token.to(box_mask.device),
self.eos_token.to(box_mask.device))
obj_features = obj_features.view(obj_bs, obj_len+2, obj_dim)[:, 1:, :]
obj_mask = obj_mask.view(obj_bs, obj_len+2)[:, 1:]
obj_features = torch.stack([obj_features for _ in range(a_num)], dim=1)
obj_mask = torch.stack([obj_mask for _ in range(a_num)], dim=1)
# The shape for the input of transformer is
# batch_size * num_answers * new_seq_length * dim
# where new_seq_length = question_seq_length + 2 +
# answer_seq_lenght + 1 +
# max_num_objects + 1
que_ans_obj = torch.cat((question_bert,
answers_bert,
obj_features), dim=2)
que_ans_obj_mask = torch.cat((question_mask,
answer_mask,
obj_mask), dim=2)
# Add positional features
total_bs, a_num, total_len, total_dim = que_ans_obj.shape
que_ans_obj = add_positional_features(que_ans_obj.view(-1,
total_len,
total_dim)).view(total_bs,
a_num,
total_len,
total_dim)
        # Add type information, which is used to distinguish between
        # Question, Answer, and Image tokens
target_device = que_ans_obj.device
question_type_ids = torch.zeros(que_bs, a_num, que_len+2, dtype=torch.long, device=target_device)
answers_type_ids = 1 - torch.zeros(ans_bs, a_num, ans_len+1, dtype=torch.long, device=target_device)
objs_type_ids = 2 - torch.zeros(obj_bs, a_num, obj_len+1, dtype=torch.long, device=target_device)
token_type_ids = torch.cat((question_type_ids,
answers_type_ids,
objs_type_ids), dim=2)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
que_ans_obj = que_ans_obj + token_type_embeddings
##########################################
        # Self attention
outputs = self.fusion_encoder(que_ans_obj, que_ans_obj_mask)
bs, a_num, seq_len, output_dim = outputs.shape
cls_reps = outputs[:, :, 1, :].squeeze(2)
###########################################
logits = self.final_mlp(cls_reps.view(-1, output_dim)).view(bs, a_num)
###########################################
class_probabilities = F.softmax(logits, dim=-1)
output_dict = {"label_logits": logits, "label_probs": class_probabilities,
'cnn_regularization_loss': obj_reps['cnn_regularization_loss'],
# Uncomment to visualize attention, if you want
# 'qa_attention_weights': qa_attention_weights,
# 'atoo_attention_weights': atoo_attention_weights,
}
if label is not None:
loss = self._loss(logits, label.long().view(-1))
self._accuracy(logits, label)
output_dict["loss"] = loss[None]
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
```
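A stripped-down illustration of the fusion trick used in `forward()` above: question, answer, and object features are concatenated along the sequence dimension and a learned type embedding is added so the encoder can tell the segments apart. This is a standalone sketch with made-up shapes, not the model itself:

```python
import torch
import torch.nn as nn

feature_dim, type_vocab_size = 768, 3
type_embeddings = nn.Embedding(type_vocab_size, feature_dim)

q = torch.randn(2, 4, 14, feature_dim)   # batch, num_answers, question length, dim
a = torch.randn(2, 4, 9, feature_dim)    # answer features
o = torch.randn(2, 4, 20, feature_dim)   # object features

seq = torch.cat((q, a, o), dim=2)
type_ids = torch.cat((torch.full(q.shape[:3], 0, dtype=torch.long),
                      torch.full(a.shape[:3], 1, dtype=torch.long),
                      torch.full(o.shape[:3], 2, dtype=torch.long)), dim=2)

fused = seq + type_embeddings(type_ids)
print(fused.shape)  # torch.Size([2, 4, 43, 768])
```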
|
{
"source": "je-santos/livelossplot",
"score": 2
}
|
#### File: je-santos/livelossplot/setup.py
```python
from setuptools import setup, find_packages
from os import path
import re
def readme():
with open('README.md', encoding='utf-8') as f:
return f.read()
def version():
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'livelossplot/version.py')) as f:
version_file = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
version = version_match.group(1)
return version
setup(
name='livelossplot',
version=version(),
python_requires=">=3.5",
install_requires=[
'ipython', 'matplotlib;python_version>="3.6"', 'matplotlib<3.1;python_version<"3.6"',
'numpy<1.18;python_version<"3.6"',
'bokeh;python_version>="3.6"', 'bokeh<=1.4.0;python_version<"3.6"'
],
description='Live training loss plot in Jupyter Notebook for Keras, PyTorch and others.',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/stared/livelossplot',
author='<NAME>',
author_email='<EMAIL>',
keywords=['keras', 'pytorch', 'plot', 'chart', 'deep-learning'],
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Jupyter',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
packages=find_packages(),
zip_safe=False
)
```
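The `version()` helper above only needs `livelossplot/version.py` to contain a single assignment matching its regex, e.g. (the version number is only an example):

```python
__version__ = '0.5.0'
```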
|
{
"source": "jesboat/fbthrift",
"score": 2
}
|
#### File: py/protocol/test_fastcompact.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import unittest
import thrift.protocol.fastproto as fastproto
# For testing against the pure Python implementation
import thrift.transport.TTransport as TTransport
import thrift.protocol.TProtocol as TProtocol
import thrift.protocol.TCompactProtocol as TCompactProtocol
import dummy.ttypes as dummy
class Compact(unittest.TestCase):
def test_true(self):
dummy_test = dummy.Dummy()
dummy_test.dummy = {b'test': 42}
otrans = TTransport.TMemoryBuffer()
proto = TCompactProtocol.TCompactProtocol(otrans)
fastproto.encode(proto, dummy_test,
(dummy.Dummy, dummy_test.thrift_spec))
value = otrans.getvalue()
trans = TTransport.TMemoryBuffer()
proto = TCompactProtocol.TCompactProtocol(trans)
dummy_test.write(proto)
self.assertEqual(value, trans.getvalue())
itrans = TTransport.TMemoryBuffer(value)
proto = TCompactProtocol.TCompactProtocol(itrans)
new_dummy = dummy.Dummy()
fastproto.decode(proto, new_dummy,
(dummy.Dummy, dummy_test.thrift_spec))
self.assertEqual(new_dummy.dummy[b'test'], 42)
```
#### File: py/transport/TSSLSocketOverHttpTunnel.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
import ssl
from .TSocketOverHttpTunnel import TSocketOverHttpTunnel
from .TTransport import TTransportException
class TSSLSocketOverHttpTunnel(TSocketOverHttpTunnel):
def __init__(self, host, port, proxy_host, proxy_port,
ssl_version=ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_NONE,
ca_certs=None,
keyfile=None,
certfile=None):
TSocketOverHttpTunnel.__init__(self, host, port, proxy_host, proxy_port)
self.ssl_version = ssl_version
self.cert_reqs = cert_reqs
self.keyfile, self.certfile, self.ca_certs = \
keyfile, certfile, ca_certs
def open(self):
TSocketOverHttpTunnel.open(self)
try:
sslh = ssl.SSLSocket(self.handle,
ssl_version=self.ssl_version,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
self.handle = sslh
except ssl.SSLError as e:
self.close()
raise TTransportException(TTransportException.NOT_OPEN,
"SSL error during handshake: " + str(e))
except socket.error as e:
self.close()
raise TTransportException(TTransportException.NOT_OPEN,
"socket error during SSL handshake: " + str(e))
```
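A minimal client-side sketch (not from the test suite) of using the tunnel transport above; host, port, and certificate paths are placeholders, and the generated service client is hypothetical:

```python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

sock = TSSLSocketOverHttpTunnel('service.example.com', 9090,
                                'proxy.example.com', 8080,
                                ca_certs='/path/to/ca.pem')
transport = TTransport.TBufferedTransport(sock)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# client = SomeService.Client(protocol)  # hypothetical generated client

transport.open()  # performs the HTTP CONNECT handshake, then the SSL handshake
```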
#### File: py/transport/TTwisted.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from zope.interface import implementer, Interface, Attribute
from struct import unpack
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
from thrift.server import TServer
from thrift.transport import TTransport
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 1 << 24
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self._errormsg = None
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
if sys.version_info[0] >= 3:
client_req_iter = self.client._reqs.items()
else:
client_req_iter = self.client._reqs.iteritems()
for k, v in client_req_iter:
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message=self._errormsg or 'Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
def lengthLimitExceeded(self, length):
self._errormsg = 'Received frame too large (%s > %s)' % (
length, self.MAX_LENGTH)
self.transport.loseConnection()
class TwistedRpcConnectionContext(TServer.TConnectionContext):
def __init__(self, client_socket):
self._client_socket = client_socket
def getPeerName(self):
return self._client_socket.getpeername()
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 1 << 24
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
server_ctx = TwistedRpcConnectionContext(self.transport.socket)
d = self.factory.processor.process(iprot, oprot, server_ctx)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class ThriftHeaderServerProtocol(Protocol):
MAX_LENGTH = 1 << 24
recvd = b""
def dataReceived(self, recvd):
self.recvd = self.recvd + recvd
while len(self.recvd) >= 4:
length, = unpack(b"!I", self.recvd[:4])
if length > self.MAX_LENGTH:
self.transport.loseConnection()
return
if len(self.recvd) < length + 4:
break
packet = self.recvd[0:4 + length]
self.recvd = self.recvd[4 + length:]
self.stringReceived(packet)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
# HeaderTransport will have already done msg length checking,
# and already adds the frame size. Write directly.
self.transport.write(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = iprot
tmo = tmi
server_ctx = TwistedRpcConnectionContext(self.transport.socket)
d = self.factory.processor.process(iprot, oprot, server_ctx)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
@implementer(IThriftServerFactory)
class ThriftServerFactory(ServerFactory):
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
if isinstance(iprot_factory, THeaderProtocolFactory):
self.protocol = ThriftHeaderServerProtocol
@implementer(IThriftClientFactory)
class ThriftClientFactory(ClientFactory):
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
```
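A rough sketch of wiring `ThriftServerFactory` into a Twisted reactor; `SomeService` and `SomeHandler` are hypothetical generated/user code, and the processor is assumed to be Twisted-aware (its `process()` must return a Deferred and accept the connection context, as the factory above expects):

```python
from twisted.internet import reactor
from thrift.protocol import TBinaryProtocol

processor = SomeService.Processor(SomeHandler())   # hypothetical
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

factory = ThriftServerFactory(processor, pfactory)
reactor.listenTCP(9090, factory)
reactor.run()
```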
#### File: test/py/JSONReaderTest.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import unittest
from thrift.transport.TTransport import TMemoryBuffer
from JsonReaderTest.ttypes import StructContainingOptionalList
from JsonReaderTest.ttypes import StructContainingRequiredList
from thrift.protocol.TProtocol import TProtocolException
class TestJSONReader(unittest.TestCase):
def testReadNullOptionalList(self):
struct = StructContainingOptionalList()
struct.readFromJson('{ "data" : null }')
def testReadNullRequiredList(self):
try:
struct = StructContainingRequiredList()
struct.readFromJson('{ "data" : null }')
            self.fail("Should have failed with required field missing")
        except TProtocolException:
            pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesbu1/alf",
"score": 2
}
|
#### File: alf/algorithms/ddpg_algorithm.py
```python
import functools
import gin
import numpy as np
import torch
import torch.nn as nn
import torch.distributions as td
from typing import Callable
import alf
from alf.algorithms.config import TrainerConfig
from alf.algorithms.off_policy_algorithm import OffPolicyAlgorithm
from alf.algorithms.one_step_loss import OneStepTDLoss
from alf.algorithms.rl_algorithm import RLAlgorithm
from alf.data_structures import TimeStep, Experience, LossInfo, namedtuple
from alf.data_structures import AlgStep, StepType
from alf.nest import nest
import alf.nest.utils as nest_utils
from alf.networks import ActorNetwork, CriticNetwork
from alf.tensor_specs import TensorSpec, BoundedTensorSpec
from alf.utils import losses, common, dist_utils, math_ops, spec_utils
DdpgCriticState = namedtuple("DdpgCriticState",
['critics', 'target_actor', 'target_critics'])
DdpgCriticInfo = namedtuple("DdpgCriticInfo", ["q_values", "target_q_values"])
DdpgActorState = namedtuple("DdpgActorState", ['actor', 'critics'])
DdpgState = namedtuple("DdpgState", ['actor', 'critics'])
DdpgInfo = namedtuple(
"DdpgInfo", ["action_distribution", "actor_loss", "critic"],
default_value=())
DdpgLossInfo = namedtuple('DdpgLossInfo', ('actor', 'critic'))
@gin.configurable
class DdpgAlgorithm(OffPolicyAlgorithm):
"""Deep Deterministic Policy Gradient (DDPG).
Reference:
Lillicrap et al "Continuous control with deep reinforcement learning"
https://arxiv.org/abs/1509.02971
"""
def __init__(self,
observation_spec,
action_spec: BoundedTensorSpec,
actor_network_ctor=ActorNetwork,
critic_network_ctor=CriticNetwork,
use_parallel_network=False,
observation_transformer=math_ops.identity,
env=None,
config: TrainerConfig = None,
ou_stddev=0.2,
ou_damping=0.15,
critic_loss_ctor=None,
num_critic_replicas=1,
target_update_tau=0.05,
target_update_period=1,
rollout_random_action=0.,
dqda_clipping=None,
actor_optimizer=None,
critic_optimizer=None,
debug_summaries=False,
name="DdpgAlgorithm"):
"""
Args:
action_spec (nested BoundedTensorSpec): representing the actions.
actor_network_ctor (Callable): Function to construct the actor network.
``actor_network_ctor`` needs to accept ``input_tensor_spec`` and
``action_spec`` as its arguments and return an actor network.
The constructed network will be called with ``forward(observation, state)``.
critic_network_ctor (Callable): Function to construct the critic
                network. ``critic_network_ctor`` needs to accept ``input_tensor_spec``
which is a tuple of ``(observation_spec, action_spec)``. The
constructed network will be called with
``forward((observation, action), state)``.
use_parallel_network (bool): whether to use parallel network for
calculating critics.
observation_transformer (Callable or list[Callable]): transformation(s)
applied to ``time_step.observation``.
num_critic_replicas (int): number of critics to be used. Default is 1.
env (Environment): The environment to interact with. env is a batched
environment, which means that it runs multiple simulations
                simultaneously. ``env`` only needs to be provided to the root
algorithm.
config (TrainerConfig): config for training. config only needs to be
provided to the algorithm which performs ``train_iter()`` by
itself.
ou_stddev (float): Standard deviation for the Ornstein-Uhlenbeck
(OU) noise added in the default collect policy.
ou_damping (float): Damping factor for the OU noise added in the
default collect policy.
critic_loss_ctor (None|OneStepTDLoss|MultiStepLoss): a critic loss
constructor. If ``None``, a default ``OneStepTDLoss`` will be used.
target_update_tau (float): Factor for soft update of the target
networks.
target_update_period (int): Period for soft update of the target
networks.
rollout_random_action (float): the probability of taking a uniform
random action during a ``rollout_step()``. 0 means always directly
taking actions added with OU noises and 1 means always sample
uniformly random actions. A bigger value results in more
exploration during rollout.
dqda_clipping (float): when computing the actor loss, clips the
gradient dqda element-wise between ``[-dqda_clipping, dqda_clipping]``.
Does not perform clipping if ``dqda_clipping == 0``.
actor_optimizer (torch.optim.optimizer): The optimizer for actor.
critic_optimizer (torch.optim.optimizer): The optimizer for critic.
debug_summaries (bool): True if debug summaries should be created.
name (str): The name of this algorithm.
"""
critic_network = critic_network_ctor(
input_tensor_spec=(observation_spec, action_spec))
actor_network = actor_network_ctor(
input_tensor_spec=observation_spec, action_spec=action_spec)
if use_parallel_network:
critic_networks = critic_network.make_parallel(num_critic_replicas)
else:
critic_networks = alf.networks.NaiveParallelNetwork(
critic_network, num_critic_replicas)
train_state_spec = DdpgState(
actor=DdpgActorState(
actor=actor_network.state_spec,
critics=critic_networks.state_spec),
critics=DdpgCriticState(
critics=critic_networks.state_spec,
target_actor=actor_network.state_spec,
target_critics=critic_networks.state_spec))
super().__init__(
observation_spec,
action_spec,
train_state_spec=train_state_spec,
env=env,
observation_transformer=observation_transformer,
config=config,
debug_summaries=debug_summaries,
name=name)
if actor_optimizer is not None:
self.add_optimizer(actor_optimizer, [actor_network])
if critic_optimizer is not None:
self.add_optimizer(critic_optimizer, [critic_networks])
self._actor_network = actor_network
self._num_critic_replicas = num_critic_replicas
self._critic_networks = critic_networks
self._target_actor_network = actor_network.copy(
name='target_actor_networks')
self._target_critic_networks = critic_networks.copy(
name='target_critic_networks')
self._rollout_random_action = float(rollout_random_action)
if critic_loss_ctor is None:
critic_loss_ctor = OneStepTDLoss
critic_loss_ctor = functools.partial(
critic_loss_ctor, debug_summaries=debug_summaries)
self._critic_losses = [None] * num_critic_replicas
for i in range(num_critic_replicas):
self._critic_losses[i] = critic_loss_ctor(
name=("critic_loss" + str(i)))
self._ou_process = common.create_ou_process(action_spec, ou_stddev,
ou_damping)
self._update_target = common.get_target_updater(
models=[self._actor_network, self._critic_networks],
target_models=[
self._target_actor_network, self._target_critic_networks
],
tau=target_update_tau,
period=target_update_period)
self._dqda_clipping = dqda_clipping
def predict_step(self, time_step: TimeStep, state, epsilon_greedy=1.):
action, state = self._actor_network(
time_step.observation, state=state.actor.actor)
empty_state = nest.map_structure(lambda x: (), self.train_state_spec)
def _sample(a, ou):
if epsilon_greedy == 0:
return a
elif epsilon_greedy >= 1.0:
return a + ou()
else:
ind_explore = torch.where(
torch.rand(a.shape[:1]) < epsilon_greedy)
noisy_a = a + ou()
a[ind_explore[0], :] = noisy_a[ind_explore[0], :]
return a
noisy_action = nest.map_structure(_sample, action, self._ou_process)
noisy_action = nest.map_structure(spec_utils.clip_to_spec,
noisy_action, self._action_spec)
state = empty_state._replace(
actor=DdpgActorState(actor=state, critics=()))
return AlgStep(
output=noisy_action,
state=state,
info=DdpgInfo(action_distribution=action))
def rollout_step(self, time_step: TimeStep, state=None):
if self.need_full_rollout_state():
raise NotImplementedError("Storing RNN state to replay buffer "
"is not supported by DdpgAlgorithm")
def _update_random_action(spec, noisy_action):
random_action = spec_utils.scale_to_spec(
torch.rand_like(noisy_action) * 2 - 1, spec)
ind = torch.where(
torch.rand(noisy_action.shape[:1]) < self.
_rollout_random_action)
noisy_action[ind[0], :] = random_action[ind[0], :]
pred_step = self.predict_step(time_step, state, epsilon_greedy=1.0)
if self._rollout_random_action > 0:
nest.map_structure(_update_random_action, self._action_spec,
pred_step.output)
return pred_step
def _critic_train_step(self, exp: Experience, state: DdpgCriticState):
target_action, target_actor_state = self._target_actor_network(
exp.observation, state=state.target_actor)
target_q_values, target_critic_states = self._target_critic_networks(
(exp.observation, target_action), state=state.target_critics)
q_values, critic_states = self._critic_networks(
(exp.observation, exp.action), state=state.critics)
state = DdpgCriticState(
critics=critic_states,
target_actor=target_actor_state,
target_critics=target_critic_states)
info = DdpgCriticInfo(
q_values=q_values, target_q_values=target_q_values)
return state, info
def _actor_train_step(self, exp: Experience, state: DdpgActorState):
action, actor_state = self._actor_network(
exp.observation, state=state.actor)
q_values, critic_states = self._critic_networks(
(exp.observation, action), state=state.critics)
q_value = q_values.min(dim=1)[0]
dqda = nest_utils.grad(action, q_value.sum())
def actor_loss_fn(dqda, action):
if self._dqda_clipping:
dqda = torch.clamp(dqda, -self._dqda_clipping,
self._dqda_clipping)
loss = 0.5 * losses.element_wise_squared_loss(
(dqda + action).detach(), action)
loss = loss.sum(list(range(1, loss.ndim)))
return loss
actor_loss = nest.map_structure(actor_loss_fn, dqda, action)
state = DdpgActorState(actor=actor_state, critics=critic_states)
info = LossInfo(loss=sum(nest.flatten(actor_loss)), extra=actor_loss)
return AlgStep(output=action, state=state, info=info)
def train_step(self, exp: Experience, state: DdpgState):
critic_states, critic_info = self._critic_train_step(
exp=exp, state=state.critics)
policy_step = self._actor_train_step(exp=exp, state=state.actor)
return policy_step._replace(
state=DdpgState(actor=policy_step.state, critics=critic_states),
info=DdpgInfo(
action_distribution=policy_step.output,
critic=critic_info,
actor_loss=policy_step.info))
def calc_loss(self, experience, train_info: DdpgInfo):
critic_losses = [None] * self._num_critic_replicas
for i in range(self._num_critic_replicas):
critic_losses[i] = self._critic_losses[i](
experience=experience,
value=train_info.critic.q_values[..., i],
target_value=train_info.critic.target_q_values[..., i]).loss
critic_loss = math_ops.add_n(critic_losses)
if (experience.batch_info != ()
and experience.batch_info.importance_weights != ()):
valid_masks = (experience.step_type != StepType.LAST).to(
torch.float32)
valid_n = torch.clamp(valid_masks.sum(dim=0), min=1.0)
priority = (
(critic_loss * valid_masks).sum(dim=0) / valid_n).sqrt()
else:
priority = ()
actor_loss = train_info.actor_loss
return LossInfo(
loss=critic_loss + actor_loss.loss,
priority=priority,
extra=DdpgLossInfo(critic=critic_loss, actor=actor_loss.extra))
def after_update(self, experience, train_info: DdpgInfo):
self._update_target()
def _trainable_attributes_to_ignore(self):
return ['_target_actor_network', '_target_critic_networks']
```
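A minimal construction sketch for the algorithm above (spec shapes are illustrative; a real run would also supply an environment, a `TrainerConfig`, and actor/critic optimizers as described in the docstring):

```python
from alf.tensor_specs import TensorSpec, BoundedTensorSpec

observation_spec = TensorSpec((8, ), dtype="float32")
action_spec = BoundedTensorSpec((2, ), dtype="float32", minimum=-1.0, maximum=1.0)

algorithm = DdpgAlgorithm(
    observation_spec=observation_spec,
    action_spec=action_spec,
    num_critic_replicas=2)
```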
#### File: alf/algorithms/entropy_target_algorithm.py
```python
from absl import logging
import gin
import math
import numpy as np
import torch
import alf
from alf.algorithms.algorithm import Algorithm
from alf.data_structures import namedtuple, AlgStep, LossInfo, StepType
from alf.summary import should_record_summaries
from alf.utils.averager import ScalarWindowAverager
from alf.utils.dist_utils import calc_default_target_entropy, entropy_with_fallback
EntropyTargetLossInfo = namedtuple("EntropyTargetLossInfo", ["neg_entropy"])
EntropyTargetInfo = namedtuple("EntropyTargetInfo", ["loss"])
@gin.configurable
class EntropyTargetAlgorithm(Algorithm):
"""Algorithm for adjusting entropy regularization.
It tries to adjust the entropy regularization (i.e. alpha) so that the
the entropy is not smaller than ``target_entropy``.
The algorithm has three stages:
0. init stage. This is an optional stage. If the initial entropy is already
below ``max_entropy``, then this stage is skipped. Otherwise, the alpha will
be slowly decreased so that the entropy will land at ``max_entropy`` to
trigger the next ``free_stage``. Basically, this stage let the user to choose
an arbitrary large init alpha without considering every specific case.
1. free stage. During this stage, the alpha is not changed. It transitions
to adjust_stage once entropy drops below ``target_entropy``.
2. adjust stage. During this stage, ``log_alpha`` is adjusted using this formula:
.. code-block:: python
((below + 0.5 * above) * decreasing - (above + 0.5 * below) * increasing) * update_rate
Note that ``log_alpha`` will always be decreased if entropy is increasing
even when the entropy is below the target entropy. This is to prevent
overshooting ``log_alpha`` to a too big value. Same reason for always
increasing ``log_alpha`` even when the entropy is above the target entropy.
``update_rate`` is initialized to ``fast_update_rate`` and is reduced by a
    factor of 0.9 whenever the entropy crosses ``target_entropy``. ``update_rate``
is reset to ``fast_update_rate`` if entropy drops too much below
``target_entropy`` (i.e., ``fast_stage_thresh`` in the code, which is the half
of ``target_entropy`` if it is positive, and twice of ``target_entropy`` if
it is negative.
``EntropyTargetAlgorithm`` can be used to approximately reproduce the learning
of temperature in `Soft Actor-Critic Algorithms and Applications <https://arxiv.org/abs/1812.05905>`_.
To do so, you need to use the same ``target_entropy``, set ``skip_free_stage``
    to True, and set ``slow_update_rate`` and ``fast_update_rate`` to 4 times
    the learning rate for the temperature.
"""
def __init__(self,
action_spec,
initial_alpha=0.1,
skip_free_stage=False,
max_entropy=None,
target_entropy=None,
very_slow_update_rate=0.001,
slow_update_rate=0.01,
fast_update_rate=np.log(2),
min_alpha=1e-4,
average_window=2,
debug_summaries=False,
name="EntropyTargetAlgorithm"):
"""
Args:
action_spec (nested BoundedTensorSpec): representing the actions.
initial_alpha (float): initial value for alpha; make sure that it's
large enough for initial meaningful exploration
skip_free_stage (bool): If True, directly goes to the adjust stage.
max_entropy (float): the upper bound of the entropy. If not provided,
``min(initial_entropy * 0.8, initial_entropy / 0.8)`` is used.
initial_entropy is estimated from the first ``average_window``
                steps. The factor 0.8 ensures that we can get a policy less random
                than the initial policy before starting the free stage.
target_entropy (float): the lower bound of the entropy. If not
provided, a default value proportional to the action dimension
is used. This value should be less or equal than ``max_entropy``.
very_slow_update_rate (float): a tiny update rate for ``log_alpha``;
used in stage 0.
slow_update_rate (float): minimal update rate for ``log_alpha``; used
in stage 2.
fast_update_rate (float): maximum update rate for ``log_alpha``; used
                in stage 2.
min_alpha (float): the minimal value of alpha. If <=0, :math:`e^{-100}`
is used.
average_window (int): window size for averaging past entropies.
debug_summaries (bool): True if debug summaries should be created.
"""
super().__init__(debug_summaries=debug_summaries, name=name)
self.register_buffer(
'_log_alpha',
torch.tensor(np.log(initial_alpha), dtype=torch.float32))
self.register_buffer('_stage', torch.tensor(-2, dtype=torch.int32))
self._avg_entropy = ScalarWindowAverager(average_window)
self.register_buffer(
"_update_rate", torch.tensor(
fast_update_rate, dtype=torch.float32))
self._action_spec = action_spec
self._min_log_alpha = -100.
if min_alpha >= 0.:
self._min_log_alpha = np.log(min_alpha)
self._min_log_alpha = torch.tensor(self._min_log_alpha)
flat_action_spec = alf.nest.flatten(self._action_spec)
if target_entropy is None:
target_entropy = np.sum(
list(map(calc_default_target_entropy, flat_action_spec)))
logging.info("target_entropy=%s" % target_entropy)
if max_entropy is None:
# max_entropy will be estimated in the first `average_window` steps.
max_entropy = 0.
self._stage.fill_(-2 - average_window)
else:
assert target_entropy <= max_entropy, (
"Target entropy %s should be less or equal than max entropy %s!"
% (target_entropy, max_entropy))
self.register_buffer("_max_entropy",
torch.tensor(max_entropy, dtype=torch.float32))
if skip_free_stage:
self._stage.fill_(1)
if target_entropy > 0:
self._fast_stage_thresh = 0.5 * target_entropy
else:
self._fast_stage_thresh = 2.0 * target_entropy
self._target_entropy = target_entropy
self._very_slow_update_rate = very_slow_update_rate
self._slow_update_rate = torch.tensor(slow_update_rate)
self._fast_update_rate = torch.tensor(fast_update_rate)
def rollout_step(self, distribution, step_type, on_policy_training):
"""Rollout step.
Args:
distribution (nested Distribution): action distribution from the
policy.
step_type (StepType): the step type for the distributions.
on_policy_training (bool): If False, this step does nothing.
Returns:
AlgStep: ``info`` field is ``LossInfo``, other fields are empty. All
fields are empty If ``on_policy_training=False``.
"""
if on_policy_training:
return self.train_step(distribution, step_type)
else:
return AlgStep()
def train_step(self, distribution, step_type):
"""Train step.
Args:
distribution (nested Distribution): action distribution from the
policy.
step_type (StepType): the step type for the distributions.
Returns:
AlgStep: ``info`` field is ``LossInfo``, other fields are empty.
"""
entropy, entropy_for_gradient = entropy_with_fallback(distribution)
return AlgStep(
output=(),
state=(),
info=EntropyTargetInfo(
loss=LossInfo(
loss=-entropy_for_gradient,
extra=EntropyTargetLossInfo(neg_entropy=-entropy))))
def calc_loss(self, experience, info: EntropyTargetInfo, valid_mask=None):
"""Calculate loss.
Args:
experience (Experience): experience for gradient update
info (EntropyTargetInfo): for computing loss.
valid_mask (tensor): valid mask to be applied on time steps.
Returns:
LossInfo:
"""
loss_info = info.loss
mask = (experience.step_type != StepType.LAST).type(torch.float32)
        if valid_mask is not None:
mask = mask * (valid_mask).type(torch.float32)
entropy = -loss_info.extra.neg_entropy * mask
num = torch.sum(mask)
not_empty = num > 0
num = max(num, 1)
entropy2 = torch.sum(entropy**2) / num
entropy = torch.sum(entropy) / num
entropy_std = torch.sqrt(
torch.max(torch.tensor(0.0), entropy2 - entropy * entropy))
if not_empty:
self.adjust_alpha(entropy)
if self._debug_summaries and should_record_summaries():
with alf.summary.scope(self.name):
alf.summary.scalar("entropy_std", entropy_std)
alpha = torch.exp(self._log_alpha)
return loss_info._replace(loss=loss_info.loss * alpha)
def adjust_alpha(self, entropy):
"""Adjust alpha according to the current entropy.
Args:
entropy (scalar Tensor): the current entropy.
Returns:
adjusted entropy regularization
"""
prev_avg_entropy = self._avg_entropy.get()
avg_entropy = self._avg_entropy.average(entropy)
def _init_entropy():
self._max_entropy.fill_(
torch.min(0.8 * avg_entropy, avg_entropy / 0.8))
self._stage.add_(1)
def _init():
below = avg_entropy < self._max_entropy
decreasing = (avg_entropy < prev_avg_entropy).type(torch.float32)
# -1 * (1 - decreasing) + 0.5 * decreasing
update_rate = (-1 + 1.5 * decreasing) * self._very_slow_update_rate
self._stage.add_(below.type(torch.int32))
self._log_alpha.fill_(
torch.max(self._log_alpha + update_rate, self._min_log_alpha))
def _free():
crossing = avg_entropy < self._target_entropy
self._stage.add_(crossing.type(torch.int32))
def _adjust():
previous_above = self._stage.type(torch.bool)
above = avg_entropy > self._target_entropy
self._stage.fill_(above.type(torch.int32))
crossing = above != previous_above
update_rate = self._update_rate
update_rate = torch.where(crossing, 0.9 * update_rate, update_rate)
update_rate = torch.max(update_rate, self._slow_update_rate)
update_rate = torch.where(entropy < self._fast_stage_thresh,
self._fast_update_rate, update_rate)
self._update_rate.fill_(update_rate)
above = above.type(torch.float32)
below = 1 - above
decreasing = (avg_entropy < prev_avg_entropy).type(torch.float32)
increasing = 1 - decreasing
log_alpha = self._log_alpha + (
(below + 0.5 * above) * decreasing -
(above + 0.5 * below) * increasing) * update_rate
log_alpha = torch.max(log_alpha, self._min_log_alpha)
self._log_alpha.fill_(log_alpha)
if self._stage < -2:
_init_entropy()
if self._stage == -2:
_init()
if self._stage == -1:
_free()
if self._stage >= 0:
_adjust()
alpha = torch.exp(self._log_alpha)
if self._debug_summaries and should_record_summaries():
with alf.summary.scope(self.name):
alf.summary.scalar("alpha", alpha)
alf.summary.scalar("avg_entropy", avg_entropy)
alf.summary.scalar("stage", self._stage)
alf.summary.scalar("update_rate", self._update_rate)
return alpha
```
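A tiny numeric illustration, in plain Python and detached from the class above, of the stage-2 update rule quoted in the docstring:

```python
import math

def adjust_log_alpha(log_alpha, entropy, prev_entropy, target_entropy, update_rate):
    above = float(entropy > target_entropy)
    below = 1.0 - above
    decreasing = float(entropy < prev_entropy)
    increasing = 1.0 - decreasing
    return log_alpha + ((below + 0.5 * above) * decreasing -
                        (above + 0.5 * below) * increasing) * update_rate

# Entropy fell below the target and is still dropping, so log_alpha is pushed up
# (more entropy regularization).
print(adjust_log_alpha(math.log(0.1), entropy=-1.2, prev_entropy=-1.0,
                       target_entropy=-1.0, update_rate=math.log(2)))
```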
#### File: alf/bin/train_play_test.py
```python
from absl import logging
import os
import numpy as np
from pathlib import Path
import subprocess
import sys
import tempfile
from tensorboard.backend.event_processing import event_file_loader
import unittest
from unittest import SkipTest
import alf
SKIP_TODO_MESSAGE = "TODO: convert and test this gin file to pytorch version"
def run_cmd(cmd, cwd=None):
"""Run cmd in a new process and check its exit status
Args:
cmd (list[str]): command and args to run
cwd (str): working directory for the process
"""
logging.info("Running %s", " ".join(cmd))
new_env = os.environ.copy()
process = subprocess.Popen(
cmd, stdout=sys.stderr, stderr=sys.stderr, cwd=cwd, env=new_env)
process.communicate()
    assert process.returncode == 0, ("cmd: {0} exited abnormally".format(
        " ".join(cmd)))
def get_metrics_from_eval_tfevents(eval_dir):
"""Get metrics from tfevents in eval dir
Args:
eval_dir (str): Root directory where eval summaries are stored
Returns:
list[float], list[int]: average returns, and average episode lengths
"""
event_file = None
for root, dirs, files in os.walk(eval_dir):
for file_name in files:
if "events" in file_name and 'profile' not in file_name:
event_file = os.path.join(root, file_name)
break
assert event_file is not None
logging.info("Parse event file:%s", event_file)
episode_returns = []
episode_lengths = []
for event_str in event_file_loader.EventFileLoader(event_file).Load():
if event_str.summary.value:
for item in event_str.summary.value:
if item.tag == 'Metrics/AverageReturn':
episode_returns.append(item.simple_value)
elif item.tag == 'Metrics/AverageEpisodeLength':
episode_lengths.append(item.simple_value)
assert len(episode_returns) > 0
logging.info("Episode returns, %s, episode lengths: %s", episode_returns,
episode_lengths)
return episode_returns, episode_lengths
def get_examples_dir():
"""Get examples directory"""
bin_dir = Path(os.path.abspath(__file__)).parent
examples_dir = os.path.join(Path(bin_dir).parent, 'examples')
return examples_dir
def _to_gin_params(parameters):
"""A helper function that convert key-value parameters to gin parameters"""
return ['--gin_param=%s' % e for e in parameters]
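# For example (illustrative only), _to_gin_params(['TrainerConfig.num_iterations=2'])
# returns ['--gin_param=TrainerConfig.num_iterations=2'], i.e. one --gin_param flag per
# key-value override, ready to be appended to the training command built in _test_train().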
COMMON_TRAIN_CONF = [
# create only one env
'create_environment.num_parallel_environments=1',
# disable summaries
'TrainerConfig.debug_summaries=False',
'TrainerConfig.summarize_grads_and_vars=False',
'TrainerConfig.summarize_action_distributions=False',
# train two iterations
'TrainerConfig.num_iterations=2',
'TrainerConfig.num_env_steps=0',
# only save checkpoint after train iteration finished
'TrainerConfig.num_checkpoints=1',
# disable evaluate
'TrainerConfig.evaluate=False',
]
COMMON_TRAIN_PARAMS = _to_gin_params(COMMON_TRAIN_CONF)
ON_POLICY_TRAIN_CONF = COMMON_TRAIN_CONF + [
'TrainerConfig.unroll_length=4',
]
ON_POLICY_TRAIN_PARAMS = _to_gin_params(ON_POLICY_TRAIN_CONF)
OFF_POLICY_TRAIN_CONF = COMMON_TRAIN_CONF + [
'TrainerConfig.unroll_length=1',
'TrainerConfig.initial_collect_steps=8',
'TrainerConfig.num_updates_per_train_iter=1',
'TrainerConfig.mini_batch_length=2',
'TrainerConfig.mini_batch_size=4',
'TrainerConfig.num_envs=2',
'TrainerConfig.replay_buffer_length=64',
]
OFF_POLICY_TRAIN_PARAMS = _to_gin_params(OFF_POLICY_TRAIN_CONF)
ON_POLICY_ALG_OFF_POLICY_TRAIN_CONF = OFF_POLICY_TRAIN_CONF + [
'TrainerConfig.unroll_length=2',
'TrainerConfig.initial_collect_steps=0',
]
ON_POLICY_ALG_OFF_POLICY_TRAIN_PARAMS = _to_gin_params(
ON_POLICY_ALG_OFF_POLICY_TRAIN_CONF)
PPO_TRAIN_CONF = OFF_POLICY_TRAIN_CONF + [
'TrainerConfig.unroll_length=2', 'TrainerConfig.initial_collect_steps=0',
'TrainerConfig.num_updates_per_train_iter=2'
]
PPO_TRAIN_PARAMS = _to_gin_params(PPO_TRAIN_CONF)
# Run COMMAND in a virtual X server environment
XVFB_RUN = ['xvfb-run', '-a', '-e', '/dev/stderr']
class TrainPlayTest(alf.test.TestCase):
"""Train and play test for examples located in directory
`$PROJECT_ROOT/alf/examples`
    NOTE: It's not reasonable to train all the examples until they reach the
    desired performance; we just test that most of them can run and play
    correctly with a minimal configuration (train a few iterations, play a few
    steps, disable summaries, ...).
"""
    # These are common configuration files, not complete tests, so exclude them
_excluded_ = {
'atari.gin',
'ppo.gin',
}
    # All gin files listed in the directory `$PROJECT_ROOT/alf/examples`
_all_ = set()
_tested_ = set()
_skipped_ = set()
@classmethod
def setUpClass(cls):
super().setUpClass()
examples_dir = get_examples_dir()
for root, dirs, files in os.walk(examples_dir):
for filename in files:
if filename.endswith('.gin'):
cls._all_.add(filename)
@classmethod
def markTested(cls, test):
cls._tested_.add(test)
@classmethod
def markSkipped(cls, test):
cls._skipped_.add(test)
def _skip_if_socialbot_unavailable(self):
from alf.environments import suite_socialbot
if not suite_socialbot.is_available():
self.skipTest("Socialbot env is not available.")
def _skip_if_mario_unavailable(self):
from alf.environments import suite_mario
if not suite_mario.is_available():
self.skipTest("SuperMario is not available.")
def _skip_if_mujoco_unavailable(self):
from alf.environments import suite_robotics
# import other mujoco suites here
if not suite_robotics.is_available():
self.skipTest("Mujoco env is not available.")
def _skip_if_dmlab_unavailable(self):
from alf.environments import suite_dmlab
if not suite_dmlab.is_available():
self.skipTest("DeepMindLab env is not available.")
def _skip_if_bullet_unavailable(self):
try:
import pybullet_envs
except ImportError:
self.skipTest("PyBullet env is not available.")
def _skip_if_atari_unavailable(self):
try:
import atari_py
except ImportError:
self.skipTest("Atari env is not available.")
def _test(self,
gin_file,
skip_checker=None,
extra_train_params=None,
test_play=True,
extra_play_params=None,
test_perf=True,
test_perf_func=None):
"""Test train, play and check performance
Args:
gin_file (str): Path to the gin-config file.
skip_checker (Callable|list[Callable]): callables that will raise a `SkipTest`
exception when the test is not available.
extra_train_params (list[str]): extra params used for training.
            test_play (bool): whether to test play after training.
            extra_play_params (list[str]): extra params used for play.
            test_perf (bool): whether to check performance.
            test_perf_func (Callable): called as ``test_perf_func(episode_returns,
                episode_lengths)``, which checks whether ``episode_returns`` and
                ``episode_lengths`` meet the expected performance.
"""
skip_checker = skip_checker or []
if not isinstance(skip_checker, list):
skip_checker = [skip_checker]
try:
for checker in skip_checker:
checker()
except SkipTest:
self.markSkipped(gin_file)
raise
self.markTested(gin_file)
with tempfile.TemporaryDirectory() as root_dir:
self._test_train(gin_file, extra_train_params, root_dir)
if test_play:
self._test_play(root_dir, extra_play_params)
if test_perf and test_perf_func:
self._test_performance(root_dir, test_perf_func)
def _test_train(self, gin_file, extra_params, root_dir):
"""Test if the training configured by gin_file and extra_params can run
successfully.
Args:
gin_file (str): Path to the gin-config file.
extra_params (list[str]): extra parameters used for training
root_dir (str): Root directory for writing logs/summaries/checkpoints.
"""
examples_dir = get_examples_dir()
cmd = [
'python3',
'-m',
'alf.bin.train',
'--root_dir=%s' % root_dir,
'--gin_file=%s' % gin_file,
'--gin_param=TrainerConfig.random_seed=1',
]
if 'DISPLAY' not in os.environ:
cmd = XVFB_RUN + cmd
cmd.extend(extra_params or [])
run_cmd(cmd=cmd, cwd=examples_dir)
def _test_play(self, root_dir, extra_params):
"""Test if it can play successfully using configuration and checkpoints
saved in root_dir.
Args:
root_dir (str): Root directory where configuration and checkpoints are saved
extra_params (list[str]): extra parameters used for play
"""
cmd = [
'python3', '-m', 'alf.bin.play',
'--root_dir=%s' % root_dir, '--num_episodes=1'
]
if 'DISPLAY' not in os.environ:
cmd = XVFB_RUN + cmd
cmd.extend(extra_params or [])
run_cmd(cmd=cmd)
def _test_performance(self, root_dir, test_func):
"""Test if the performance meet expectations
Args:
root_dir (str): Root directory where logs/summaries are saved
test_func (Callable): called as test_func(episode_returns, episode_lengths)
                which checks whether ``episode_returns`` and ``episode_lengths``
                meet the desired performance.
"""
eval_dir = os.path.join(root_dir, 'eval')
episode_returns, episode_lengths = get_metrics_from_eval_tfevents(
eval_dir)
test_func(episode_returns, episode_lengths)
def test_ac_breakout(self):
self._test(
gin_file='ac_breakout.gin',
skip_checker=self._skip_if_atari_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
def test_ac_cart_pole(self):
def _test_func(returns, lengths):
self.assertGreater(returns[-1], 195)
self.assertGreater(lengths[-1], 195)
self._test(gin_file='ac_cart_pole.gin', test_perf_func=_test_func)
def test_ac_simple_navigation(self):
self._test(
gin_file='ac_simple_navigation.gin',
skip_checker=self._skip_if_socialbot_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_ddpg_pendulum(self):
def _test_func(returns, lengths):
self.assertGreater(returns[-1], -200)
self._test(gin_file='ddpg_pendulum.gin', test_perf_func=_test_func)
def test_ddpg_fetchslide(self):
self._test(
gin_file="ddpg_fetchslide.gin",
skip_checker=self._skip_if_mujoco_unavailable,
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_diayn_pendulum(self):
self._test(
gin_file='diayn_pendulum.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_icm_mountain_car(self):
self._test(
gin_file='icm_mountain_car.gin',
extra_train_params=ON_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_icm_playground(self):
self._test(
gin_file='icm_playground.gin',
skip_checker=self._skip_if_socialbot_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
def test_icm_super_mario(self):
self._test(
gin_file='icm_super_mario.gin',
skip_checker=self._skip_if_mario_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
def test_icm_super_mario_intrinsic_only(self):
self._test(
gin_file="icm_super_mario_intrinsic_only.gin",
skip_checker=self._skip_if_mario_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_merlin_dmlab_collect_good_objects(self):
self._test(
gin_file='icm_super_mario_intrinsic_only.gin',
skip_checker=self._skip_if_dmlab_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_off_policy_ac_breakout(self):
self._test(
gin_file='off_policy_ac_breakout.gin',
skip_checker=self._skip_if_atari_unavailable,
extra_train_params=ON_POLICY_ALG_OFF_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_off_policy_ac_cart_pole(self):
self._test(
gin_file='off_policy_ac_cart_pole.gin',
extra_train_params=ON_POLICY_ALG_OFF_POLICY_TRAIN_PARAMS)
def test_ppo_bullet_humanoid(self):
self._test(
gin_file='ppo_bullet_humanoid.gin',
skip_checker=self._skip_if_bullet_unavailable,
extra_train_params=PPO_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_ppo_cart_pole(self):
def _test_func(returns, lengths):
self.assertGreater(returns[-1], 195)
self._test(gin_file='ppo_cart_pole.gin', test_perf_func=_test_func)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_ppo_icm_super_mario_intrinsic_only(self):
self._test(
gin_file='ppo_icm_super_mario_intrinsic_only.gin',
            skip_checker=self._skip_if_mario_unavailable,
extra_train_params=PPO_TRAIN_PARAMS)
def test_ppo_icubwalk(self):
self._test(
gin_file='ppo_icubwalk.gin',
skip_checker=self._skip_if_socialbot_unavailable,
extra_train_params=PPO_TRAIN_PARAMS)
def test_ppo_pr2(self):
self._test(
gin_file='ppo_pr2.gin',
skip_checker=self._skip_if_socialbot_unavailable,
extra_train_params=PPO_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_rnd_super_mario(self):
self._test(
gin_file='rnd_super_mario.gin',
skip_checker=self._skip_if_mario_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
def test_ppo_rnd_mrevenge(self):
self._test(
gin_file='ppo_rnd_mrevenge.gin',
extra_train_params=PPO_TRAIN_PARAMS)
def test_sac_bipedal_walker(self):
self._test(
gin_file='sac_bipedal_walker.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_sac_fetchreach(self):
self._test(
gin_file="sac_fetchreach.gin",
skip_checker=self._skip_if_mujoco_unavailable,
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_sac_fetchslide(self):
self._test(
gin_file="sac_fetchslide.gin",
skip_checker=self._skip_if_mujoco_unavailable,
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_sac_cart_pole(self):
self._test(
gin_file='sac_cart_pole.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_sac_humanoid(self):
self._test(
gin_file='sac_humanoid.gin',
skip_checker=self._skip_if_mujoco_unavailable,
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_sac_pendulum(self):
def _test_func(returns, lengths):
self.assertGreater(returns[-1], -200)
self._test(gin_file='sac_pendulum.gin', test_perf_func=_test_func)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_sarsa_pendulum(self):
self._test(
gin_file='sarsa_pendulum.gin',
extra_train_params=ON_POLICY_TRAIN_PARAMS)
def test_sarsa_ddpg_pendulum(self):
self._test(
gin_file='sarsa_ddpg_pendulum.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_sarsa_sac_pendulum(self):
self._test(
gin_file='sarsa_sac_pendulum.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
def test_sarsa_sac_bipedal_walker(self):
self._test(
gin_file='sarsa_sac_bipedal_walker.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_trac_breakout(self):
self._test(
gin_file='trac_breakout.gin',
skip_checker=self._skip_if_atari_unavailable,
extra_train_params=ON_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_trac_ddpg_pendulum(self):
self._test(
gin_file='trac_ddpg_pendulum.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_trac_ppo_pr2(self):
self._test(
gin_file='trac_ppo_pr2.gin',
skip_checker=self._skip_if_socialbot_unavailable,
extra_train_params=PPO_TRAIN_PARAMS)
@unittest.skip(SKIP_TODO_MESSAGE)
def test_trac_sac_pendulum(self):
self._test(
gin_file='trac_sac_pendulum.gin',
extra_train_params=OFF_POLICY_TRAIN_PARAMS)
@classmethod
def tearDownClass(cls):
not_tested = cls._all_.difference(cls._tested_)
missed = not_tested.difference(cls._excluded_.union(cls._skipped_))
if missed:
logging.warning(
                'Missing tests for [%s]; it is highly recommended to add tests for them.',
','.join(list(missed)))
cls._all_.clear()
cls._tested_.clear()
cls._skipped_.clear()
super().tearDownClass()
if __name__ == '__main__':
alf.test.main()
```
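As a usage sketch (the checkpoint directory and threshold below are hypothetical, and the import path simply follows the file location above), a performance check pairs ``get_metrics_from_eval_tfevents`` with a small callable, mirroring what ``_test_performance`` does:
```python
# Sketch only: assumes a root_dir produced by an earlier training run with evaluation enabled.
from alf.bin.train_play_test import get_metrics_from_eval_tfevents


def check_cart_pole(returns, lengths):
    # same style of assertion used by test_ac_cart_pole above
    assert returns[-1] > 195 and lengths[-1] > 195


returns, lengths = get_metrics_from_eval_tfevents('/tmp/ac_cart_pole/eval')
check_cart_pole(returns, lengths)
```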
#### File: alf/experience_replayers/replay_buffer_test.py
```python
import torch
from absl.testing import parameterized
import alf
from alf.utils.data_buffer import RingBuffer
from alf.utils.data_buffer_test import get_batch, DataItem, RingBufferTest
from alf.experience_replayers.replay_buffer import ReplayBuffer
class ReplayBufferTest(RingBufferTest):
@parameterized.named_parameters([
('test_sync', False),
('test_async', True),
])
def test_replay_buffer(self, allow_multiprocess):
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
allow_multiprocess=allow_multiprocess)
batch1 = get_batch([0, 4, 7], self.dim, t=0, x=0.1)
replay_buffer.add_batch(batch1, batch1.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))
self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 1)
batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=0, x=0.2)
replay_buffer.add_batch(batch2, batch2.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))
batch = replay_buffer.gather_all()
self.assertEqual(list(batch.t.shape), [8, 1])
# test that RingBuffer detaches gradients of inputs
self.assertFalse(batch.x.requires_grad)
self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 2)
replay_buffer.get_batch(13, 1)[0]
batch = replay_buffer.get_batch(8, 1)[0]
# squeeze the time dimension
batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)
bat1 = alf.nest.map_structure(lambda bat: bat[batch1.env_id], batch)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)
self.assertEqual(bat1.env_id, batch1.env_id)
self.assertEqual(bat1.x, batch1.x)
self.assertEqual(bat1.t, batch1.t)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
self.assertEqual(bat2.t, batch2.t)
for t in range(1, 10):
batch3 = get_batch([0, 4, 7], self.dim, t=t, x=0.3)
j = (t + 1) % self.max_length
s = min(t + 1, self.max_length)
replay_buffer.add_batch(batch3, batch3.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([s, 1, 1, 1, s, 1, 1, s]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([j, 1, 1, 1, j, 1, 1, j]))
batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=1, x=0.2)
replay_buffer.add_batch(batch2, batch2.env_id)
batch = replay_buffer.get_batch(8, 1)[0]
# squeeze the time dimension
batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)
bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id], batch)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)
self.assertEqual(bat3.env_id, batch3.env_id)
self.assertEqual(bat3.x, batch3.x)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
batch = replay_buffer.get_batch(8, 2)[0]
t2 = []
t3 = []
for t in range(2):
batch_t = alf.nest.map_structure(lambda b: b[:, t], batch)
bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id],
batch_t)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id],
batch_t)
t2.append(bat2.t)
self.assertEqual(bat3.env_id, batch3.env_id)
self.assertEqual(bat3.x, batch3.x)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
t3.append(bat3.t)
# Test time consistency
self.assertEqual(t2[0] + 1, t2[1])
self.assertEqual(t3[0] + 1, t3[1])
batch = replay_buffer.get_batch(128, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [128, 2])
batch = replay_buffer.get_batch(10, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [10, 2])
batch = replay_buffer.get_batch(4, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [4, 2])
# Test gather_all()
        # Exception because the sizes of all the environments are not the same
self.assertRaises(AssertionError, replay_buffer.gather_all)
for t in range(2, 10):
batch4 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
replay_buffer.add_batch(batch4, batch4.env_id)
batch = replay_buffer.gather_all()
self.assertEqual(list(batch.t.shape), [8, 4])
# Test clear()
replay_buffer.clear()
self.assertEqual(replay_buffer.total_size, 0)
def test_prioritized_replay(self):
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
prioritized_sampling=True)
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
batch1 = get_batch([1], self.dim, x=0.25, t=0)
replay_buffer.add_batch(batch1, batch1.env_id)
batch, batch_info = replay_buffer.get_batch(1, 1)
self.assertEqual(batch_info.env_ids,
torch.tensor([1], dtype=torch.int64))
self.assertEqual(batch_info.importance_weights, 1.)
self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 2)
batch2 = get_batch([1], self.dim, x=0.5, t=1)
        replay_buffer.add_batch(batch2, batch2.env_id)
batch, batch_info = replay_buffer.get_batch(4, 2)
self.assertEqual(batch_info.env_ids,
torch.tensor([1], dtype=torch.int64))
self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))
self.assertEqual(batch_info.importance_weights, torch.tensor([1.] * 4))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0 = (batch_info.positions == 0).sum()
n1 = (batch_info.positions == 1).sum()
self.assertEqual(n0, 500)
self.assertEqual(n1, 500)
replay_buffer.update_priority(
env_ids=torch.tensor([1, 1], dtype=torch.int64),
positions=torch.tensor([0, 1], dtype=torch.int64),
priorities=torch.tensor([0.5, 1.5]))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0 = (batch_info.positions == 0).sum()
n1 = (batch_info.positions == 1).sum()
self.assertEqual(n0, 250)
self.assertEqual(n1, 750)
batch2 = get_batch([0, 2], self.dim, x=0.5, t=1)
replay_buffer.add_batch(batch2, batch2.env_id)
batch, batch_info = replay_buffer.get_batch(1000, 1)
def _get(env_id, pos):
flag = (
(batch_info.env_ids == env_id) * (batch_info.positions == pos))
w = batch_info.importance_weights[torch.nonzero(
flag, as_tuple=True)[0]]
return flag.sum(), w
n0, w0 = _get(0, 0)
n1, w1 = _get(1, 0)
n2, w2 = _get(1, 1)
n3, w3 = _get(2, 0)
self.assertEqual(n0, 300)
self.assertEqual(n1, 100)
self.assertEqual(n2, 300)
self.assertEqual(n3, 300)
self.assertTrue(torch.all(w0 == 1.2))
self.assertTrue(torch.all(w1 == 0.4))
self.assertTrue(torch.all(w2 == 1.2))
self.assertTrue(torch.all(w3 == 1.2))
replay_buffer.update_priority(
env_ids=torch.tensor([1, 2], dtype=torch.int64),
positions=torch.tensor([1, 0], dtype=torch.int64),
priorities=torch.tensor([1.0, 1.0]))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0, w0 = _get(0, 0)
n1, w1 = _get(1, 0)
n2, w2 = _get(1, 1)
n3, w3 = _get(2, 0)
self.assertEqual(n0, 375)
self.assertEqual(n1, 125)
self.assertEqual(n2, 250)
self.assertEqual(n3, 250)
self.assertTrue(torch.all(w0 == 1.5))
self.assertTrue(torch.all(w1 == 0.5))
self.assertTrue(torch.all(w2 == 1.0))
self.assertTrue(torch.all(w3 == 1.0))
if __name__ == '__main__':
alf.test.main()
```
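The sample counts and importance weights asserted in ``test_prioritized_replay`` follow from treating each stored step's priority as an unnormalized sampling weight; the asserted numbers are consistent with newly added steps receiving the current maximum priority and with importance weights normalized as ``N * p_i`` (both inferred from the test expectations above, not from the buffer's internals). A small arithmetic sketch:
```python
import torch

# Priorities after update_priority([0.5, 1.5]) for env 1, plus two new steps
# (env 0 and env 2) that appear to receive the running maximum priority of 1.5.
priorities = torch.tensor([1.5, 0.5, 1.5, 1.5])  # env0@0, env1@0, env1@1, env2@0
p = priorities / priorities.sum()                # sampling probabilities
expected_counts = 1000 * p                       # -> [300., 100., 300., 300.]
importance_weights = len(priorities) * p         # -> [1.2, 0.4, 1.2, 1.2]
print(expected_counts.tolist(), importance_weights.tolist())
```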
#### File: alf/alf/layers.py
```python
import gin
import copy
import numpy as np
import torch
import torch.nn as nn
from alf.initializers import variance_scaling_init
from alf.nest.utils import get_outer_rank
from alf.tensor_specs import TensorSpec
from alf.utils import common
from alf.utils.math_ops import identity
def normalize_along_batch_dims(x, mean, variance, variance_epsilon):
"""Normalizes a tensor by ``mean`` and ``variance``, which are expected to have
the same tensor spec with the inner dims of ``x``.
Args:
x (Tensor): a tensor of (``[D1, D2, ..] + shape``), where ``D1``, ``D2``, ..
are arbitrary leading batch dims (can be empty).
mean (Tensor): a tensor of ``shape``
variance (Tensor): a tensor of ``shape``
variance_epsilon (float): A small float number to avoid dividing by 0.
Returns:
Normalized tensor.
"""
spec = TensorSpec.from_tensor(mean)
assert spec == TensorSpec.from_tensor(variance), \
"The specs of mean and variance must be equal!"
bs = BatchSquash(get_outer_rank(x, spec))
x = bs.flatten(x)
variance_epsilon = torch.as_tensor(variance_epsilon).to(variance.dtype)
inv = torch.rsqrt(variance + variance_epsilon)
x = (x - mean.to(x.dtype)) * inv.to(x.dtype)
x = bs.unflatten(x)
return x
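# Illustrative example (not part of the original module): with x of shape [B, T, 3] and
# mean/variance of shape [3], get_outer_rank(x, spec) is 2, so BatchSquash flattens x to
# [B*T, 3], applies (x - mean) * rsqrt(variance + eps), and restores the [B, T, 3] shape.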
class BatchSquash(object):
"""Facilitates flattening and unflattening batch dims of a tensor. Copied
from `tf_agents`.
Exposes a pair of matched flatten and unflatten methods. After flattening
only 1 batch dimension will be left. This facilitates evaluating networks
that expect inputs to have only 1 batch dimension.
"""
def __init__(self, batch_dims):
"""Create two tied ops to flatten and unflatten the front dimensions.
Args:
batch_dims (int): Number of batch dimensions the flatten/unflatten
ops should handle.
Raises:
ValueError: if batch dims is negative.
"""
if batch_dims < 0:
raise ValueError('Batch dims must be non-negative.')
self._batch_dims = batch_dims
self._original_tensor_shape = None
def flatten(self, tensor):
"""Flattens and caches the tensor's batch_dims."""
if self._batch_dims == 1:
return tensor
self._original_tensor_shape = tensor.shape
return torch.reshape(tensor,
(-1, ) + tuple(tensor.shape[self._batch_dims:]))
def unflatten(self, tensor):
"""Unflattens the tensor's batch_dims using the cached shape."""
if self._batch_dims == 1:
return tensor
if self._original_tensor_shape is None:
raise ValueError('Please call flatten before unflatten.')
return torch.reshape(
tensor, (tuple(self._original_tensor_shape[:self._batch_dims]) +
tuple(tensor.shape[1:])))
@gin.configurable
class OneHot(nn.Module):
def __init__(self, num_classes):
super().__init__()
self._num_classes = num_classes
def forward(self, input):
return nn.functional.one_hot(
input, num_classes=self._num_classes).to(torch.float32)
@gin.configurable
class FixedDecodingLayer(nn.Module):
def __init__(self,
input_size,
output_size,
basis_type="rbf",
sigma=1.,
tau=0.5):
"""A layer that uses a set of fixed basis for decoding the inputs.
Args:
input_size (int): the size of input to be decoded, representing the
number of representation coefficients
output_size (int): the size of the decoded output
basis_type (str): the type of basis to be used for decoding
- "poly": polynomial basis using Vandermonde matrix
- "cheb": polynomial basis using Chebyshev polynomials
- "rbf": radial basis functions
- "haar": Haar wavelet basis
sigma (float): the bandwidth parameter used for RBF basis.
If None, a default value of 1. will be used.
tau (float): a factor for weighting the basis exponentially
                according to the order (``n``) of the basis, i.e., ``tau**n``.
"""
# get the argument list with vals
self._kwargs = copy.deepcopy(locals())
self._kwargs.pop('self')
self._kwargs.pop('__class__')
super(FixedDecodingLayer, self).__init__()
assert input_size > 0, "input_size should be at least one"
assert basis_type in {"poly", "cheb", "rbf", "haar"
}, ("the specified method "
"{} is not supported".format(basis_type))
self._B = nn.Linear(input_size, output_size, bias=False)
def _polyvander_matrix(n, D, tau=tau):
# non-square matrix [n, D + 1]
x = torch.linspace(-1, 1, n)
B = torch.as_tensor(np.polynomial.polynomial.polyvander(x, D))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.arange(D + 1).float()
basis_weight = tau**exp_factor
return B * basis_weight
def _chebvander_matrix(n, D, tau=tau):
# non-square matrix [n, D + 1]
x = np.linspace(-1, 1, n)
B = torch.as_tensor(np.polynomial.chebyshev.chebvander(x, D))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.arange(D + 1).float()
basis_weight = tau**exp_factor
return B * basis_weight
def _rbf_matrix(n, sigma=1.0):
# square matrix [n, n]
x = torch.linspace(-1, 1, n)
B = torch.empty(n, n)
for d in range(n):
B[:, d] = torch.exp(-(x - x[d])**2 / sigma)
return B
def _haar_matrix(n, tau=tau):
# square matrix [n, n]
def _is_power_of_two(x):
return (x & (x - 1)) == 0
            # only allow the size n to be a power of 2
            assert _is_power_of_two(n), "n is required to be a power of 2"
def _get_haar_matrix(n):
if n > 2:
h = _get_haar_matrix(n // 2)
else:
return torch.Tensor([[1, 1], [1, -1]])
def _kron(A, B):
return torch.einsum("ab,cd->acbd", A, B).view(
A.size(0) * B.size(0),
A.size(1) * B.size(1))
# calculate upper haar part
h_n = _kron(h, torch.Tensor([[1], [1]]))
# calculate lower haar part
h_i = torch.sqrt(torch.Tensor([n / 2])) * _kron(
torch.eye(len(h)), torch.Tensor([[1], [-1]]))
# combine both parts
h = torch.cat((h_n, h_i), dim=1)
return h
B = _get_haar_matrix(n) / torch.sqrt(torch.Tensor([n]))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.ceil(torch.log2(torch.arange(n).float() + 1))
basis_weight = tau**exp_factor
return B * basis_weight
if basis_type == "poly":
B = _polyvander_matrix(output_size, input_size - 1)
elif basis_type == "cheb":
B = _chebvander_matrix(output_size, input_size - 1)
elif basis_type == "rbf":
assert input_size == output_size
B = _rbf_matrix(input_size, sigma=sigma)
elif basis_type == "haar":
assert input_size == output_size
B = _haar_matrix(input_size)
# assign the constructed transformation matrix and set it to be non-trainable
self._B.weight.requires_grad = False
self._B.weight.copy_(B)
def forward(self, inputs):
return self._B(inputs)
@property
def weight(self):
return self._B.weight
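# Example (illustrative): FixedDecodingLayer(16, 16, basis_type="rbf", sigma=1.0) builds a
# fixed 16x16 RBF dictionary, while FixedDecodingLayer(8, 32, basis_type="poly") decodes 8
# coefficients into a length-32 signal. The basis weights are frozen (requires_grad=False),
# so only whatever produces the input coefficients is trained.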
@gin.configurable
class FC(nn.Module):
def __init__(self,
input_size,
output_size,
activation=identity,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A fully connected layer that's also responsible for activation and
customized weights initialization. An auto gain calculation might depend
        on the activation following the linear layer. Consider using this wrapper
        module instead of ``nn.Linear`` if you really care about the weight std
        after init.
Args:
input_size (int): input size
output_size (int): output size
activation (torch.nn.functional):
use_bias (bool): whether use bias
kernel_initializer (Callable): initializer for the FC layer kernel.
If none is provided a ``variance_scaling_initializer`` with gain as
``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to
the std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
# get the argument list with vals
self._kwargs = copy.deepcopy(locals())
self._kwargs.pop('self')
self._kwargs.pop('__class__')
super(FC, self).__init__()
self._activation = activation
self._linear = nn.Linear(input_size, output_size, bias=use_bias)
self._kernel_initializer = kernel_initializer
self._kernel_init_gain = kernel_init_gain
self._bias_init_value = bias_init_value
self._use_bias = use_bias
self.reset_parameters()
def reset_parameters(self):
if self._kernel_initializer is None:
variance_scaling_init(
self._linear.weight.data,
gain=self._kernel_init_gain,
nonlinearity=self._activation)
else:
self._kernel_initializer(self._linear.weight.data)
if self._use_bias:
nn.init.constant_(self._linear.bias.data, self._bias_init_value)
def forward(self, inputs):
return self._activation(self._linear(inputs))
@property
def weight(self):
return self._linear.weight
@property
def bias(self):
return self._linear.bias
def make_parallel(self, n):
"""Create a ``ParallelFC`` using ``n`` replicas of ``self``.
The initialized layer parameters will be different.
"""
return ParallelFC(n=n, **self._kwargs)
@gin.configurable
class ParallelFC(nn.Module):
def __init__(self,
input_size,
output_size,
n,
activation=identity,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""Parallel FC layer.
It is equivalent to ``n`` separate FC layers with the same
``input_size`` and ``output_size``.
Args:
input_size (int): input size
output_size (int): output size
n (int): n independent ``FC`` layers
activation (torch.nn.functional):
use_bias (bool): whether use bias
kernel_initializer (Callable): initializer for the FC layer kernel.
If none is provided a ``variance_scaling_initializer`` with gain
as ``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to
the std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super().__init__()
self._activation = activation
self._weight = nn.Parameter(torch.Tensor(n, output_size, input_size))
if use_bias:
self._bias = nn.Parameter(torch.Tensor(n, output_size))
else:
self._bias = None
for i in range(n):
if kernel_initializer is None:
variance_scaling_init(
self._weight.data[i],
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(self._weight.data[i])
if use_bias:
nn.init.constant_(self._bias.data, bias_init_value)
def forward(self, inputs):
"""Forward
Args:
inputs (torch.Tensor): with shape ``[B, n, input_size]`` or ``[B, input_size]``
Returns:
torch.Tensor with shape ``[B, n, output_size]``
"""
n, k, l = self._weight.shape
if inputs.ndim == 2:
assert inputs.shape[1] == l, (
"inputs has wrong shape %s. Expecting (B, %d)" % (inputs.shape,
l))
inputs = inputs.unsqueeze(0).expand(n, *inputs.shape)
elif inputs.ndim == 3:
assert (inputs.shape[1] == n and inputs.shape[2] == l), (
"inputs has wrong shape %s. Expecting (B, %d, %d)" %
(inputs.shape, n, l))
inputs = inputs.transpose(0, 1) # [n, B, l]
else:
raise ValueError("Wrong inputs.ndim=%d" % inputs.ndim)
if self.bias is not None:
y = torch.baddbmm(
self._bias.unsqueeze(1), inputs,
self.weight.transpose(1, 2)) # [n, B, k]
else:
y = torch.bmm(inputs, self._weight.transpose(1, 2)) # [n, B, k]
y = y.transpose(0, 1) # [B, n, k]
return self._activation(y)
@property
def weight(self):
"""Get the weight Tensor.
Returns:
Tensor: with shape (n, output_size, input_size). ``weight[i]`` is
the weight for the i-th FC layer. ``weight[i]`` can be used for
``FC`` layer with the same ``input_size`` and ``output_size``
"""
return self._weight
@property
def bias(self):
"""Get the bias Tensor.
Returns:
Tensor: with shape (n, output_size). ``bias[i]`` is the bias for the
i-th FC layer. ``bias[i]`` can be used for ``FC`` layer with
the same ``input_size`` and ``output_size``
"""
return self._bias
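def _parallel_fc_shape_sketch():
    """Illustrative sketch (not part of the original module): ``ParallelFC``
    behaves like ``n`` independent ``FC`` layers evaluated with one batched
    matmul; both shared and per-replica inputs yield ``[B, n, output_size]``.
    """
    pfc = ParallelFC(input_size=4, output_size=2, n=3)
    shared = torch.randn(8, 4)           # every replica sees the same [B, input_size] batch
    per_replica = torch.randn(8, 3, 4)   # replica i sees per_replica[:, i, :]
    assert pfc(shared).shape == (8, 3, 2)
    assert pfc(per_replica).shape == (8, 3, 2)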
@gin.configurable
class Conv2D(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
activation=torch.relu_,
strides=1,
padding=0,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A 2D Conv layer that's also responsible for activation and customized
weights initialization. An auto gain calculation might depend on the
        activation following the conv layer. Consider using this wrapper module
        instead of ``nn.Conv2d`` if you really care about the weight std after init.
Args:
in_channels (int): channels of the input image
out_channels (int): channels of the output image
kernel_size (int or tuple):
activation (torch.nn.functional):
strides (int or tuple):
padding (int or tuple):
use_bias (bool):
kernel_initializer (Callable): initializer for the conv layer kernel.
If None is provided a variance_scaling_initializer with gain as
``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to the
std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super(Conv2D, self).__init__()
self._activation = activation
self._conv2d = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=strides,
padding=padding,
bias=use_bias)
if kernel_initializer is None:
variance_scaling_init(
self._conv2d.weight.data,
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(self._conv2d.weight.data)
if use_bias:
nn.init.constant_(self._conv2d.bias.data, bias_init_value)
def forward(self, img):
return self._activation(self._conv2d(img))
@property
def weight(self):
return self._conv2d.weight
@property
def bias(self):
return self._conv2d.bias
@gin.configurable
class ParallelConv2D(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
n,
activation=torch.relu_,
strides=1,
padding=0,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A parallel 2D Conv layer that can be used to perform n independent
2D convolutions in parallel.
It is equivalent to ``n`` separate ``Conv2D`` layers with the same
``in_channels`` and ``out_channels``.
Args:
in_channels (int): channels of the input image
out_channels (int): channels of the output image
kernel_size (int or tuple):
n (int): n independent ``Conv2D`` layers
activation (torch.nn.functional):
strides (int or tuple):
padding (int or tuple):
use_bias (bool):
kernel_initializer (Callable): initializer for the conv layer kernel.
If None is provided a ``variance_scaling_initializer`` with gain
as ``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to the
std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super(ParallelConv2D, self).__init__()
self._activation = activation
self._n = n
self._in_channels = in_channels
self._out_channels = out_channels
self._kernel_size = common.tuplify2d(kernel_size)
self._conv2d = nn.Conv2d(
in_channels * n,
out_channels * n,
kernel_size,
groups=n,
stride=strides,
padding=padding,
bias=use_bias)
for i in range(n):
if kernel_initializer is None:
variance_scaling_init(
self._conv2d.weight.data[i * out_channels:(i + 1) *
out_channels],
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(
self._conv2d.weight.data[i * out_channels:(i + 1) *
out_channels])
# [n*C', C, kernel_size, kernel_size]->[n, C', C, kernel_size, kernel_size]
self._weight = self._conv2d.weight.view(
self._n, self._out_channels, self._in_channels,
self._kernel_size[0], self._kernel_size[1])
if use_bias:
nn.init.constant_(self._conv2d.bias.data, bias_init_value)
# [n*C']->[n, C']
self._bias = self._conv2d.bias.view(self._n, self._out_channels)
else:
self._bias = None
def forward(self, img):
"""Forward
Args:
img (torch.Tensor): with shape ``[B, C, H, W]``
or ``[B, n, C, H, W]``
                where the meanings of the symbols are:
- ``B``: batch size
- ``n``: number of replicas
- ``C``: number of channels
- ``H``: image height
- ``W``: image width.
When the shape of img is ``[B, C, H, W]``, all the n 2D Conv
operations will take img as the same shared input.
When the shape of img is ``[B, n, C, H, W]``, each 2D Conv operator
will have its own input data by slicing img.
Returns:
torch.Tensor with shape ``[B, n, C', H', W']``
            where the meanings of the symbols are:
- ``B``: batch
- ``n``: number of replicas
- ``C'``: number of output channels
- ``H'``: output height
- ``W'``: output width
"""
if img.ndim == 4:
# the shared input case
assert img.shape[1] == self._in_channels, (
"Input img has wrong shape %s. Expecting (B, %d, H, W)" %
(img.shape, self._in_channels))
img = img.unsqueeze(1).expand(img.shape[0], self._n,
*img.shape[1:])
elif img.ndim == 5:
# the non-shared case
assert (
img.shape[1] == self._n
and img.shape[2] == self._in_channels), (
"Input img has wrong shape %s. Expecting (B, %d, %d, H, W)"
% (img.shape, self._n, self._in_channels))
else:
raise ValueError("Wrong img.ndim=%d" % img.ndim)
# merge replica and channels
img = img.reshape(img.shape[0], img.shape[1] * img.shape[2],
*img.shape[3:])
res = self._activation(self._conv2d(img))
# reshape back: [B, n*C', H', W'] -> [B, n, C', H', W']
res = res.reshape(res.shape[0], self._n, self._out_channels,
*res.shape[2:])
return res
@property
def weight(self):
return self._weight
@property
def bias(self):
return self._bias
@gin.configurable
class ConvTranspose2D(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
activation=torch.relu_,
strides=1,
padding=0,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A 2D ConvTranspose layer that's also responsible for activation and
customized weights initialization. An auto gain calculation might depend
        on the activation following the conv layer. Consider using this wrapper
        module instead of ``nn.ConvTranspose2d`` if you really care about the
        weight std after init.
Args:
in_channels (int): channels of the input image
out_channels (int): channels of the output image
kernel_size (int or tuple):
activation (torch.nn.functional):
strides (int or tuple):
padding (int or tuple):
use_bias (bool):
kernel_initializer (Callable): initializer for the conv_trans layer.
If None is provided a variance_scaling_initializer with gain as
``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to the
std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super(ConvTranspose2D, self).__init__()
self._activation = activation
self._conv_trans2d = nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride=strides,
padding=padding,
bias=use_bias)
if kernel_initializer is None:
variance_scaling_init(
self._conv_trans2d.weight.data,
gain=kernel_init_gain,
nonlinearity=self._activation,
transposed=True)
else:
kernel_initializer(self._conv_trans2d.weight.data)
if use_bias:
nn.init.constant_(self._conv_trans2d.bias.data, bias_init_value)
def forward(self, img):
return self._activation(self._conv_trans2d(img))
@property
def weight(self):
return self._conv_trans2d.weight
@property
def bias(self):
return self._conv_trans2d.bias
@gin.configurable
class ParallelConvTranspose2D(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
n,
activation=torch.relu_,
strides=1,
padding=0,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A parallel ConvTranspose2D layer that can be used to perform n
independent 2D transposed convolutions in parallel.
Args:
in_channels (int): channels of the input image
out_channels (int): channels of the output image
kernel_size (int or tuple):
n (int): n independent ``ConvTranspose2D`` layers
activation (torch.nn.functional):
strides (int or tuple):
padding (int or tuple):
use_bias (bool):
kernel_initializer (Callable): initializer for the conv_trans layer.
If None is provided a ``variance_scaling_initializer`` with gain
as ``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to the
std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super(ParallelConvTranspose2D, self).__init__()
self._activation = activation
self._n = n
self._in_channels = in_channels
self._out_channels = out_channels
self._kernel_size = common.tuplify2d(kernel_size)
self._conv_trans2d = nn.ConvTranspose2d(
in_channels * n,
out_channels * n,
kernel_size,
groups=n,
stride=strides,
padding=padding,
bias=use_bias)
for i in range(n):
if kernel_initializer is None:
variance_scaling_init(
self._conv_trans2d.weight.data[i * in_channels:(i + 1) *
in_channels],
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(
self._conv_trans2d.weight.data[i * in_channels:(i + 1) *
in_channels])
# [n*C, C', kernel_size, kernel_size]->[n, C, C', kernel_size, kernel_size]
self._weight = self._conv_trans2d.weight.view(
self._n, self._in_channels, self._out_channels,
self._kernel_size[0], self._kernel_size[1])
if use_bias:
nn.init.constant_(self._conv_trans2d.bias.data, bias_init_value)
# [n*C]->[n, C]
self._bias = self._conv_trans2d.bias.view(self._n,
self._out_channels)
else:
self._bias = None
def forward(self, img):
"""Forward
Args:
img (torch.Tensor): with shape ``[B, C, H, W]``
or ``[B, n, C, H, W]``
                where the meanings of the symbols are:
- ``B``: batch size
- ``n``: number of replicas
- ``C``: number of channels
- ``H``: image height
- ``W``: image width.
When the shape of img is ``[B, C, H, W]``, all the n transposed 2D
Conv operations will take img as the same shared input.
When the shape of img is ``[B, n, C, H, W]``, each transposed 2D
Conv operator will have its own input data by slicing img.
Returns:
torch.Tensor with shape ``[B, n, C', H', W']``
            where the meanings of the symbols are:
- ``B``: batch
- ``n``: number of replicas
- ``C'``: number of output channels
- ``H'``: output height
- ``W'``: output width
"""
if img.ndim == 4:
# the shared input case
assert img.shape[1] == self._in_channels, (
"Input img has wrong shape %s. Expecting (B, %d, H, W)" %
(img.shape, self._in_channels))
img = img.unsqueeze(1).expand(img.shape[0], self._n,
*img.shape[1:])
elif img.ndim == 5:
# the non-shared case
assert (
img.shape[1] == self._n
and img.shape[2] == self._in_channels), (
"Input img has wrong shape %s. Expecting (B, %d, %d, H, W)"
% (img.shape, self._n, self._in_channels))
else:
raise ValueError("Wrong img.ndim=%d" % img.ndim)
# merge replica and channels
img = img.reshape(img.shape[0], img.shape[1] * img.shape[2],
*img.shape[3:])
res = self._activation(self._conv_trans2d(img))
# reshape back: [B, n*C', H', W'] -> [B, n, C', H', W']
res = res.reshape(res.shape[0], self._n, self._out_channels,
res.shape[2], res.shape[3])
return res
@property
def weight(self):
return self._weight
@property
def bias(self):
return self._bias
class Reshape(nn.Module):
def __init__(self, shape):
"""A layer for reshape the tensor.
The result of this layer is a tensor reshaped to ``(B, *shape)`` where
``B`` is ``x.shape[0]``
Args:
shape (tuple): desired shape not including the batch dimension.
"""
super().__init__()
self._shape = shape
def forward(self, x):
return x.reshape(x.shape[0], *self._shape)
def _tuplify2d(x):
if isinstance(x, tuple):
assert len(x) == 2
return x
return (x, x)
def _conv_transpose_2d(in_channels,
out_channels,
kernel_size,
stride=1,
padding=0):
# need output_padding so that output_size is stride * input_size
# See https://pytorch.org/docs/stable/nn.html#torch.nn.ConvTranspose2d
output_padding = stride + 2 * padding - kernel_size
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding)
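# Worked example (illustrative): with kernel_size=3, stride=2, padding=1 the helper above
# uses output_padding = 2 + 2*1 - 3 = 1, so the transposed conv maps an input of size H to
# (H - 1)*2 - 2*1 + 3 + 1 = 2*H, i.e. exactly stride * input_size.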
@gin.configurable(whitelist=['v1_5', 'with_batch_normalization'])
class BottleneckBlock(nn.Module):
"""Bottleneck block for ResNet.
We allow two slightly different architectures:
* v1: Placing the stride at the first 1x1 convolution as described in the
original ResNet paper `Deep residual learning for image recognition
<https://arxiv.org/abs/1512.03385>`_.
* v1.5: Placing the stride for downsampling at 3x3 convolution. This variant
is also known as ResNet V1.5 and improves accuracy according to
`<https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.
"""
def __init__(self,
in_channels,
kernel_size,
filters,
stride,
transpose=False,
v1_5=True,
with_batch_normalization=True):
"""
Args:
            in_channels (int): number of channels of the input
            kernel_size (int): the kernel size of the middle layer on the main path
            filters (tuple[int]): the number of filters of the 3 layers on the
                main path
            stride (int): stride for this block.
            transpose (bool): whether to use transposed convolutions
                (``ConvTranspose2d``) instead of regular convolutions (``Conv2d``).
                If two BottleneckBlock layers ``L`` and ``LT`` are constructed
                with the same arguments except ``transpose``, it is guaranteed that
                ``LT(L(x)).shape == x.shape`` if ``x.shape[-2:]`` can be divided
                by ``stride``.
v1_5 (bool): whether to use the ResNet V1.5 structure
with_batch_normalization (bool): whether to include batch normalization.
Note that standard ResNet uses batch normalization.
        Returns:
            the output tensor for the block (from ``forward()``)
"""
super().__init__()
filters1, filters2, filters3 = filters
conv_fn = _conv_transpose_2d if transpose else nn.Conv2d
padding = (kernel_size - 1) // 2
if v1_5:
a = conv_fn(in_channels, filters1, 1)
b = conv_fn(filters1, filters2, kernel_size, stride, padding)
else:
a = conv_fn(in_channels, filters1, 1, stride)
b = conv_fn(filters1, filters2, kernel_size, 1, padding)
nn.init.kaiming_normal_(a.weight.data)
nn.init.zeros_(a.bias.data)
nn.init.kaiming_normal_(b.weight.data)
nn.init.zeros_(b.bias.data)
c = conv_fn(filters2, filters3, 1)
nn.init.kaiming_normal_(c.weight.data)
nn.init.zeros_(c.bias.data)
s = conv_fn(in_channels, filters3, 1, stride)
nn.init.kaiming_normal_(s.weight.data)
nn.init.zeros_(s.bias.data)
relu = nn.ReLU(inplace=True)
if with_batch_normalization:
core_layers = nn.Sequential(a, nn.BatchNorm2d(filters1), relu, b,
nn.BatchNorm2d(filters2), relu, c,
nn.BatchNorm2d(filters3))
shortcut_layers = nn.Sequential(s, nn.BatchNorm2d(filters3))
else:
core_layers = nn.Sequential(a, relu, b, relu, c)
shortcut_layers = s
self._core_layers = core_layers
self._shortcut_layers = shortcut_layers
def forward(self, inputs):
core = self._core_layers(inputs)
shortcut = self._shortcut_layers(inputs)
return torch.relu_(core + shortcut)
def calc_output_shape(self, input_shape):
x = torch.zeros(1, *input_shape)
y = self.forward(x)
return y.shape[1:]
```
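A short usage sketch for ``BottleneckBlock`` (assuming the module is importable as ``alf.layers``): choosing ``in_channels`` equal to the last filter count lets a block and its transposed counterpart form a round trip that restores the input shape, as the docstring describes.
```python
import torch
from alf.layers import BottleneckBlock

# Downsampling block: [B, 64, H, W] -> [B, 64, H/2, W/2]; filters[2] == in_channels here
# so the transposed block below can consume its output directly.
block = BottleneckBlock(
    in_channels=64, kernel_size=3, filters=(16, 16, 64), stride=2)
block_t = BottleneckBlock(
    in_channels=64, kernel_size=3, filters=(16, 16, 64), stride=2, transpose=True)

x = torch.randn(2, 64, 32, 32)
y = block(x)
assert y.shape == (2, 64, 16, 16)
assert block_t(y).shape == x.shape           # round trip restores the spatial size
assert block.calc_output_shape((64, 32, 32)) == (64, 16, 16)
```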
#### File: alf/networks/encoding_networks.py
```python
import abc
import copy
import functools
import gin
import numpy as np
import torch
import torch.nn as nn
from .network import Network
from .preprocessor_networks import PreprocessorNetwork
import alf
import alf.layers as layers
from alf.initializers import variance_scaling_init
from alf.tensor_specs import TensorSpec
from alf.utils import common, math_ops
@gin.configurable
class ImageEncodingNetwork(Network):
"""
A general template class for creating convolutional encoding networks.
"""
def __init__(self,
input_channels,
input_size,
conv_layer_params,
same_padding=False,
activation=torch.relu_,
kernel_initializer=None,
flatten_output=False,
name="ImageEncodingNetwork"):
"""
Initialize the layers for encoding an image into a latent vector.
        Currently there seems to be no need for this class to handle nested inputs;
        if necessary, extend the argument list to support it in the future.
How to calculate the output size:
`<https://pytorch.org/docs/stable/nn.html#torch.nn.Conv2d>`_::
H = (H1 - HF + 2P) // strides + 1
where H = output size, H1 = input size, HF = size of kernel, P = padding.
Regarding padding: in the previous TF version, we have two padding modes:
``valid`` and ``same``. For the former, we always have no padding (P=0); for
        the latter, it's also called "half padding" (P=(HF-1)//2 when strides=1;
        when HF is an odd number, the output has the same size as the input).
        Currently, PyTorch doesn't support different left and right paddings and
        P is always (HF-1)//2, so if HF is an even number, the output size will
        decrease by 1 when strides=1.
Args:
input_channels (int): number of channels in the input image
input_size (int or tuple): the input image size (height, width)
            conv_layer_params (tuple[tuple]): a non-empty tuple of
tuple (num_filters, kernel_size, strides, padding), where
padding is optional
same_padding (bool): similar to TF's conv2d ``same`` padding mode. If
True, the user provided paddings in `conv_layer_params` will be
replaced by automatically calculated ones; if False, it
corresponds to TF's ``valid`` padding mode (the user can still
provide custom paddings though)
activation (torch.nn.functional): activation for all the layers
kernel_initializer (Callable): initializer for all the layers.
flatten_output (bool): If False, the output will be an image
structure of shape ``BxCxHxW``; otherwise the output will be
flattened into a feature of shape ``BxN``.
"""
input_size = common.tuplify2d(input_size)
super().__init__(
input_tensor_spec=TensorSpec((input_channels, ) + input_size),
name=name)
assert isinstance(conv_layer_params, tuple)
assert len(conv_layer_params) > 0
self._flatten_output = flatten_output
self._conv_layer_params = conv_layer_params
self._conv_layers = nn.ModuleList()
for paras in conv_layer_params:
filters, kernel_size, strides = paras[:3]
padding = paras[3] if len(paras) > 3 else 0
if same_padding: # overwrite paddings
kernel_size = common.tuplify2d(kernel_size)
padding = ((kernel_size[0] - 1) // 2,
(kernel_size[1] - 1) // 2)
self._conv_layers.append(
layers.Conv2D(
input_channels,
filters,
kernel_size,
activation=activation,
kernel_initializer=kernel_initializer,
strides=strides,
padding=padding))
input_channels = filters
def forward(self, inputs, state=()):
"""The empty state just keeps the interface same with other networks."""
z = inputs
for conv_l in self._conv_layers:
z = conv_l(z)
if self._flatten_output:
z = torch.reshape(z, (z.size()[0], -1))
return z, state
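# Worked example for the output-size formula in ImageEncodingNetwork above (illustrative):
# an 84x84 input passed through conv_layer_params=((32, 8, 4), (64, 4, 2), (64, 3, 1))
# with no padding gives (84 - 8)//4 + 1 = 20, (20 - 4)//2 + 1 = 9, (9 - 3)//1 + 1 = 7,
# i.e. a final feature map of shape [B, 64, 7, 7] (3136 features when flattened).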
@gin.configurable
class ParallelImageEncodingNetwork(Network):
"""
A Parallel Image Encoding Network that can be used to perform n
independent image encodings in parallel.
"""
def __init__(self,
input_channels,
input_size,
n,
conv_layer_params,
same_padding=False,
activation=torch.relu_,
kernel_initializer=None,
flatten_output=False,
name="ParallelImageEncodingNetwork"):
"""
Args:
input_channels (int): number of channels in the input image
input_size (int or tuple): the input image size (height, width)
n (int): number of parallel networks
            conv_layer_params (tuple[tuple]): a non-empty tuple of
tuple (num_filters, kernel_size, strides, padding), where
padding is optional
same_padding (bool): similar to TF's conv2d ``same`` padding mode. If
True, the user provided paddings in `conv_layer_params` will be
replaced by automatically calculated ones; if False, it
corresponds to TF's ``valid`` padding mode (the user can still
provide custom paddings though)
activation (torch.nn.functional): activation for all the layers
kernel_initializer (Callable): initializer for all the layers.
flatten_output (bool): If False, the output will be an image
structure of shape ``(B, n, C, H, W)``; otherwise the output
will be flattened into a feature of shape ``(B, n, C*H*W)``.
"""
input_size = common.tuplify2d(input_size)
super().__init__(
input_tensor_spec=TensorSpec((input_channels, ) + input_size),
name=name)
assert isinstance(conv_layer_params, tuple)
assert len(conv_layer_params) > 0
self._flatten_output = flatten_output
self._conv_layer_params = conv_layer_params
self._conv_layers = nn.ModuleList()
for paras in conv_layer_params:
filters, kernel_size, strides = paras[:3]
padding = paras[3] if len(paras) > 3 else 0
if same_padding: # overwrite paddings
kernel_size = common.tuplify2d(kernel_size)
padding = ((kernel_size[0] - 1) // 2,
(kernel_size[1] - 1) // 2)
self._conv_layers.append(
layers.ParallelConv2D(
input_channels,
filters,
kernel_size,
n,
activation=activation,
kernel_initializer=kernel_initializer,
strides=strides,
padding=padding))
input_channels = filters
def forward(self, inputs, state=()):
"""Forward
Args:
inputs (torch.Tensor): with shape ``[B, C, H, W]``
or ``[B, n, C, H, W]``
                where the meanings of the symbols are:
- ``B``: batch size
- ``n``: number of replicas
- ``C``: number of channels
- ``H``: image height
- ``W``: image width.
When the shape of inputs is ``[B, C, H, W]``, the same input is
shared among all the n replicas.
When the shape of img is ``[B, n, C, H, W]``, each replica
will have its own data by slicing inputs.
state: an empty state just keeps the interface same with other
networks.
Returns:
            - a tensor of shape ``(B, n, C, H, W)`` if ``flatten_output=False``,
              or ``(B, n, C*H*W)`` if ``flatten_output=True``
            - the empty state, just to keep the interface the same as other networks
"""
z = inputs
for conv_l in self._conv_layers:
z = conv_l(z)
if self._flatten_output:
z = torch.reshape(z, (*z.size()[:2], -1))
return z, state
@gin.configurable
class ImageDecodingNetwork(Network):
"""
A general template class for creating transposed convolutional decoding networks.
"""
def __init__(self,
input_size,
transconv_layer_params,
start_decoding_size,
start_decoding_channels,
same_padding=False,
preprocess_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
output_activation=torch.tanh,
name="ImageDecodingNetwork"):
"""
Initialize the layers for decoding a latent vector into an image.
        Currently there seems to be no need for this class to handle nested inputs;
        if necessary, extend the argument list to support it in the future.
How to calculate the output size:
`<https://pytorch.org/docs/stable/nn.html#torch.nn.ConvTranspose2d>`_::
H = (H1-1) * strides + HF - 2P + OP
where H = output size, H1 = input size, HF = size of kernel, P = padding,
OP = output_padding (currently hardcoded to be 0 for this class).
Regarding padding: in the previous TF version, we have two padding modes:
``valid`` and ``same``. For the former, we always have no padding (P=0); for
        the latter, it's also called ``half padding`` (P=(HF-1)//2 when strides=1;
        when HF is an odd number, the output has the same size as the input).
        Currently, PyTorch doesn't support different left and right paddings and
        P is always (HF-1)//2, so if HF is an even number, the output size will
        increase by 1 when strides=1.
Args:
input_size (int): the size of the input latent vector
transconv_layer_params (tuple[tuple]): a non-empty
tuple of tuple (num_filters, kernel_size, strides, padding),
where ``padding`` is optional.
start_decoding_size (int or tuple): the initial height and width
we'd like to have for the feature map
start_decoding_channels (int): the initial number of channels we'd
like to have for the feature map. Note that we always first
project an input latent vector into a vector of an appropriate
length so that it can be reshaped into (``start_decoding_channels``,
``start_decoding_height``, ``start_decoding_width``).
same_padding (bool): similar to TF's conv2d ``same`` padding mode. If
True, the user provided paddings in ``transconv_layer_params`` will
be replaced by automatically calculated ones; if False, it
corresponds to TF's ``valid`` padding mode (the user can still
provide custom paddings though).
preprocess_fc_layer_params (tuple[int]): a tuple of fc
layer units. These fc layers are used for preprocessing the
latent vector before transposed convolutions.
activation (nn.functional): activation for hidden layers
kernel_initializer (Callable): initializer for all the layers.
output_activation (nn.functional): activation for the output layer.
Usually our image inputs are normalized to [0, 1] or [-1, 1],
so this function should be ``torch.sigmoid`` or
``torch.tanh``.
name (str):
"""
super().__init__(
input_tensor_spec=TensorSpec((input_size, )), name=name)
assert isinstance(transconv_layer_params, tuple)
assert len(transconv_layer_params) > 0
self._preprocess_fc_layers = nn.ModuleList()
if preprocess_fc_layer_params is not None:
for size in preprocess_fc_layer_params:
self._preprocess_fc_layers.append(
layers.FC(
input_size,
size,
activation=activation,
kernel_initializer=kernel_initializer))
input_size = size
start_decoding_size = common.tuplify2d(start_decoding_size)
# pytorch assumes "channels_first" !
self._start_decoding_shape = [
start_decoding_channels, start_decoding_size[0],
start_decoding_size[1]
]
self._preprocess_fc_layers.append(
layers.FC(
input_size,
np.prod(self._start_decoding_shape),
activation=activation,
kernel_initializer=kernel_initializer))
self._transconv_layer_params = transconv_layer_params
self._transconv_layers = nn.ModuleList()
in_channels = start_decoding_channels
for i, paras in enumerate(transconv_layer_params):
filters, kernel_size, strides = paras[:3]
padding = paras[3] if len(paras) > 3 else 0
if same_padding: # overwrite paddings
kernel_size = common.tuplify2d(kernel_size)
padding = ((kernel_size[0] - 1) // 2,
(kernel_size[1] - 1) // 2)
act = activation
if i == len(transconv_layer_params) - 1:
act = output_activation
self._transconv_layers.append(
layers.ConvTranspose2D(
in_channels,
filters,
kernel_size,
activation=act,
kernel_initializer=kernel_initializer,
strides=strides,
padding=padding))
in_channels = filters
def forward(self, inputs, state=()):
"""Returns an image of shape ``(B,C,H,W)``. The empty state just keeps the
interface same with other networks.
"""
z = inputs
for fc_l in self._preprocess_fc_layers:
z = fc_l(z)
z = torch.reshape(z, [-1] + self._start_decoding_shape)
for deconv_l in self._transconv_layers:
z = deconv_l(z)
return z, state
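# Illustrative sketch (comments only, not part of the original module): a worked
# instance of the output-size formula from the ImageDecodingNetwork docstring,
# H = (H1 - 1) * strides + HF - 2P with output_padding fixed to 0.
# Starting from an 8x8 feature map with kernel HF=4, strides=2 and the
# ``same_padding`` value P = (4 - 1) // 2 = 1:
#   H = (8 - 1) * 2 + 4 - 2 * 1 = 16
# so two such layers decode 8x8 -> 16x16 -> 32x32. Layer parameters such as
# ((64, 4, 2), (3, 4, 2)) in ``transconv_layer_params`` are assumptions made
# only for this example.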
@gin.configurable
class ParallelImageDecodingNetwork(Network):
"""
A Parallel Image Decoding Network that can be used to perform n
independent image decodings in parallel.
"""
def __init__(self,
input_size,
n,
transconv_layer_params,
start_decoding_size,
start_decoding_channels,
same_padding=False,
preprocess_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
output_activation=torch.tanh,
name="ImageDecodingNetwork"):
"""
Args:
input_size (int): the size of the input latent vector
n (int): number of parallel networks
transconv_layer_params (tuple[tuple]): a non-empty
tuple of tuple (num_filters, kernel_size, strides, padding),
where ``padding`` is optional.
start_decoding_size (int or tuple): the initial height and width
we'd like to have for the feature map
start_decoding_channels (int): the initial number of channels we'd
like to have for the feature map. Note that we always first
project an input latent vector into a vector of an appropriate
length so that it can be reshaped into (``start_decoding_channels``,
``start_decoding_height``, ``start_decoding_width``).
same_padding (bool): similar to TF's conv2d ``same`` padding mode. If
True, the user provided paddings in ``transconv_layer_params`` will
be replaced by automatically calculated ones; if False, it
corresponds to TF's ``valid`` padding mode (the user can still
provide custom paddings though).
preprocess_fc_layer_params (tuple[int]): a tuple of fc
layer units. These fc layers are used for preprocessing the
latent vector before transposed convolutions.
activation (nn.functional): activation for hidden layers
kernel_initializer (Callable): initializer for all the layers.
output_activation (nn.functional): activation for the output layer.
Usually our image inputs are normalized to [0, 1] or [-1, 1],
so this function should be ``torch.sigmoid`` or
``torch.tanh``.
name (str):
"""
super().__init__(
input_tensor_spec=TensorSpec((input_size, )), name=name)
assert isinstance(transconv_layer_params, tuple)
assert len(transconv_layer_params) > 0
self._preprocess_fc_layers = nn.ModuleList()
if preprocess_fc_layer_params is not None:
for size in preprocess_fc_layer_params:
self._preprocess_fc_layers.append(
layers.ParallelFC(
input_size,
size,
n,
activation=activation,
kernel_initializer=kernel_initializer))
input_size = size
start_decoding_size = common.tuplify2d(start_decoding_size)
# pytorch assumes "channels_first" !
self._start_decoding_shape = [
start_decoding_channels, start_decoding_size[0],
start_decoding_size[1]
]
self._preprocess_fc_layers.append(
layers.ParallelFC(
input_size,
np.prod(self._start_decoding_shape),
n,
activation=activation,
kernel_initializer=kernel_initializer))
self._transconv_layer_params = transconv_layer_params
self._transconv_layers = nn.ModuleList()
in_channels = start_decoding_channels
for i, paras in enumerate(transconv_layer_params):
filters, kernel_size, strides = paras[:3]
padding = paras[3] if len(paras) > 3 else 0
if same_padding: # overwrite paddings
kernel_size = common.tuplify2d(kernel_size)
padding = ((kernel_size[0] - 1) // 2,
(kernel_size[1] - 1) // 2)
act = activation
if i == len(transconv_layer_params) - 1:
act = output_activation
self._transconv_layers.append(
layers.ParallelConvTranspose2D(
in_channels,
filters,
kernel_size,
n,
activation=act,
kernel_initializer=kernel_initializer,
strides=strides,
padding=padding))
in_channels = filters
self._n = n
def forward(self, inputs, state=()):
"""Forward
Args:
inputs (torch.Tensor): with shape ``[B, N]``
or ``[B, n, N]``
where the meaning of the symbols are:
- ``B``: batch size
- ``n``: number of replicas
- ``N``: dimension of the feature vector to be decoded.
When the shape of inputs is ``[B, N]``, the same input is
shared among all the n replicas.
When the shape of img is ``[B, n, N]``, each replica
will have its own data by slicing inputs.
state: an empty state just keeps the interface same with other
networks.
Returns:
- an image of shape ``(B, n, C, H, W)``
- the empty state just to keep the interface same with other networks
"""
z = inputs
for fc_l in self._preprocess_fc_layers:
z = fc_l(z)
z = torch.reshape(z, [-1, self._n] + self._start_decoding_shape)
for deconv_l in self._transconv_layers:
z = deconv_l(z)
return z, state
@gin.configurable
class EncodingNetwork(PreprocessorNetwork):
"""Feed Forward network with CNN and FC layers which allows the last layer
to have different settings from the other layers.
"""
def __init__(self,
input_tensor_spec,
input_preprocessors=None,
preprocessing_combiner=None,
conv_layer_params=None,
fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
last_layer_size=None,
last_activation=None,
last_kernel_initializer=None,
name="EncodingNetwork"):
"""
Args:
input_tensor_spec (nested TensorSpec): the (nested) tensor spec of
the input. If nested, then ``preprocessing_combiner`` must not be
None.
input_preprocessors (nested InputPreprocessor): a nest of
``InputPreprocessor``, each of which will be applied to the
corresponding input. If not None, then it must have the same
structure with ``input_tensor_spec``. This arg is helpful if you
want to have separate preprocessings for different inputs by
configuring a gin file without changing the code. For example,
embedding a discrete input before concatenating it to another
continuous vector.
preprocessing_combiner (NestCombiner): preprocessing called on
complex inputs. Note that this combiner must also accept
``input_tensor_spec`` as the input to compute the processed
tensor spec. For example, see ``alf.nest.utils.NestConcat``. This
arg is helpful if you want to combine inputs by configuring a
gin file without changing the code.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format ``(filters, kernel_size, strides, padding)``,
where ``padding`` is optional.
fc_layer_params (tuple[int]): a tuple of integers
representing FC layer sizes.
activation (nn.functional): activation used for all the layers but
the last layer.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If None, a variance_scaling_initializer will be
used.
last_layer_size (int): an optional size of an additional layer
appended at the very end. Note that if ``last_activation`` is
specified, ``last_layer_size`` has to be specified explicitly.
last_activation (nn.functional): activation function of the
additional layer specified by ``last_layer_size``. Note that if
``last_layer_size`` is not None, ``last_activation`` has to be
specified explicitly.
            last_kernel_initializer (Callable): initializer for the
                additional layer specified by ``last_layer_size``.
If None, it will be the same with ``kernel_initializer``. If
``last_layer_size`` is None, ``last_kernel_initializer`` will
not be used.
name (str):
"""
super().__init__(
input_tensor_spec,
input_preprocessors,
preprocessing_combiner,
name=name)
if kernel_initializer is None:
kernel_initializer = functools.partial(
variance_scaling_init,
mode='fan_in',
distribution='truncated_normal',
nonlinearity=activation)
self._img_encoding_net = None
if conv_layer_params:
assert isinstance(conv_layer_params, tuple), \
"The input params {} should be tuple".format(conv_layer_params)
assert len(self._processed_input_tensor_spec.shape) == 3, \
"The input shape {} should be like (C,H,W)!".format(
self._processed_input_tensor_spec.shape)
input_channels, height, width = self._processed_input_tensor_spec.shape
self._img_encoding_net = ImageEncodingNetwork(
input_channels, (height, width),
conv_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
flatten_output=True)
input_size = self._img_encoding_net.output_spec.shape[0]
else:
assert self._processed_input_tensor_spec.ndim == 1, \
"The input shape {} should be like (N,)!".format(
self._processed_input_tensor_spec.shape)
input_size = self._processed_input_tensor_spec.shape[0]
self._fc_layers = nn.ModuleList()
if fc_layer_params is None:
fc_layer_params = []
else:
assert isinstance(fc_layer_params, tuple)
fc_layer_params = list(fc_layer_params)
for size in fc_layer_params:
self._fc_layers.append(
layers.FC(
input_size,
size,
activation=activation,
kernel_initializer=kernel_initializer))
input_size = size
if last_layer_size is not None or last_activation is not None:
assert last_layer_size is not None and last_activation is not None, \
"Both last_layer_size and last_activation need to be specified!"
if last_kernel_initializer is None:
common.warning_once(
"last_kernel_initializer is not specified "
"for the last layer of size {}.".format(last_layer_size))
last_kernel_initializer = kernel_initializer
self._fc_layers.append(
layers.FC(
input_size,
last_layer_size,
activation=last_activation,
kernel_initializer=last_kernel_initializer))
input_size = last_layer_size
self._output_spec = TensorSpec(
(input_size, ), dtype=self._processed_input_tensor_spec.dtype)
def forward(self, inputs, state=()):
"""
Args:
inputs (nested Tensor):
"""
# call super to preprocess inputs
z, state = super().forward(inputs, state)
if self._img_encoding_net is not None:
z, _ = self._img_encoding_net(z)
for fc in self._fc_layers:
z = fc(z)
return z, state
def make_parallel(self, n):
"""Make a parllelized version of this network.
A parallel network has ``n`` copies of network with the same structure but
different independently initialized parameters.
For supported network structures (currently, networks with only FC layers)
it will create ``ParallelCriticNetwork`` (PCN). Otherwise, it will
create a ``NaiveParallelNetwork`` (NPN). However, PCN is not always
faster than NPN. Especially for small ``n`` and large batch_size. See
``test_make_parallel()`` in critic_networks_test.py for detail.
Returns:
Network: A paralle network
"""
if (self.saved_args.get('input_preprocessors') is None and
(self._preprocessing_combiner == math_ops.identity or isinstance(
self._preprocessing_combiner,
(alf.nest.utils.NestSum, alf.nest.utils.NestConcat)))):
parallel_enc_net_args = dict(**self.saved_args)
parallel_enc_net_args.update(n=n, name="parallel_" + self.name)
return ParallelEncodingNetwork(**parallel_enc_net_args)
else:
return super().make_parallel(n)
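# Illustrative usage sketch (assumed, not from the original module): encoding a
# (3, 64, 64) image observation into a 10-dim vector with the constructor
# arguments documented above; the conv/fc sizes below are hypothetical.
#   net = EncodingNetwork(
#       input_tensor_spec=TensorSpec((3, 64, 64)),
#       conv_layer_params=((32, 8, 4), (64, 4, 2)),
#       fc_layer_params=(256, ),
#       last_layer_size=10,
#       last_activation=math_ops.identity)
#   output, _ = net(images)   # images: [B, 3, 64, 64] -> output: [B, 10]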
@gin.configurable
class ParallelEncodingNetwork(PreprocessorNetwork):
"""Parallel feed-forward network with FC layers which allows the last layer
to have different settings from the other layers.
"""
def __init__(self,
input_tensor_spec,
n,
input_preprocessors=None,
preprocessing_combiner=None,
conv_layer_params=None,
fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
last_layer_size=None,
last_activation=None,
last_kernel_initializer=None,
name="ParallelEncodingNetwork"):
"""
Args:
input_tensor_spec (nested TensorSpec): the (nested) tensor spec of
the input. If nested, then ``preprocessing_combiner`` must not be
None.
n (int): number of parallel networks
input_preprocessors (None): must be ``None``.
preprocessing_combiner (NestCombiner): preprocessing called on
complex inputs. Note that this combiner must also accept
``input_tensor_spec`` as the input to compute the processed
tensor spec. For example, see ``alf.nest.utils.NestConcat``. This
arg is helpful if you want to combine inputs by configuring a
gin file without changing the code.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format ``(filters, kernel_size, strides, padding)``,
where ``padding`` is optional.
fc_layer_params (tuple[int]): a tuple of integers
representing FC layer sizes.
activation (nn.functional): activation used for all the layers but
the last layer.
kernel_initializer (Callable): initializer for all the layers but
the last layer. If None, a variance_scaling_initializer will be
used.
last_layer_size (int): an optional size of an additional layer
appended at the very end. Note that if ``last_activation`` is
specified, ``last_layer_size`` has to be specified explicitly.
last_activation (nn.functional): activation function of the
additional layer specified by ``last_layer_size``. Note that if
``last_layer_size`` is not None, ``last_activation`` has to be
specified explicitly.
            last_kernel_initializer (Callable): initializer for the
                additional layer specified by ``last_layer_size``.
If None, it will be the same with ``kernel_initializer``. If
``last_layer_size`` is None, ``last_kernel_initializer`` will
not be used.
name (str):
"""
super().__init__(
input_tensor_spec,
input_preprocessors=None,
preprocessing_combiner=preprocessing_combiner,
name=name)
# TODO: handle input_preprocessors
assert input_preprocessors is None
if kernel_initializer is None:
kernel_initializer = functools.partial(
variance_scaling_init,
mode='fan_in',
distribution='truncated_normal',
nonlinearity=activation)
self._img_encoding_net = None
if conv_layer_params:
assert isinstance(conv_layer_params, tuple), \
"The input params {} should be tuple".format(conv_layer_params)
assert len(self._processed_input_tensor_spec.shape) == 3, \
"The input shape {} should be like (C,H,W)!".format(
self._processed_input_tensor_spec.shape)
input_channels, height, width = self._processed_input_tensor_spec.shape
self._img_encoding_net = ParallelImageEncodingNetwork(
input_channels, (height, width),
n,
conv_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
flatten_output=True)
input_size = self._img_encoding_net.output_spec.shape[1]
else:
assert self._processed_input_tensor_spec.ndim == 1, \
"The input shape {} should be like (N,)!".format(
self._processed_input_tensor_spec.shape)
input_size = self._processed_input_tensor_spec.shape[0]
self._fc_layers = nn.ModuleList()
if fc_layer_params is None:
fc_layer_params = []
else:
assert isinstance(fc_layer_params, tuple)
fc_layer_params = list(fc_layer_params)
for size in fc_layer_params:
self._fc_layers.append(
layers.ParallelFC(
input_size,
size,
n,
activation=activation,
kernel_initializer=kernel_initializer))
input_size = size
if last_layer_size is not None or last_activation is not None:
assert last_layer_size is not None and last_activation is not None, \
"Both last_layer_size and last_activation need to be specified!"
if last_kernel_initializer is None:
common.warning_once(
"last_kernel_initializer is not specified "
"for the last layer of size {}.".format(last_layer_size))
last_kernel_initializer = kernel_initializer
self._fc_layers.append(
layers.ParallelFC(
input_size,
last_layer_size,
n,
activation=last_activation,
kernel_initializer=last_kernel_initializer))
input_size = last_layer_size
self._output_spec = TensorSpec(
(n, input_size), dtype=self._processed_input_tensor_spec.dtype)
self._n = n
def forward(self, inputs, state=()):
"""
Args:
inputs (nested Tensor):
"""
# call super to preprocess inputs
z, state = super().forward(inputs, state, max_outer_rank=2)
if self._img_encoding_net is None and len(self._fc_layers) == 0:
if inputs.ndim == 2:
z = z.unsqueeze(1).expand(-1, self._n, *z.shape[1:])
else:
if self._img_encoding_net is not None:
z, _ = self._img_encoding_net(z)
for fc in self._fc_layers:
z = fc(z)
return z, state
@gin.configurable
class LSTMEncodingNetwork(Network):
"""LSTM cells followed by an encoding network."""
def __init__(self,
input_tensor_spec,
input_preprocessors=None,
preprocessing_combiner=None,
conv_layer_params=None,
pre_fc_layer_params=None,
hidden_size=(100, ),
lstm_output_layers=-1,
post_fc_layer_params=None,
activation=torch.relu_,
kernel_initializer=None,
last_layer_size=None,
last_activation=None,
last_kernel_initializer=None,
name="LSTMEncodingNetwork"):
"""
Args:
input_tensor_spec (nested TensorSpec): the (nested) tensor spec of
the input. If nested, then ``preprocessing_combiner`` must not be
None.
input_preprocessors (nested InputPreprocessor): a nest of
``InputPreprocessor``, each of which will be applied to the
corresponding input. If not None, then it must have the same
structure with ``input_tensor_spec``. This arg is helpful if you
want to have separate preprocessings for different inputs by
configuring a gin file without changing the code. For example,
embedding a discrete input before concatenating it to another
continuous vector.
preprocessing_combiner (NestCombiner): preprocessing called on
complex inputs. Note that this combiner must also accept
``input_tensor_spec`` as the input to compute the processed
tensor spec. For example, see ``alf.nest.utils.NestConcat``. This
arg is helpful if you want to combine inputs by configuring a
gin file without changing the code.
conv_layer_params (tuple[tuple]): a tuple of tuples where each
tuple takes a format ``(filters, kernel_size, strides, padding)``,
where ``padding`` is optional.
pre_fc_layer_params (tuple[int]): a tuple of integers
representing FC layers that are applied before the LSTM cells.
hidden_size (int or tuple[int]): the hidden size(s) of
the lstm cell(s). Each size corresponds to a cell. If there are
multiple sizes, then lstm cells are stacked.
lstm_output_layers (None|int|list[int]): -1 means the output from
the last lstm layer. ``None`` means all lstm layers.
post_fc_layer_params (tuple[int]): an optional tuple of
integers representing hidden FC layers that are applied after
the LSTM cells.
activation (nn.functional): activation for all the layers but the
last layer.
kernel_initializer (Callable): initializer for all the layers but
the last layer.
last_layer_size (int): an optional size of an additional layer
appended at the very end. Note that if ``last_activation`` is
specified, ``last_layer_size`` has to be specified explicitly.
last_activation (nn.functional): activation function of the
additional layer specified by ``last_layer_size``. Note that if
``last_layer_size`` is not None, ``last_activation`` has to be
specified explicitly.
            last_kernel_initializer (Callable): initializer for the
                additional layer specified by ``last_layer_size``.
If None, it will be the same with ``kernel_initializer``. If
``last_layer_size`` is None, ``last_kernel_initializer`` will
not be used.
"""
super().__init__(input_tensor_spec, name=name)
if (input_preprocessors or preprocessing_combiner or conv_layer_params
or pre_fc_layer_params):
self._pre_encoding_net = EncodingNetwork(
input_tensor_spec=input_tensor_spec,
input_preprocessors=input_preprocessors,
preprocessing_combiner=preprocessing_combiner,
conv_layer_params=conv_layer_params,
fc_layer_params=pre_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer)
input_size = self._pre_encoding_net.output_spec.shape[0]
else:
self._pre_encoding_net = lambda x: (x, ())
input_size = input_tensor_spec.shape[0]
if isinstance(hidden_size, int):
hidden_size = [hidden_size]
else:
assert isinstance(hidden_size, tuple)
self._cells = nn.ModuleList()
self._state_spec = []
for hs in hidden_size:
self._cells.append(
torch.nn.LSTMCell(input_size=input_size, hidden_size=hs))
self._state_spec.append(self._create_lstm_cell_state_spec(hs))
input_size = hs
if lstm_output_layers is None:
lstm_output_layers = list(range(len(hidden_size)))
elif type(lstm_output_layers) == int:
lstm_output_layers = [lstm_output_layers]
        self._lstm_output_layers = copy.copy(lstm_output_layers)
input_size = sum(hidden_size[i] for i in lstm_output_layers)
if post_fc_layer_params is None and last_layer_size is None:
self._post_encoding_net = lambda x: (x, ())
self._output_spec = TensorSpec((input_size, ))
else:
self._post_encoding_net = EncodingNetwork(
input_tensor_spec=TensorSpec((input_size, )),
fc_layer_params=post_fc_layer_params,
activation=activation,
kernel_initializer=kernel_initializer,
last_layer_size=last_layer_size,
last_activation=last_activation,
last_kernel_initializer=last_kernel_initializer)
self._output_spec = self._post_encoding_net.output_spec
def _create_lstm_cell_state_spec(self, hidden_size, dtype=torch.float32):
"""Create LSTMCell state specs given the hidden size and dtype, according to
PyTorch `LSTMCell doc <https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell>`_.
Each LSTMCell has two states: h and c with the same shape.
Args:
hidden_size (int): the number of units in the hidden state
dtype (torch.dtype): dtype of the specs
Returns:
specs (tuple[TensorSpec]):
"""
state_spec = TensorSpec(shape=(hidden_size, ), dtype=dtype)
return (state_spec, state_spec)
def forward(self, inputs, state):
"""
Args:
inputs (nested torch.Tensor):
state (list[tuple]): a list of tuples, where each tuple is a pair
of ``h_state`` and ``c_state``.
Returns:
tuple:
- output (torch.Tensor): output of the network
- new_state (list[tuple]): the updated states
"""
assert isinstance(state, list)
for s in state:
assert isinstance(s, tuple) and len(s) == 2, \
"Each LSTMCell state should be a tuple of (h,c)!"
assert len(self._cells) == len(state)
new_state = []
h_state, _ = self._pre_encoding_net(inputs)
for cell, s in zip(self._cells, state):
h_state, c_state = cell(h_state, s)
new_state.append((h_state, c_state))
if len(self._lstm_output_layers) == 1:
lstm_output = new_state[self._lstm_output_layers[0]][0]
else:
lstm_output = [new_state[l][0] for l in self._lstm_output_layers]
h_state = torch.cat(lstm_output, -1)
output, _ = self._post_encoding_net(h_state)
return output, new_state
@property
def state_spec(self):
return self._state_spec
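# Illustrative rollout sketch (assumed usage, not from the original module):
# the recurrent interface threads the state returned by one call into the next.
#   net = LSTMEncodingNetwork(input_tensor_spec=TensorSpec((obs_dim, )),
#                             hidden_size=(100, ))
#   # zero initial state: one (h, c) pair per LSTM cell, shapes per state_spec
#   state = [(torch.zeros(B, 100), torch.zeros(B, 100))]
#   for obs in observation_sequence:          # each obs: [B, obs_dim]
#       output, state = net(obs, state)
# ``obs_dim``, ``B`` and ``observation_sequence`` are placeholders introduced
# only for this example.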
```
#### File: alf/utils/math_ops.py
```python
import functools
import gin
import torch
import alf
nest_map = alf.nest.map_structure
def identity(x):
"""PyTorch doesn't have an identity activation. This can be used as a
placeholder.
"""
return x
@gin.configurable
def clipped_exp(value, clip_value_min=-20, clip_value_max=2):
""" Clip value to the range [`clip_value_min`, `clip_value_max`]
then compute exponential
Args:
value (Tensor): input tensor.
clip_value_min (float): The minimum value to clip by.
clip_value_max (float): The maximum value to clip by.
"""
value = torch.clamp(value, clip_value_min, clip_value_max)
return torch.exp(value)
def add_ignore_empty(x, y):
"""Add two Tensors which may be None or ().
If x or y is None, they are assumed to be zero and the other tensor is
returned.
Args:
x (Tensor|None|()):
y (Tensor(|None|())):
Returns:
x + y
"""
def _ignore(t):
return t is None or (isinstance(t, tuple) and len(t) == 0)
if _ignore(y):
return x
elif _ignore(x):
return y
else:
return x + y
@gin.configurable
def swish(x):
"""Swish activation.
This is suggested in arXiv:1710.05941
Args:
x (Tensor): input
Returns:
Tensor
"""
return x * torch.sigmoid(x)
def max_n(inputs):
"""Calculate the maximum of n tensors.
Args:
inputs (iterable[Tensor]): an iterable of tensors. It requires that
all tensor shapes can be broadcast to the same shape.
Returns:
Tensor: the element-wise maximum of all the tensors in ``inputs``.
"""
return functools.reduce(torch.max, inputs)
def min_n(inputs):
"""Calculate the minimum of n tensors.
Args:
inputs (iterable[Tensor]): an iterable of tensors. It requires that
all tensor shapes can be broadcast to the same shape.
Returns:
Tensor: the element-wise minimum of all the tensors in ``inputs``.
"""
return functools.reduce(torch.min, inputs)
def add_n(inputs):
"""Calculate the sum of n tensors.
Args:
inputs (iterable[Tensor]): an iterable of tensors. It requires that
all tensor shapes can be broadcast to the same shape.
Returns:
Tensor: the element-wise sum of all the tensors in ``inputs``.
"""
return sum(inputs)
def mul_n(inputs):
"""Calculate the product of n tensors.
Args:
inputs (iterable[Tensor]): an iterable of tensors. It requires that
all tensor shapes can be broadcast to the same shape.
Returns:
Tensor: the element-wise multiplication of all the tensors in ``inputs``.
"""
return functools.reduce(torch.mul, inputs)
def square(x):
"""torch doesn't have square."""
return torch.pow(x, 2)
def weighted_reduce_mean(x, weight, dim=()):
"""Weighted mean.
Args:
x (Tensor): values for calculating the mean
        weight (Tensor): weight for x; should have the same shape as ``x``
        dim (int | tuple[int]): The dimensions to reduce. If empty (the
            default), reduces all dimensions. Must be in the range
            [-rank(x), rank(x)).
    Returns:
        the weighted mean across ``dim``
"""
weight = weight.to(torch.float32)
sum_weight = weight.sum(dim=dim)
sum_weight = torch.max(sum_weight, torch.tensor(1e-10))
return nest_map(lambda y: (y * weight).sum(dim=dim) / sum_weight, x)
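# Example (comments only): with a 0/1 mask as ``weight``, this reduces to the
# mean over unmasked entries, e.g. weighted_reduce_mean(x, mask, dim=0) sums
# x * mask over dim 0 and divides by the (clamped) number of ones in the mask.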
def sum_to_leftmost(value, dim):
"""Sum out `value.ndim-dim` many rightmost dimensions of a given tensor.
Args:
value (Tensor): A tensor of `.ndim` at least `dim`.
dim (int): The number of leftmost dims to remain.
Returns:
The result tensor whose ndim is `min(dim, value.dim)`.
"""
if value.ndim <= dim:
return value
return value.sum(list(range(dim, value.ndim)))
def argmin(x):
"""Deterministic argmin.
    Different from torch.argmin, which may return an undetermined result if there
    are multiple elements equal to the min, this argmin is guaranteed to return the
    index of the first element equal to the min in each row.
Args:
x (Tensor): only support rank-2 tensor
Returns:
        rank-1 int64 Tensor representing the column of the first element in each
row equal to the minimum of the row.
"""
assert x.ndim == 2
m, _ = x.min(dim=1, keepdims=True)
r, c = torch.nonzero(x == m, as_tuple=True)
r, num_mins = torch.unique(r, return_counts=True)
i = torch.cumsum(num_mins, 0)
i = torch.cat([torch.tensor([0]), i[:-1]])
return c[i]
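# Worked example (comments only): for x = [[3, 1, 1], [2, 2, 5]] the row minima
# are 1 and 2, so x == m is [[F, T, T], [T, T, F]]. torch.nonzero then gives
# r = [0, 0, 1, 1], c = [1, 2, 0, 1]; the per-row counts are [2, 2], whose
# exclusive cumulative sum i = [0, 2] indexes the first hit of each row, so the
# result is c[[0, 2]] = [1, 0].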
def shuffle(values):
"""Shuffle a nest.
Shuffle all the tensors in ``values`` by a same random order.
Args:
values (nested Tensor): nested Tensor to be shuffled. All the tensor
need to have the same batch size (i.e. shape[0]).
Returns:
shuffled value along dimension 0.
"""
batch_size = alf.nest.get_nest_batch_size(values)
indices = torch.randperm(batch_size)
return nest_map(lambda value: value[indices], values)
class Softsign_(torch.autograd.Function):
r"""Inplace version of softsign function.
Applies element-wise inplace, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
The `current pytorch implementation of softsign
<https://pytorch.org/docs/stable/_modules/torch/nn/functional.html#softsign>`_
is inefficient for backward because it relies on automatic differentiation
and does not have an inplace version. Hence we provide a more efficient
implementation.
Reference:
`PyTorch: Defining New Autograd Functions
<https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html>`_
"""
@staticmethod
def forward(ctx, input):
output = torch.div(input, input.abs() + 1, out=input)
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, grad_output):
output, = ctx.saved_tensors
return torch.mul(grad_output, torch.pow(1 - output.abs(), 2))
softsign_ = Softsign_.apply
class Softsign(torch.autograd.Function):
r"""Softsign function.
Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
Compared to ``Softsign_``, this uses more memory but is faster and has higher precision
for backward.
"""
@staticmethod
def forward(ctx, input):
x = torch.pow(input.abs() + 1, -1)
output = torch.mul(input, x)
ctx.save_for_backward(x)
return output
@staticmethod
def backward(ctx, grad_output):
x, = ctx.saved_tensors
return torch.mul(grad_output, torch.pow(x, 2))
softsign = Softsign.apply
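# Why the two backward passes above are correct: with y = x / (1 + |x|) the
# derivative is dy/dx = 1 / (1 + |x|)^2. ``Softsign_`` saves the output y and
# uses 1 - |y| = 1 / (1 + |x|), so grad * (1 - |y|)^2 reproduces the derivative;
# ``Softsign`` saves x' = 1 / (1 + |x|) directly and multiplies grad by x'^2.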
```
#### File: alf/utils/value_ops.py
```python
import torch
import alf
from alf.data_structures import StepType
from alf.utils import dist_utils
def action_importance_ratio(action_distribution, collect_action_distribution,
action, clipping_mode, scope,
importance_ratio_clipping, log_prob_clipping,
check_numerics, debug_summaries):
""" ratio for importance sampling, used in PPO loss and vtrace loss.
Caller has to save tf.name_scope() and pass scope to this function.
Args:
action_distribution (nested tf.distribution): Distribution over
actions under target policy.
collect_action_distribution (nested tf.distribution): distribution
over actions from behavior policy, used to sample actions for
the rollout.
action (nested tf.distribution): possibly batched action tuple
taken during rollout.
clipping_mode (str): mode for clipping the importance ratio.
'double_sided': clips the range of importance ratio into
[1-importance_ratio_clipping, 1+importance_ratio_clipping],
which is used by PPOLoss.
'capping': clips the range of importance ratio into
min(1+importance_ratio_clipping, importance_ratio),
which is used by VTraceLoss, where c_bar or rho_bar =
1+importance_ratio_clipping.
scope (name scope manager): returned by tf.name_scope(), set
outside.
importance_ratio_clipping (float): Epsilon in clipped, surrogate
PPO objective. See the cited paper for more detail.
log_prob_clipping (float): If >0, clipping log probs to the range
(-log_prob_clipping, log_prob_clipping) to prevent inf / NaN
values.
check_numerics (bool): If true, adds tf.debugging.check_numerics to
help find NaN / Inf values. For debugging only.
debug_summaries (bool): If true, output summary metrics to tf.
Returns:
importance_ratio (Tensor), importance_ratio_clipped (Tensor).
"""
current_policy_distribution = action_distribution
sample_action_log_probs = dist_utils.compute_log_probability(
collect_action_distribution, action).detach()
action_log_prob = dist_utils.compute_log_probability(
current_policy_distribution, action)
if log_prob_clipping > 0.0:
action_log_prob = action_log_prob.clamp(-log_prob_clipping,
log_prob_clipping)
if check_numerics:
assert torch.all(torch.isfinite(action_log_prob))
# Prepare both clipped and unclipped importance ratios.
importance_ratio = (action_log_prob - sample_action_log_probs).exp()
if check_numerics:
assert torch.all(torch.isfinite(importance_ratio))
if clipping_mode == 'double_sided':
importance_ratio_clipped = importance_ratio.clamp(
1 - importance_ratio_clipping, 1 + importance_ratio_clipping)
elif clipping_mode == 'capping':
importance_ratio_clipped = torch.min(
importance_ratio, torch.tensor(1 + importance_ratio_clipping))
else:
raise Exception('Unsupported clipping mode: ' + clipping_mode)
if debug_summaries and alf.summary.should_record_summaries():
with scope:
if importance_ratio_clipping > 0.0:
clip_fraction = (torch.abs(importance_ratio - 1.0) >
importance_ratio_clipping).to(
torch.float32).mean()
alf.summary.scalar('clip_fraction', clip_fraction)
alf.summary.histogram('action_log_prob', action_log_prob)
alf.summary.histogram('action_log_prob_sample',
sample_action_log_probs)
alf.summary.histogram('importance_ratio', importance_ratio)
alf.summary.scalar('importance_ratio_mean',
importance_ratio.mean())
alf.summary.histogram('importance_ratio_clipped',
importance_ratio_clipped)
return importance_ratio, importance_ratio_clipped
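# Downstream usage note (an assumption based on standard PPO, not on this file):
# a PPO-style loss typically combines the two returned tensors as
#   surrogate = -torch.min(importance_ratio * advantages,
#                          importance_ratio_clipped * advantages)
# while a V-trace style loss uses the capped ratio to weight TD errors.
# ``advantages`` is a placeholder tensor for this sketch.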
def discounted_return(rewards, values, step_types, discounts, time_major=True):
"""Computes discounted return for the first T-1 steps.
    The difference between this function and the one in tf_agents.utils.value_ops
is that the accumulated_discounted_reward is replaced by value for is_last
steps in this function.
```
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'} + gamma^(T-t+1)*final_value.
```
Define abbreviations:
(B) batch size representing number of trajectories
(T) number of steps per trajectory
Args:
rewards (Tensor): shape is [T, B] (or [T]) representing rewards.
values (Tensor): shape is [T,B] (or [T]) representing values.
step_types (Tensor): shape is [T,B] (or [T]) representing step types.
discounts (Tensor): shape is [T, B] (or [T]) representing discounts.
time_major (bool): Whether input tensors are time major.
False means input tensors have shape [B, T].
Returns:
A tensor with shape [T-1, B] (or [T-1]) representing the discounted
returns. Shape is [B, T-1] when time_major is false.
"""
if not time_major:
discounts = discounts.transpose(0, 1)
rewards = rewards.transpose(0, 1)
values = values.transpose(0, 1)
step_types = step_types.transpose(0, 1)
assert values.shape[0] >= 2, ("The sequence length needs to be "
"at least 2. Got {s}".format(
s=values.shape[0]))
is_lasts = (step_types == StepType.LAST).to(dtype=torch.float32)
rets = torch.zeros(rewards.shape, dtype=rewards.dtype)
rets[-1] = values[-1]
with torch.no_grad():
for t in reversed(range(rewards.shape[0] - 1)):
acc_value = rets[t + 1] * discounts[t + 1] + rewards[t + 1]
rets[t] = is_lasts[t] * values[t] + (1 - is_lasts[t]) * acc_value
rets = rets[:-1]
if not time_major:
rets = rets.transpose(0, 1)
return rets.detach()
def one_step_discounted_return(rewards, values, step_types, discounts):
"""Calculate the one step discounted return for the first T-1 steps.
    return = next_reward + next_discount * next_value if it is not the last step;
    otherwise the return is set to current_discount * current_value.
Note: Input tensors must be time major
Args:
rewards (Tensor): shape is [T, B] (or [T]) representing rewards.
values (Tensor): shape is [T,B] (or [T]) representing values.
step_types (Tensor): shape is [T,B] (or [T]) representing step types.
discounts (Tensor): shape is [T, B] (or [T]) representing discounts.
Returns:
A tensor with shape [T-1, B] (or [T-1]) representing the discounted
returns.
"""
assert values.shape[0] >= 2, ("The sequence length needs to be "
"at least 2. Got {s}".format(
s=values.shape[0]))
is_lasts = (step_types == StepType.LAST).to(dtype=torch.float32)
rets = (1 - is_lasts[:-1]) * (rewards[1:] + discounts[1:] * values[1:]) + \
is_lasts[:-1] * discounts[:-1] * values[:-1]
return rets.detach()
def generalized_advantage_estimation(rewards,
values,
step_types,
discounts,
td_lambda=1.0,
time_major=True):
"""Computes generalized advantage estimation (GAE) for the first T-1 steps.
For theory, see
"High-Dimensional Continuous Control Using Generalized Advantage Estimation"
by <NAME>, <NAME> et al.
See https://arxiv.org/abs/1506.02438 for full paper.
    The difference between this function and the one in tf_agents.utils.value_ops
is that the accumulated_td is reset to 0 for is_last steps in this function.
Define abbreviations:
(B) batch size representing number of trajectories
(T) number of steps per trajectory
Args:
rewards (Tensor): shape is [T, B] (or [T]) representing rewards.
values (Tensor): shape is [T,B] (or [T]) representing values.
step_types (Tensor): shape is [T,B] (or [T]) representing step types.
discounts (Tensor): shape is [T, B] (or [T]) representing discounts.
td_lambda (float): A scalar between [0, 1]. It's used for variance
reduction in temporal difference.
time_major (bool): Whether input tensors are time major.
False means input tensors have shape [B, T].
Returns:
A tensor with shape [T-1, B] representing advantages. Shape is [B, T-1]
when time_major is false.
"""
if not time_major:
discounts = discounts.transpose(0, 1)
rewards = rewards.transpose(0, 1)
values = values.transpose(0, 1)
step_types = step_types.transpose(0, 1)
assert values.shape[0] >= 2, ("The sequence length needs to be "
"at least 2. Got {s}".format(
s=values.shape[0]))
is_lasts = (step_types == StepType.LAST).to(dtype=torch.float32)
weighted_discounts = discounts[1:] * td_lambda
advs = torch.zeros_like(rewards)
delta = rewards[1:] + discounts[1:] * values[1:] - values[:-1]
with torch.no_grad():
for t in reversed(range(rewards.shape[0] - 1)):
advs[t] = (1 - is_lasts[t]) * \
(delta[t] + weighted_discounts[t] * advs[t + 1])
advs = advs[:-1]
if not time_major:
advs = advs.transpose(0, 1)
return advs.detach()
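# Comments only: the loop above implements the standard GAE recursion
#   delta_t = r_{t+1} + gamma_{t+1} * V_{t+1} - V_t
#   A_t     = delta_t + lambda * gamma_{t+1} * A_{t+1}
# with A_t reset to 0 whenever step t is a LAST step, which is the behavioral
# difference from tf_agents noted in the docstring.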
```
#### File: alf/utils/value_ops_test.py
```python
import unittest
import torch
from alf.data_structures import TimeStep, StepType
from alf.utils import value_ops
import numpy as np
class DiscountedReturnTest(unittest.TestCase):
"""Tests for alf.utils.value_ops.discounted_return
"""
def test_discounted_return(self):
values = torch.tensor([[1.] * 5], dtype=torch.float32)
step_types = torch.tensor([[StepType.MID] * 5], dtype=torch.int64)
rewards = torch.tensor([[2.] * 5], dtype=torch.float32)
discounts = torch.tensor([[0.9] * 5], dtype=torch.float32)
expected = torch.tensor(
[[(((1 * 0.9 + 2) * 0.9 + 2) * 0.9 + 2) * 0.9 + 2,
((1 * 0.9 + 2) * 0.9 + 2) * 0.9 + 2,
(1 * 0.9 + 2) * 0.9 + 2, 1 * 0.9 + 2]],
dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.discounted_return(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
time_major=False), expected)
        # two episodes, the first ending by exceeding the time limit (discount=1)
step_types = torch.tensor([[
StepType.MID, StepType.MID, StepType.LAST, StepType.MID,
StepType.MID
]],
dtype=torch.int32)
expected = torch.tensor(
[[(1 * 0.9 + 2) * 0.9 + 2, 1 * 0.9 + 2, 1, 1 * 0.9 + 2]],
dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.discounted_return(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
time_major=False), expected)
        # two episodes, the first ending normally (discount=0)
step_types = torch.tensor([[
StepType.MID, StepType.MID, StepType.LAST, StepType.MID,
StepType.MID
]],
dtype=torch.int32)
discounts = torch.tensor([[0.9, 0.9, 0.0, 0.9, 0.9]])
expected = torch.tensor([[(0 * 0.9 + 2) * 0.9 + 2, 2, 1, 1 * 0.9 + 2]],
dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.discounted_return(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
time_major=False), expected)
class GeneralizedAdvantageTest(unittest.TestCase):
"""Tests for alf.utils.value_ops.generalized_advantage_estimation
"""
def test_generalized_advantage_estimation(self):
values = torch.tensor([[2.] * 5], dtype=torch.float32)
step_types = torch.tensor([[StepType.MID] * 5], dtype=torch.int64)
rewards = torch.tensor([[3.] * 5], dtype=torch.float32)
discounts = torch.tensor([[0.9] * 5], dtype=torch.float32)
td_lambda = 0.6 / 0.9
d = 2 * 0.9 + 1
expected = torch.tensor([[((d * 0.6 + d) * 0.6 + d) * 0.6 + d,
(d * 0.6 + d) * 0.6 + d, d * 0.6 + d, d]],
dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.generalized_advantage_estimation(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
td_lambda=td_lambda,
time_major=False), expected)
        # two episodes, the first ending by exceeding the time limit (discount=1)
step_types = torch.tensor([[
StepType.MID, StepType.MID, StepType.LAST, StepType.MID,
StepType.MID
]],
dtype=torch.int32)
expected = torch.tensor([[d * 0.6 + d, d, 0, d]], dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.generalized_advantage_estimation(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
td_lambda=td_lambda,
time_major=False), expected)
        # two episodes, the first ending normally (discount=0)
step_types = torch.tensor([[
StepType.MID, StepType.MID, StepType.LAST, StepType.MID,
StepType.MID
]],
dtype=torch.int32)
discounts = torch.tensor([[0.9, 0.9, 0.0, 0.9, 0.9]],
dtype=torch.float32)
expected = torch.tensor([[1 * 0.6 + d, 1, 0, d]], dtype=torch.float32)
np.testing.assert_array_almost_equal(
value_ops.generalized_advantage_estimation(
rewards=rewards,
values=values,
step_types=step_types,
discounts=discounts,
td_lambda=td_lambda,
time_major=False), expected)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesbu1/e-mission-eval",
"score": 2
}
|
#### File: e-mission-eval/percom_moves_collect_2014/TestFeatureCalc.py
```python
import unittest
import json
from featurecalc import calDistance, calSpeed, calHeading, calAvgSpeed, calSpeeds, calAccels, getIthMaxSpeed, getIthMaxAccel
import pygeocoder
class TestFeatureCalc(unittest.TestCase):
# All the test data is obtained by using
# Sections.find_one({"$and": [{'type': 'move'}, {'confirmed_mode': <modeId>}]})
# and then picking the first two points from the track points for that section
def setUp(self):
self.walktp1 = {"track_location": {"type": "Point",
"coordinates": [37.8724267522, -122.2593326013]}, "time": "20140415T182218Z"}
self.walktp2 = {"track_location": {"type": "Point",
"coordinates": [37.8722939116, -122.2594439528]}, "time": "20140415T182251Z"}
self.biketp1 = {"track_location": {"type": "Point", "coordinates": [37.8385561216, -122.2495945853]}, "time": "20140418T181035-0700"}
self.biketp2 = {"track_location": {"type": "Point", "coordinates": [37.838834329, -122.249646471]}, "time": "20140418T181100-0700"}
self.traintp1 = {"track_location": {"type": "Point", "coordinates": [37.8419243845, -122.251608766]}, "time": "20140418T083731-0700"}
self.traintp2 = {"track_location": {"type": "Point", "coordinates": [37.841983358, -122.2516275124]}, "time": "20140418T083748-0700"}
self.cartp1 = {"track_location": {"type": "Point", "coordinates": [37.8796206126, -122.272393763]}, "time": "20140418T013558Z"}
self.cartp2 = {"track_location": {"type": "Point", "coordinates": [37.8796948352, -122.2724807525]}, "time": "20140418T013618Z"}
from datetime import datetime
segments = json.load(open("testFeatureCalcData.json"))
self.walkSegment = segments[0]
self.walkSegment['section_start_datetime'] = datetime(2014, 4, 15, 18, 22, 18)
self.walkSegment['section_end_datetime'] = datetime(2014, 4, 15, 18, 31, 27)
self.bikeSegment = segments[1]
self.bikeSegment['section_start_datetime'] = datetime(2014, 4, 19, 1, 10, 35)
self.bikeSegment['section_end_datetime'] = datetime(2014, 4, 19, 1, 23, 16)
self.trainSegment = segments[2]
self.trainSegment['section_start_datetime'] = datetime(2014, 4, 18, 15, 37, 31)
self.trainSegment['section_end_datetime'] = datetime(2014, 4, 18, 15, 48, 3)
self.carSegment = segments[3]
self.carSegment['section_start_datetime'] = datetime(2014, 4, 18, 1, 35, 58)
self.carSegment['section_end_datetime'] = datetime(2014, 4, 18, 1, 42, 46)
# We spot check by using real values from the test data and comparing them to
# the calculations at http://www.movable-type.co.uk/scripts/latlong.html
def testCalDistance(self):
self.assertAlmostEqual(
calDistance([37.8724267522, -122.2593326013], [37.8678553385, -122.2597410423]),
509.6, places=1)
self.assertAlmostEqual(
calDistance(self.walktp1['track_location']['coordinates'], self.walktp2['track_location']['coordinates']),
17.71, places=1)
# points obtained from a bike trip
self.assertAlmostEqual(
calDistance(self.biketp1['track_location']['coordinates'], self.biketp2['track_location']['coordinates']),
31.27, places=1)
self.assertAlmostEqual(
calDistance(self.traintp1['track_location']['coordinates'], self.traintp2['track_location']['coordinates']),
6.761, places=1)
self.assertAlmostEqual(
calDistance(self.cartp1['track_location']['coordinates'], self.cartp2['track_location']['coordinates']),
11.24, places=1)
def testCalcSpeed(self):
# points obtained from a walk trip
self.assertAlmostEqual(
calSpeed(self.walktp1, self.walktp2), 0.53666, places=2)
self.assertAlmostEqual(
calSpeed(self.biketp1, self.biketp2), 1.25, places=2)
self.assertAlmostEqual(
calSpeed(self.traintp1, self.traintp2), 0.3977, places=2)
self.assertAlmostEqual(
calSpeed(self.cartp1, self.cartp2), 0.562, places=2)
def testCalcHeading(self):
# points from a walking trip
self.assertAlmostEqual(
calHeading(self.walktp1['track_location']['coordinates'], self.walktp2['track_location']['coordinates']),
-147, places=0)
self.assertAlmostEqual(
calHeading(self.biketp1['track_location']['coordinates'], self.biketp2['track_location']['coordinates']),
-8.37, places=0)
self.assertAlmostEqual(
calHeading(self.traintp1['track_location']['coordinates'], self.traintp2['track_location']['coordinates']),
-14.09, places=0)
self.assertAlmostEqual(
calHeading(self.cartp1['track_location']['coordinates'], self.cartp2['track_location']['coordinates']),
-43, places=0)
# The website returns only a positive heading - it converts a negative heading to positive
# by subtracting from 360. I think we can deal with negative headings, so we don't subtract
# but then we need to fix the value that we compare against
# 184deg 02'04''
self.assertAlmostEqual(
calHeading([37.8724267522, -122.2593326013], [37.8678553385, -122.2597410423]),
- (360 - 184.035), places=1)
def testAvgSpeeds(self):
self.assertAlmostEqual(
calAvgSpeed(self.walkSegment), 1.14025, places = 2)
self.assertAlmostEqual(
calAvgSpeed(self.bikeSegment), 4.92509, places = 2)
self.assertAlmostEqual(
calAvgSpeed(self.trainSegment), 4.36708, places = 2)
self.assertAlmostEqual(
calAvgSpeed(self.carSegment), 4.52696, places = 2)
def testSegSpeeds(self):
self.assertAlmostEqual(
calSpeeds(self.walkSegment)[0], 0.53666, places=2)
self.assertAlmostEqual(
calSpeeds(self.bikeSegment)[0], 1.25, places=2)
self.assertAlmostEqual(
calSpeeds(self.trainSegment)[0], 0.3977, places=2)
self.assertAlmostEqual(
calSpeeds(self.carSegment)[0], 0.562, places=2)
def testSegSecondSpeeds(self):
self.assertAlmostEqual(
calSpeeds(self.walkSegment)[1], 0.47711, places=2) # 52 secs
self.assertAlmostEqual(
calSpeeds(self.bikeSegment)[1], 2.05027, places=2) # 181 secs
self.assertAlmostEqual(
calSpeeds(self.trainSegment)[1], 5.61904, places=2) # 21 secs
self.assertAlmostEqual(
calSpeeds(self.carSegment)[1], 0.26278, places=2) # 19 secs
def testFirstAccel(self):
self.assertAlmostEqual(
calAccels(self.walkSegment)[0], 0.01626, places=3) # 33 secs
self.assertAlmostEqual(
calAccels(self.bikeSegment)[0], 0.05, places=4) # 25 secs
self.assertAlmostEqual(
calAccels(self.trainSegment)[0], 0.02339, places=4) # 17 secs
self.assertAlmostEqual(
calAccels(self.carSegment)[0], 0.02810, places=4) # 20 secs
def testSecondAccel(self):
self.assertAlmostEqual(
calAccels(self.walkSegment)[1], -0.00114, places=4) # 52 secs
self.assertAlmostEqual(
calAccels(self.bikeSegment)[1], 0.00442, places=4) # 181 secs
self.assertAlmostEqual(
calAccels(self.trainSegment)[1], 0.24863, places=3) # 21 secs
self.assertAlmostEqual(
calAccels(self.carSegment)[1], -0.01574, places=4) # 19 secs
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesbu1/e-mission-upc-aggregator",
"score": 2
}
|
#### File: e-mission-upc-aggregator/aggregate_enclave_scripts/controller.py
```python
import sys
import os
import requests
from bottle import route, run, get, post, request
import json
import uuid
import subprocess
import atexit
import docker
import socket
import time
import signal
from docker.types import Mount
import threading
import http.client
port = 2000
json_data = json.load(open("mock_data.json"))
list_of_containers = list(json.load(open("mock_data.json")).keys())
client = docker.from_env()
controller_ip = socket.gethostbyname(socket.gethostname()) + ":" + str(port)
path = os.path.expanduser("~/e-mission-server/")
uuid_counter = 0
uuid_set = set()
uuid_counter_lock = threading.Lock()
ready_to_proceed = threading.Event()
# container_port = 1025
class DockerThread(threading.Thread):
def __init__(self, container, query_type, user_uuid, agg_ip, privacy_budget, controller_uuid):
threading.Thread.__init__(self)
self.container = container
self.query_type = query_type
self.user_uuid = user_uuid
self.agg_ip = agg_ip
self.privacy_budget = privacy_budget
self.controller_uuid = controller_uuid
def run(self):
self.container.unpause()
output = self.container.exec_run('bash user_enclave.bash ' + self.query_type + ' ' + self.user_uuid + ' ' + self.agg_ip + ' ' + controller_ip + ' ' + self.controller_uuid + ' ' + self.privacy_budget)
print(output)
self.container.pause()
@get("/")
def home():
return "hello!"
@post('/upload_info')
def upload():
pass
@get('/remove_containers')
def remove_containers():
for container in list_of_containers:
container[0].remove(force=True)
return "User containers removed"
@post('/user_finished')
def user_finished():
"""
    The aggregator sends a POST request here to let the controller know
    that the message from the container with the given UUID has been received.
"""
global uuid_counter
request_dict = json.loads(request.body.read().decode('UTF-8'))
controller_uuid = uuid.UUID(request_dict['controller_uuid'])
if controller_uuid in uuid_set:
with uuid_counter_lock:
uuid_counter += 1
print(uuid_counter)
if uuid_counter == len(uuid_set):
ready_to_proceed.set()
return "Done with current user_finished call"
return "stop trying to spam me, malicious entity!"
@post('/request_query')
def query_start():
"""
1. Read list of enclaves from file
    2. Wake them up with docker unpause
3. Ask for query from them
"""
request_dict = json.loads(request.body.read().decode('UTF-8'))
query_type = str(request_dict['query_type'])
privacy_budget = str(request_dict['privacy_budget'])
print(request_dict)
threads = []
aggregator_ip = request.environ['REMOTE_ADDR'] + ':2001'
print("aggregator_ip: " + str(aggregator_ip))
print("Length of list of containers: " + str(len(list_of_containers)))
batch_size = 10
global uuid_counter, uuid_set, ready_to_proceed
ready_to_proceed = threading.Event()
uuid_counter, uuid_set = 0, set()
for j in range(0, int(len(list_of_containers) / batch_size) + 1):
for i in range(min(int(len(list_of_containers) - j * batch_size), batch_size)):
rand_uuid = str(uuid.uuid1())
uuid_set.add(uuid.UUID(rand_uuid))
send_to_agg = http.client.HTTPConnection(aggregator_ip)
send_to_agg_data = json.dumps({'controller_ip':controller_ip, 'controller_uuid':rand_uuid})
send_to_agg.request("POST", "/add_uuid_map", send_to_agg_data)
response = send_to_agg.getresponse()
container = list_of_containers[j * batch_size + i]
thread = DockerThread(container[0], query_type, container[1], aggregator_ip, privacy_budget, rand_uuid)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
ready_to_proceed.wait() #wait until all requests received by agg
return "Finished"
@get('/start_containers')
def start():
mount = Mount(target='/usr/src/app/conf/storage/db.conf', source= path + 'conf/storage/db.conf', type='bind')
for i in range(len(list_of_containers)):
container = list_of_containers[i]
print(container)
#json_data[container]["privacy_budget"] = 10
list_of_containers[i] = [client.containers.run('skxu3/emission-scone3.5', command = "tail -f /dev/null",
name = container, remove=True, devices=['/dev/isgx'], network='e-mission', mounts=[mount], volumes={path :{'bind':'/usr/src/myapp','mode':'rw'}}, working_dir='/usr/src/myapp', detach=True),
container]
list_of_containers[i][0].pause()
print(list_of_containers)
#with open("mock_data.json", "w") as jsonFile:
# json.dump(json_data, jsonFile)
return "User containers started"
if __name__ == "__main__":
atexit.register(remove_containers)
start()
run(port=port, host='0.0.0.0',debug=True, server='paste')
#threading.Thread(target=run, args=(2000, '0.0.0.0')).start()
```
|
{
"source": "jesbu1/h-baselines",
"score": 2
}
|
#### File: alf_k8s-pytorch/alf_submit/submit_hiro.py
```python
from conf import Config
import argparse
import sys
import os
import subprocess
import json
# set HDFS env variables
os.environ["HADOOP_HOME"] = "/usr/local/hadoop-2.7.2"
os.environ["PATH"] = os.path.expandvars("${HADOOP_HOME}/bin/:") + os.environ["PATH"]
os.environ["HADOOP_PREFIX"] = os.path.expandvars("${HADOOP_HOME}")
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-1.7.0"
os.environ["CLASSPATH"] = os.path.expandvars("$($HADOOP_HOME/bin/hadoop classpath --glob)")
# traincli url
traincli_url = "https://gallery.hobot.cc/download/algorithmplatform/traincli/project/release/linux/x86_64/general/basic/%s/traincli-%s"
def check_traincli():
"""The new devserver has always traincli update-to-date. So this function
is no longer needed.
"""
ver = Config.traincli_version
binary = "traincli-" + ver
if not os.path.isfile(binary):
os.system("wget " + traincli_url % (ver, ver))
os.system("chmod +x " + binary)
def _generate_job_yaml(options):
with open('gail/job.yaml', 'r') as f:
job_yaml = f.read()
gpu_n = 4
if options.search_config != '':
with open(os.path.join("gail/job/alf/alf/examples",
options.search_config)) as f:
conf = json.load(f)
if "gpus" in conf:
gpu_n = len(conf["gpus"])
print(gpu_n)
job_yaml = job_yaml.replace("__job_name__", options.job_name)
job_yaml = job_yaml.replace("__gpu_per_worker__", str(gpu_n))
job_yaml = job_yaml.replace("__alf_version__", Config.alf_version)
with open("gail/job.yaml", 'w') as f:
f.write(job_yaml)
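# A minimal grid-search config consumed above might look like the following
# (hypothetical contents; only the optional "gpus" list is inspected here, to
# set the number of GPUs per worker):
#   {
#       "gpus": [0, 1, 2, 3],
#       ...
#   }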
def _generate_job_script(options):
if options.search_config == '':
job_path = 'gail/job/job.sh'
else:
job_path = 'gail/job/grid_search.sh'
with open(job_path, 'r') as f:
job_script = f.read()
with open("gail/job/job.sh", 'w') as f:
f.write(job_script)
def generate_job_files(options):
_generate_job_yaml(options)
_generate_job_script(options)
def choose_cluster(options):
with open('gpucluster.yaml', 'r') as f:
cluster_str = f.read()
assert options.cluster in Config.clusters, \
"Cluster name {} is unrecognized!".format(options.cluster)
id_and_key = Config.clusters[options.cluster]
cluster_str = cluster_str.replace("__appid__", id_and_key["appid"])
cluster_str = cluster_str.replace("__appkey__", id_and_key["appkey"])
with open("gpucluster.yaml", 'w') as f:
f.write(cluster_str)
os.system("mkdir -p $HOME/.hobot; cp gpucluster.yaml $HOME/.hobot/")
def submit():
os.chdir("gail")
#check_traincli()
os.system("traincli submit -f job.yaml")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-j", "--job_name", help="The job name")
parser.add_argument("-s", "--search_config", type=str, default='',
help="The grid search json file")
parser.add_argument("-c", "--cluster", default="algo-small",
help="The cluster to put jobs on: algo-small|rl-small|share-rtx")
options = parser.parse_args(sys.argv[1:])
generate_job_files(options)
choose_cluster(options)
submit()
```
#### File: hbaselines/utils/misc.py
```python
import os
import errno
def ensure_dir(path):
"""Ensure that the directory specified exists, and if not, create it."""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise # pragma: no cover
return path
```
#### File: jesbu1/h-baselines/run_socialbot_evals.py
```python
import itertools
import random
import subprocess
import os
from absl import logging, flags, app
from multiprocessing import Queue, Manager
from pathos import multiprocessing
import traceback
import time
import sys
log_dir = sys.argv[1]
num_gpus = 2
max_worker_num = num_gpus * 1 + 1
nb_train_steps = 400
meta_update_freq = 1
actor_update_freq = 1
batch_size = 1024
num_envs = 10
COMMAND1 = f"python3 experiments/run_hiro.py {log_dir}"
COMMAND2 = f"--alg TD3 --evaluate --n_training 1 --verbose 1 --relative_goals --off_policy_corrections --eval_deterministic --num_envs {num_envs} --actor_lr 1e-4 --critic_lr 1e-4 --use_huber --target_noise_clip 0.5 --batch_size {batch_size} --tau 0.05 --gamma 0.99 --nb_train_steps {nb_train_steps} --meta_update_freq {meta_update_freq} --actor_update_freq {actor_update_freq} --intrinsic_reward_scale 1.0 --meta_period 3 --buffer_size 500000 --noise 0.1"
envs = ["GoalTask", "KickBallTask"]
total_steps = [4950000, 4950000]
horizons = [100, 200]
nb_rollout_steps = [10 * 100, 10 * 200]
def _init_device_queue(max_worker_num):
m = Manager()
device_queue = m.Queue()
for i in range(max_worker_num):
idx = i % num_gpus
device_queue.put(idx)
return device_queue
def run():
"""Run trainings with all possible parameter combinations in
the configured space.
"""
process_pool = multiprocessing.Pool(
processes=max_worker_num, maxtasksperchild=1)
device_queue = _init_device_queue(max_worker_num)
    for _ in range(3):
for i, env in enumerate(envs):
command = "%s %s --total_steps %d --horizon %d --nb_rollout_steps %d %s" % (COMMAND1, env, total_steps[i], horizons[i], nb_rollout_steps[i], COMMAND2)
process_pool.apply_async(
func=_worker,
args=[command, device_queue],
error_callback=lambda e: logging.error(e))
process_pool.close()
process_pool.join()
def _worker(command, device_queue):
# sleep for random seconds to avoid crowded launching
try:
time.sleep(random.uniform(0, 15))
device = device_queue.get()
logging.set_verbosity(logging.INFO)
logging.info("command %s" % command)
os.system("CUDA_VISIBLE_DEVICES=%d " % device + command)
device_queue.put(device)
except Exception as e:
logging.info(traceback.format_exc())
raise e
run()
```
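The script above schedules training commands across GPUs with a shared queue of device indices: each worker borrows an index, pins the command to that GPU via CUDA_VISIBLE_DEVICES, and returns the index afterwards. Below is a stripped-down sketch of that pattern using the standard-library pool instead of pathos; the command strings and GPU count are placeholders.
```python
import os
import random
import time
from multiprocessing import Manager, Pool

NUM_GPUS = 2  # placeholder


def init_device_queue(num_workers, num_gpus=NUM_GPUS):
    # Pre-fill a shared queue with GPU indices, round-robin over the workers.
    queue = Manager().Queue()
    for i in range(num_workers):
        queue.put(i % num_gpus)
    return queue


def worker(command, device_queue):
    # Stagger launches, borrow a GPU id, run the command pinned to that GPU,
    # then return the id so another worker can reuse it.
    time.sleep(random.uniform(0, 5))
    device = device_queue.get()
    try:
        os.system("CUDA_VISIBLE_DEVICES=%d %s" % (device, command))
    finally:
        device_queue.put(device)


if __name__ == "__main__":
    commands = ["echo train-run-%d" % i for i in range(4)]  # placeholder commands
    num_workers = NUM_GPUS + 1
    device_queue = init_device_queue(num_workers)
    pool = Pool(processes=num_workers)
    for cmd in commands:
        pool.apply_async(worker, args=(cmd, device_queue))
    pool.close()
    pool.join()
```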
#### File: SocialRobotCustom/examples/test_simple_navigation.py
```python
import gym
import random
import social_bot
import logging
import time
import psutil
import os
def main():
env = gym.make("SocialBot-SimpleNavigationLanguage-v0")
steps = 0
t0 = time.time()
proc = psutil.Process(os.getpid())
logging.info(" mem=%dM" % (proc.memory_info().rss // 1e6))
for _ in range(10000000):
obs = env.reset()
control = [random.random() * 0.2, random.random() * 0.2]
while True:
obs, reward, done, info = env.step(
dict(control=control, sentence="hello"))
steps += 1
if done:
logging.info("reward: " + str(reward) + "sent: " +
str(obs["sentence"]))
break
logging.info("steps=%s" % steps +
" frame_rate=%s" % (steps / (time.time() - t0)) +
" mem=%dM" % (proc.memory_info().rss // 1e6))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
```
#### File: social_bot/envs/embodied_teacher_test.py
```python
import unittest
import random
import os
import time
import json
from collections import OrderedDict
import social_bot
import social_bot.pygazebo as gazebo
from absl import logging
from play_ground import PlayGround
from embodied_teacher import EmbodiedTeacher
from social_bot.tasks import GoalTask, KickingBallTask
class TestEmbodiedTeacher(unittest.TestCase):
def test_embodied_teacher(self):
agents = ['youbot_noplugin', 'pioneer2dx_noplugin', 'pr2_noplugin']
with open(
os.path.join(social_bot.get_model_dir(), "agent_cfg.json"),
'r') as cfg_file:
agent_cfgs = json.load(cfg_file)
for agent_type in agents:
for use_image_obs in [True, False]:
agent_cfg = agent_cfgs[agent_type]
test_tasks = [GoalTask, KickingBallTask]
if agent_cfg['camera_sensor'] == '' and use_image_obs:
continue
logging.info("Testing Case: Agent " + agent_type + ", Task " +
str(test_tasks) + ", UseImage: " +
str(use_image_obs))
env = EmbodiedTeacher(
with_language=True,
use_image_observation=use_image_obs,
image_with_internal_states=True,
agent_type=agent_type,
tasks=test_tasks,
demo_by_human=False)
step_cnt = 0
last_done_time = time.time()
while step_cnt < 100 and (time.time() - last_done_time) < 5:
actions = env._control_space.sample()
actions = dict(control=actions, sentence="hello")
teacher_actions = env._teacher_control_space.sample()
actions = OrderedDict(
learner=actions, teacher=teacher_actions)
env.step(actions)
step_cnt += 1
step_per_sec = step_cnt / (time.time() - last_done_time)
logging.info("Test Passed, FPS: " + str(step_per_sec))
env.close()
gazebo.close()
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
unittest.main()
```
#### File: social_bot/envs/simple_navigation.py
```python
from collections import OrderedDict
import numpy as np
import os
import random
import time
from absl import logging
import gym
import gym.spaces
import gin
import social_bot
from social_bot import teacher
from social_bot.envs.gazebo_base import GazeboEnvBase
from social_bot.gazebo_agent import GazeboAgent
from social_bot.teacher import TeacherAction
from social_bot.teacher import DiscreteSequence
from social_bot.tasks import GoalTask
import social_bot.pygazebo as gazebo
@gin.configurable
class SimpleNavigation(GazeboEnvBase):
"""
In this environment, the agent will receive reward 1 when it is close enough to the goal.
If it is moving away from the goal too much or still not close to the goal after max_steps,
it will get reward -1.
The observation space is a numpy array or a dict with keys 'image', 'states', 'sentence'.
Without language and internal states, the observation is a numpy array containing the image.
Otherwise the observation is a dict. Depending on the configuration, it could be:
image and internal states (the states of agent joints)
image and language sequence
image, internal states and language sequence
"""
# number of physics simulation steps per step(). Each step() corresponds to
# a real time of NUM_SIMULATION_STEPS * max_step_size, where `max_step_size`
# is defined in file pioneer2dx_camera.world
NUM_SIMULATION_STEPS = 20
def __init__(self,
with_language=False,
image_with_internal_states=False,
port=None,
resized_image_size=None):
"""Create SimpleNavigation environment.
Args:
with_language (bool): whether to generate language for observation
image_with_internal_states (bool): If true, the agent's self internal
states i.e., joint position and velocities would be available
together with the image.
port (int): TCP/IP port for the simulation server
resized_image_size (None|tuple): If None, use the original image size
from the camera. Otherwise, the original image will be resized
to (width, height)
"""
super(SimpleNavigation, self).__init__(
world_file='pioneer2dx_camera.world', port=port)
self._with_language = with_language
self._image_with_internal_states = image_with_internal_states
self.set_rendering_cam_pose('4 -4 3 0 0.4 2.3')
self._seq_length = 20
# Setup agent
self._agent = GazeboAgent(
world=self._world,
agent_type='pioneer2dx_noplugin',
with_language=with_language,
vocab_sequence_length=self._seq_length,
use_image_observation=True,
resized_image_size=resized_image_size,
image_with_internal_states=image_with_internal_states)
# Setup teacher and tasks
self._teacher = teacher.Teacher(task_groups_exclusive=False)
task_group = teacher.TaskGroup()
task = GoalTask(
env=self,
max_steps=120,
goal_name="goal",
fail_distance_thresh=0.5,
distraction_list=[],
random_range=2.7,
polar_coord=False)
task_group.add_task(task)
self._teacher.add_task_group(task_group)
self._sentence_space = DiscreteSequence(self._teacher.vocab_size,
self._seq_length)
# Setup action space and observation space
self.reset()
self._agent.set_sentence_space(self._sentence_space)
self._control_space = self._agent.get_control_space()
self._action_space = self._agent.get_action_space()
self._observation_space = self._agent.get_observation_space(
self._teacher)
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
@property
def reward_range(self):
return -1., 1.
def step(self, action):
"""
Args:
action (dict|int): If with_language, action is a dictionary with keys "control" and "sentence".
action['control'] is a vector whose dimension is
len(joints). action['sentence'] is a sentence sequence.
If not with_language, it is an int for the action id.
Returns:
If with_language, it is a dictionary with key 'obs' and 'sentence'
If not with_language, it is a numpy.array for observation
"""
if self._with_language:
sentence = action.get('sentence', None)
if type(sentence) != str:
sentence = self._teacher.sequence_to_sentence(sentence)
controls = action['control']
else:
sentence = ''
controls = action
self._agent.take_action(controls)
self._world.step(self.NUM_SIMULATION_STEPS)
teacher_action = self._teacher.teach(sentence)
obs = self._agent.get_observation(self._teacher,
teacher_action.sentence)
return (obs, teacher_action.reward, teacher_action.done, {})
def reset(self):
self._teacher.reset(self._agent, self._world)
teacher_action = self._teacher.teach("")
self._world.step(self.NUM_SIMULATION_STEPS)
obs = self._agent.get_observation(self._teacher,
teacher_action.sentence)
return obs
class SimpleNavigationDiscreteAction(SimpleNavigation):
def __init__(self, port=None):
super(SimpleNavigationDiscreteAction, self).__init__(port=port)
self._action_space = gym.spaces.Discrete(25)
def step(self, action):
control = [0.05 * (action // 5) - 0.1, 0.05 * (action % 5) - 0.1]
return super(SimpleNavigationDiscreteAction, self).step(control)
class SimpleNavigationLanguage(SimpleNavigation):
def __init__(self, port=None):
super(SimpleNavigationLanguage, self).__init__(
with_language=True, port=port)
class SimpleNavigationSelfStatesLanguage(SimpleNavigation):
def __init__(self, port=None):
super(SimpleNavigationSelfStatesLanguage, self).__init__(
with_language=True, image_with_internal_states=True, port=port)
def main():
"""
Simple testing of this environment.
"""
import matplotlib.pyplot as plt
env = SimpleNavigationSelfStatesLanguage()
for _ in range(10000000):
obs = env.reset()
control = [random.random() * 0.2, random.random() * 0.2]
plt.imshow(obs['image'])
logging.info("Close the figure to continue")
plt.show()
fig = None
while True:
obs, reward, done, _ = env.step(
dict(control=control, sentence="hello"))
if fig is None:
fig = plt.imshow(obs['image'])
else:
fig.set_data(obs['image'])
plt.pause(0.00001)
if done:
logging.info("reward: " + str(reward) + "sent: " +
str(obs["sentence"]))
break
if __name__ == "__main__":
logging.set_verbosity(logging.DEBUG)
main()
```
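As a small aside, `SimpleNavigationDiscreteAction` above decodes an action id in [0, 25) into two wheel commands via `action // 5` and `action % 5`, each landing on one of the five values {-0.1, -0.05, 0.0, 0.05, 0.1}. A self-contained sketch of that decoding, no simulator required:
```python
# Sketch of the discrete-action decoding used by SimpleNavigationDiscreteAction.
def decode_action(action):
    """Map an action id in [0, 25) to a pair of wheel commands in [-0.1, 0.1]."""
    assert 0 <= action < 25
    return [0.05 * (action // 5) - 0.1, 0.05 * (action % 5) - 0.1]


if __name__ == "__main__":
    for a in (0, 12, 24):
        print(a, decode_action(a))
    # 0  -> [-0.1, -0.1]
    # 12 -> [0.0, 0.0]
    # 24 -> [0.1, 0.1]
```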
#### File: python/social_bot/gazebo_agent.py
```python
import math
import os
import time
import random
import json
import gin
import numpy as np
import PIL.Image
from collections import OrderedDict
import gym
from absl import logging
import social_bot
import social_bot.pygazebo as gazebo
@gin.configurable
class GazeboAgent():
""" Class for the agent of gazebo-based SocialRobot enviroments
"""
def __init__(self,
world,
agent_type,
name=None,
config=None,
use_image_observation=True,
resized_image_size=None,
image_with_internal_states=False,
with_language=False,
with_agent_language=False,
vocab_sequence_length=20,
action_wrapper=None):
"""
Args:
world (pygazebo.World): the world
agent_type (str): the agent_type, supporting pr2_noplugin,
pioneer2dx_noplugin, turtlebot, youbot_noplugin and icub_with_hands for now
note that 'agent_type' should be exactly the same string as the model's
name at the beginning of model's sdf file
name (str): the name of the agent in world
if None it will be set the same as agent_type
config (dict): the configurations for the agent,
see `agent_cfg.json` for details
use_image_observation (bool): Use image or not
resized_image_size (None|tuple): If None, use the original image size
from the camera. Otherwise, the original image will be resized
to (width, height)
image_with_internal_states (bool): If true, the agent's self internal states
i.e., joint position and velocities would be available together with image.
Only takes effect if use_image_observation is true
with_language (bool): The observation will be a dict with an extra sentence
with_agent_language (bool): Include language in agent's action space
vocab_sequence_length (int): the length of encoded sequence if with_language
action_wrapper (None|class): Sometimes the primitive joints are not wanted, e.g., they have
redundant dimensions or offsets. If not None, this is used to transform the agent
actions. See ActionWrapper in gazebo_agent.py for an example.
"""
self._world = world
self.type = agent_type
self._use_image_observation = use_image_observation
self._resized_image_size = resized_image_size
self._image_with_internal_states = image_with_internal_states
self._with_language = with_language
self._with_agent_language = with_agent_language
self._vocab_sequence_length = vocab_sequence_length
self._sentence_space = None
if config is None:
# Load agent configurations
with open(
os.path.join(social_bot.get_model_dir(), "agent_cfg.json"),
'r') as cfg_file:
agent_cfgs = json.load(cfg_file)
config = agent_cfgs[agent_type]
self.config = config
joints = config['control_joints']
if action_wrapper is not None:
self._action_wrapper = action_wrapper()
self._action_dim = self._action_wrapper.get_actions_dim()
else:
self._action_wrapper = None
self._action_dim = len(joints)
if name:
# the agent is wrapped by a new name in world
self.name = name
self.joints = []
for joint in joints:
self.joints.append(name + '::' + joint)
else:
self.name = agent_type
self.joints = joints
self._agent = self._world.get_agent(self.name)
# Set the functions from pygazebo.agent on this Agent
self.get_pose = self._agent.get_pose
self.set_pose = self._agent.set_pose
self.get_link_pose = self._agent.get_link_pose
self.set_link_pose = self._agent.set_link_pose
self.get_joint_state = self._agent.get_joint_state
self.set_joint_state = self._agent.set_joint_state
self.set_pid_controller = self._agent.set_pid_controller
self.get_collisions = self._agent.get_collisions
self.get_velocities = self._agent.get_velocities
# Setup joints and sensors
self._camera = config['camera_sensor']
self.action_range = self.setup_joints(self._agent, self.joints, config)
logging.debug("joints to control: %s" % self.joints)
def reset(self):
""" Reset the agent. """
self._agent.reset()
def take_action(self, action):
""" Take actions.
Args:
action: the actions to be taken.
"""
if self._action_wrapper is not None:
action = self._action_wrapper.wrap_actions(action)
controls = np.clip(action, -1.0, 1.0) * self.action_range
controls_dict = dict(zip(self.joints, controls))
self._agent.take_action(controls_dict)
def get_observation(self, teacher, sentence_raw="hello"):
""" Get the observation of agent.
Args:
teacher (social_bot.Teacher): the teacher, used to get the task specific
observations from teacher's taskgroups.
sentence_raw (string): the sentence intended to be sent to the agent. This can
be ignored if with_language is False.
Returns:
obs (dict |numpy.array): the return depends on the configurations: with
language or not, use image or not, and image_with_internal_states or not.
Possible situations:
low-dimensional full states
low-dimensional full states with language sentence
image from the camera of agent
image with internal states
image with language sentence
image with both internal states and language sentence
Note that the low-dimensional full states are defined in
"Task.task_specific_observation()", which has all the information needed
for the task, while the internal states used as a supplement
to the image come from "Agent.get_internal_states()", which only contains
the agent's joint positions and velocities. Joint positions are wrapped with
sin() and cos() to avoid the discontinuity at the 0/2*pi wrap-around.
"""
if self._image_with_internal_states or self._with_language:
# observation is an OrderedDict
obs = self._create_observation_dict(teacher, sentence_raw)
elif self._use_image_observation: # observation is pure image
obs = self.get_camera_observation()
else: # observation is pure low-dimensional states
obs = teacher.get_task_specific_observation(self)
return obs
def get_camera_observation(self):
""" Get the camera image.
Returns:
a numpy.array of the image.
"""
image = np.array(
self._agent.get_camera_observation(self._camera), copy=False)
if self._resized_image_size:
image = PIL.Image.fromarray(image).resize(self._resized_image_size,
PIL.Image.ANTIALIAS)
image = np.array(image, copy=False)
return image
def get_internal_states(self):
""" Get the internal joint states of the agent.
Returns:
a numpy.array including joint positions and velocities
"""
joint_pos = []
joint_vel = []
for joint_id in range(len(self.joints)):
joint_name = self.joints[joint_id]
joint_state = self._agent.get_joint_state(joint_name)
joint_pos.append(joint_state.get_positions())
joint_vel.append(joint_state.get_velocities())
joint_pos = np.array(joint_pos).flatten()
joint_vel = np.array(joint_vel).flatten()
# pos of a continuous joint could be huge, wrap the range with sin and cos.
joint_pos_sin = np.sin(joint_pos)
joint_pos_cos = np.cos(joint_pos)
internal_states = np.concatenate(
(joint_pos_sin, joint_pos_cos, joint_vel), axis=0)
return internal_states
def get_control_space(self):
""" Get the pure controlling space without language. """
control_space = gym.spaces.Box(
low=-1.0, high=1.0, shape=[self._action_dim], dtype=np.float32)
return control_space
def get_action_space(self):
""" Get the action space with optional language. """
control_space = self.get_control_space()
if self._with_agent_language and self._with_language:
action_space = gym.spaces.Dict(
control=control_space, sentence=self._sentence_space)
else:
action_space = control_space
return action_space
def get_observation_space(self, teacher):
"""
Get the observation space with optional language.
Args:
teacher (social_bot.Teacher): the teacher, used to get the task specific
observations from teacher's taskgroups as a sample.
"""
obs_sample = self.get_observation(teacher)
if self._with_language or self._image_with_internal_states:
# observation is a dictionary
observation_space = self._construct_dict_space(obs_sample)
elif self._use_image_observation:
# observation is image
observation_space = gym.spaces.Box(
low=0, high=255, shape=obs_sample.shape, dtype=np.uint8)
else:
# observation is sparse states
observation_space = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=obs_sample.shape,
dtype=np.float32)
return observation_space
def set_sentence_space(self, sentence_space):
""" Set the sentence if with_languange is enabled.
Args:
sentence_space (gym.spaces): the space for sentence sequence
"""
self._sentence_space = sentence_space
def _create_observation_dict(self, teacher, sentence_raw):
obs = OrderedDict()
if self._use_image_observation:
obs['image'] = self.get_camera_observation()
if self._image_with_internal_states:
obs['states'] = self.get_internal_states()
else:
obs['states'] = teacher.get_task_specific_observation(self)
if self._with_language:
obs['sentence'] = teacher.sentence_to_sequence(
sentence_raw, self._vocab_sequence_length)
return obs
def _construct_dict_space(self, obs_sample):
""" A helper function when gym.spaces.Dict is used as observation.
Args:
obs_sample (numpy.array|dict) : a sample observation
Returns:
Return a gym.spaces.Dict with keys 'image', 'states', 'sentence'
Possible situations:
image with internal states
image with language sentence
image with both internal states and language sentence
pure low-dimensional states with language sentence
"""
ob_space_dict = dict()
if 'image' in obs_sample.keys():
ob_space_dict['image'] = gym.spaces.Box(
low=0,
high=255,
shape=obs_sample['image'].shape,
dtype=np.uint8)
if 'states' in obs_sample.keys():
ob_space_dict['states'] = gym.spaces.Box(
low=-np.inf,
high=np.inf,
shape=obs_sample['states'].shape,
dtype=np.float32)
if 'sentence' in obs_sample.keys():
ob_space_dict['sentence'] = self._sentence_space
ob_space = gym.spaces.Dict(ob_space_dict)
return ob_space
def setup_joints(self, agent, joints, agent_cfg):
""" Setup the joints acrroding to agent configuration.
Args:
agent (pygazebo.Agent): the agent
joints (list of string): the name of joints
agent_cfg (dict): the configuration
"""
joint_states = list(map(lambda s: agent.get_joint_state(s), joints))
joints_limits = list(
map(lambda s: s.get_effort_limits()[0], joint_states))
print("JOINT LIMITS: %s" % joints_limits)
print("USE PID: %s" % str(agent_cfg['use_pid']))
if agent_cfg['use_pid']:
for joint_index in range(len(joints)):
agent.set_pid_controller(
joint_name=joints[joint_index],
pid_control_type=agent_cfg['pid_type'][joint_index],
p=agent_cfg['pid'][joint_index][0],
i=agent_cfg['pid'][joint_index][1],
d=agent_cfg['pid'][joint_index][2],
max_force=joints_limits[joint_index])
control_range = agent_cfg['pid_control_limit']
else:
control_range = np.array(joints_limits)
return control_range
def get_egocentric_cord_2d(self, x, y, agent_yaw):
""" Get the egocentric coordinate from a global 2D x-y plane coordinate.
This is achieved by rotating the global coordinates x, y by -agent_yaw.
Args:
x (float): x of global x-y plane coordinate
y (float): y of global x-y plane coordinate
agent_yaw (float): agent yaw (rotation in z-axis), in radian
Returns:
tuple of float, the position in the transformed coordinate
"""
rotate = -agent_yaw
rotated_x = x * np.cos(rotate) - y * np.sin(rotate)
rotated_y = x * np.sin(rotate) + y * np.cos(rotate)
return (rotated_x, rotated_y)
def get_contacts(self, contacts_sensor, contact_collision):
""" Get contacts to the link.
Args:
contacts_sensor(string): the name of contacts_sensor
contact_collision(string): the collision to check contacts
Returns:
bool, there is contact or not
"""
contacts = self.get_collisions(contacts_sensor)
for collision in contacts:
if collision[0] == contact_collision or collision[
1] == contact_collision:
return True
return False
class ActionWrapper():
""" The action wrapper transform a new actions to primitive actions.
The primitive actions (like the force/velocity/position of joints) may have redundant
dimensions or offsets. By the action wrapper, we can transform the action to more
efficency one. The sub class should define the new action space in _NEW_ACTION_LIST.
"""
_NEW_ACTION_LIST = []
def get_actions_dim(self):
""" Get the dimension of the new action space
"""
return len(self._NEW_ACTION_LIST)
def wrap_actions(self, action):
""" Wrap transformed actions to primitive actions.
Args:
action (nparray): the new action from policy network
Returns:
np.array, the primitive actions sent to the simulator
"""
raise NotImplementedError("wrap_actions not implemented!")
@gin.configurable
class YoubotActionWrapper(ActionWrapper):
""" This action wrapper transform a new actions to primitive actions.
The new action space is the same as keyboard demostration interface, defined in _NEW_ACTION_LIST
The primitive actions (the joints) please refer to social_bot/models/agent_cfg.json.
"""
_NEW_ACTION_LIST = [
'arm_joint_yaw', 'arm_joint_pitch', 'arm_joint_pitch_2', 'palm_joint',
'gripper_finger_joint', 'wheel_speed', 'wheel_turning'
]
def wrap_actions(self, action):
""" Wrap transformed actions to primitive actions.
Args:
action (nparray): the new action from policy network
Returns:
np.array, the primitive actions sent to the simulator
"""
action = dict(zip(self._NEW_ACTION_LIST, action))
primitive_actions = [
# arm joints
action['arm_joint_yaw'],
0.25 + action['arm_joint_pitch'] / 2, # add pi/4 offset
0.25 + action['arm_joint_pitch'] / 2,
0.25 + action['arm_joint_pitch_2'],
action['palm_joint'],
# gripper joints
action['gripper_finger_joint'],
action['gripper_finger_joint'],
# wheel joints
action['wheel_speed'] + action['wheel_turning'],
action['wheel_speed'] - action['wheel_turning']
]
return primitive_actions
```
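Two numerical details in `GazeboAgent` above are worth isolating: joint angles are exposed as (sin, cos) pairs so a continuous joint near the 0/2*pi wrap-around does not produce a jump in the observation, and global (x, y) offsets are rotated by -yaw to obtain egocentric coordinates. A minimal NumPy sketch of both transforms, independent of the simulator:
```python
import numpy as np


def wrap_joint_positions(joint_pos):
    # Replace raw angles by (sin, cos) pairs; angles that differ by 2*pi map to
    # identical features, avoiding the discontinuity at the wrap-around.
    joint_pos = np.asarray(joint_pos, dtype=np.float64)
    return np.concatenate((np.sin(joint_pos), np.cos(joint_pos)))


def to_egocentric_2d(x, y, agent_yaw):
    # Rotate a global (x, y) offset by -yaw so the result is expressed in the
    # agent's own frame, mirroring GazeboAgent.get_egocentric_cord_2d.
    c, s = np.cos(-agent_yaw), np.sin(-agent_yaw)
    return x * c - y * s, x * s + y * c


if __name__ == "__main__":
    print(wrap_joint_positions([0.0, 2 * np.pi - 1e-3]))  # nearly identical feature pairs
    print(to_egocentric_2d(1.0, 0.0, np.pi / 2))  # ~(0, -1): the point is to the agent's right
```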
#### File: python/social_bot/teacher.py
```python
import numpy as np
import random
import gym
from absl import logging
class DiscreteSequence(gym.Space):
"""
gym.Space object for language sequence
"""
def __init__(self, vocab_size, max_length):
"""
Args:
vocab_size (int): number of different tokens
max_length (int): maximal length of the sequence
"""
super().__init__(shape=(max_length, ), dtype=np.int32)
self._vocab_size = vocab_size
self._max_length = max_length
class TeacherAction(object):
def __init__(self, reward=0.0, sentence="", done=False, is_idle=False,
success=False):
"""
Args:
done: end of an episode if true
success: if the episode is successful or not
"""
self.reward = reward
self.sentence = sentence
self.done = done
self.is_idle = is_idle
self.success = success
class TaskGroup(object):
"""A group of tasks.
Each task group consists of one or more tasks. Within one task group, one
task can run at one time. A random task is chosen after the current task is
finished.
"""
def __init__(self):
self._tasks = []
self._current_tid = None
self._current_task = None
self._current_reward_weight = 1.0
self._agent = None
self._world = None
self._is_idle = True
def add_task(self, task):
"""Add a task to the group.
Args:
task (Task): an instance of Task
Returns:
None
"""
self._tasks.append(task)
def teach(self, agent_sentence):
"""Generate TeacherAction.
Args:
agent_sentence (str): sentence from the agent
Returns:
TeacherAction
"""
task = self._get_current_task()
try:
# teacher_action is the value yielded in task
teacher_action = task.send(agent_sentence)
self._is_idle = teacher_action.is_idle
if teacher_action.done:
task.close()
self._current_task = None
self._is_idle = True
except StopIteration:
task.close()
self._current_task = None
self._is_idle = True
teacher_action = TeacherAction()
return teacher_action
def is_idle(self):
return self._is_idle
def reset(self, agent, world):
"""Reset the task group.
Current task will be closed and a random new one will be chosen.
Args:
agent (GazeboAgent): the learning agent in the world
world (pygazebo.World): the world containing the agent
Returns:
None
"""
self._agent = agent
self._world = world
if self._current_task is not None:
self._current_task.close()
self._current_task = None
# This function only returns the task's generator object (created by run()).
# To get the task object use self._tasks[self._current_tid]
def _get_current_task(self):
if self._current_task is None:
tid = random.randint(0, len(self._tasks) - 1)
self._current_tid = tid
self._current_task = self._tasks[tid].run()
self._current_reward_weight = self._tasks[tid].reward_weight
# This send will cause self._current_task to execute until the first
# yield. We ignore the first yielded value.
self._current_task.send(None)
return self._current_task
def get_current_reward_weight(self):
"""Get reward weight for current task of the group
Args:
None
Returns:
float, the reward weight of current task
"""
return self._current_reward_weight
def get_tasks(self):
"""Get current tasks in the group.
Args:
None
Returns:
list, a list of current tasks in the group
"""
return self._tasks
class Teacher(object):
"""Teacher is for teaching the agent.
It is responsible for:
1. Giving reward
2. Arranging the environment
3. Generating sentences
4. Interpreting sentences from the agent
A teacher has several task groups. At each step
* If task_groups_exclusive is True
Only one task group will run at a time. After the active one becomes
idle, another one will be chosen randomly.
* If task_groups_exclusive is False
All the task groups run concurrently. The rewards are summed together. The
first nonempty sentence will be used. If one of the actions has done=True,
the resulting done will be True.
Each task group consists of one or more tasks. Within one task group, one
task can run at one time. A random task is chosen after the current task is
finished.
"""
def __init__(self, task_groups_exclusive=True):
"""Create a Teacher instance.
Args:
task_groups_exclusive (bool): If True, only one task group is active
at one time. Otherwise, multiple task groups run concurrently.
"""
self._task_groups_exclusive = task_groups_exclusive
self._vocab_list = None
self._task_groups = []
self._weights = []
self.vocab_size = 0
def add_task_group(self, task_group, weight=1):
"""Add a task group to teacher.
Args:
task_group (TaskGroup): TaskGroup to be added
weight (float): In task_groups_exclusive=True mode, the probability
of a TaskGroup being chosen is proportional to this value.
Returns:
None
"""
self._task_groups.append(task_group)
self._weights.append(weight)
def get_task_groups(self):
"""Get current task groups of teacher.
Args:
None
Returns:
list, a list of current task group
"""
return self._task_groups
def get_task_specific_observation(self, agent):
"""Get the task specific observation of all the tasks added to the teacher
Args:
agent (GazeboAgent): the agent
Returns:
numpy.array, the specific observation for all the tasks added
"""
task_specific_ob = np.array([])
for task_group in self.get_task_groups():
for task in task_group.get_tasks():
task_specific_ob = np.append(
task_specific_ob, task.task_specific_observation(agent))
return task_specific_ob
def _build_vocab_from_tasks(self):
"""Build vocabulary table."""
# Initialize the vocab with token 0 at index 0, which is used for padding
vocab_list = [
0,
]
for g in self._task_groups:
for t in g._tasks:
vocab_list = vocab_list + t.task_vocab
# Remove repeated words and convert to dict
self._vocab_list = sorted(set(vocab_list), key=vocab_list.index)
self.vocab_size = len(self._vocab_list)
self._vocab_dict = dict(
zip(self._vocab_list, list(range(0, self.vocab_size))))
def sentence_to_sequence(self, sentence, max_sequence_length):
"""Convert sentence string to numpy integer sequence.
Args:
sentence (str): string for the sentence. Note that currently the
tokenization is case-sensitive. For example, "This" and "this"
are treated as different words.
max_sequence_length (int): The length of the generated numpy array.
If number of words in sentence is smaller than this value, 0 is
padded at the end.
Returns:
numpy.array
"""
if self._vocab_list is None:
self._build_vocab_from_tasks()
word_list = sentence.split()
for word in word_list:
assert word in self._vocab_dict.keys(), \
"Word is out of vocab: " + word + \
", during encoding sentence to sequence"
sequence = list(map(lambda x: self._vocab_dict[x], word_list))
padding_num = max_sequence_length - len(sequence)
assert padding_num >= 0, "Sequence " + str(sequence) + \
" exceed max_sequence_length: " + str(max_sequence_length) + \
", consider to increase the max_sequence_length"
return np.pad(sequence, (0, padding_num), 'constant')
def sequence_to_sentence(self, sequence):
"""Convert integer sequence to str based on vocabulary table.
Values after the first 0 in the sequence are ignored. In the generated
string, words are separated by space ' '.
Args:
sequence (int[]): integer sequence
Returns:
str
"""
if self._vocab_list is None:
self._build_vocab_from_tasks()
for seq_index in range(len(sequence)):
assert sequence[seq_index] < self.vocab_size, \
"Unknown word id: " + str(sequence[seq_index]) + \
", during decoding sequence to sentence"
if sequence[seq_index] == 0:
break
word_list = list(
map(lambda x: self._vocab_list[x], sequence[:seq_index]))
return " ".join(word_list)
def reset(self, agent, world):
"""Reset teacher.
All the task group will be reset, that is, current task in each task
group is closed and a random new one will be chosen.
Args:
agent (GazeboAgent): the learning agent in the world
world (pygazebo.World): the world containing the agent
Returns:
None
"""
for g in self._task_groups:
g.reset(agent, world)
self._switch_task_group()
def _switch_task_group(self):
self._current_task_group = np.random.choice(
self._task_groups, p=np.array(self._weights) / sum(self._weights))
def teach(self, agent_sentence):
"""Generate TeacherAction.
Args:
agent_sentence (str): sentence from the agent
Returns:
TeacherAction
"""
return_action = None
if self._task_groups_exclusive:
if self._current_task_group.is_idle():
self._switch_task_group()
return_action = self._current_task_group.teach(agent_sentence)
else:
final_sentence = ''
final_reward = 0.
done = False
active_group_id = -1
success = False
# run all groups in parallel
for i, g in enumerate(self._task_groups):
teacher_action = g.teach(agent_sentence)
if teacher_action.done:
done = True
if teacher_action.success:
success = True
weight = g.get_current_reward_weight()
final_reward += weight * teacher_action.reward
if not final_sentence:
final_sentence = teacher_action.sentence
active_group_id = i
if active_group_id != -1:
g = self._task_groups.pop(active_group_id)
self._task_groups.insert(0, g)
return_action = TeacherAction(final_reward, final_sentence, done,
success=success)
return return_action
```
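`Teacher.sentence_to_sequence` and `sequence_to_sentence` above implement a simple fixed-length scheme: words are looked up in a vocab dict, the id sequence is zero-padded to `max_sequence_length`, and decoding stops at the first 0. A self-contained round-trip sketch of that scheme with a toy vocabulary (the real vocabulary is built from the tasks' `task_vocab`):
```python
import numpy as np

# Toy vocabulary; index 0 is reserved for padding, as in Teacher._build_vocab_from_tasks.
vocab_list = [0, "hello", "well", "done", "goal"]
vocab_dict = {word: idx for idx, word in enumerate(vocab_list)}


def encode(sentence, max_len):
    seq = [vocab_dict[word] for word in sentence.split()]
    assert len(seq) <= max_len, "sentence longer than max_sequence_length"
    return np.pad(seq, (0, max_len - len(seq)), 'constant')


def decode(sequence):
    words = []
    for idx in sequence:
        if idx == 0:  # values after the first 0 are ignored
            break
        words.append(vocab_list[idx])
    return " ".join(words)


if __name__ == "__main__":
    seq = encode("well done", max_len=6)
    print(seq)          # [2 3 0 0 0 0]
    print(decode(seq))  # "well done"
```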
|
{
"source": "jesbu1/SocialRobot",
"score": 2
}
|
#### File: python/social_bot/tasks.py
```python
import math
import numpy as np
import os
import gin
import itertools
import random
import json
from collections import deque, OrderedDict
from abc import abstractmethod
from absl import logging
import social_bot
from social_bot.teacher import TeacherAction
class Task(object):
"""Base class for Task.
A Task is for teaching a single task.
"""
compatible_agents = [
'pioneer2dx_noplugin',
'pr2_noplugin',
'icub',
'icub_with_hands',
'youbot_noplugin',
]
def __init__(self, env, max_steps=200, reward_weight=1.0):
"""
Setting things up during the initialization.
Args:
env (social_bot.GazeboEnvBase): an instance of Gym Environment
reward_weight (float): the weight of the reward for calculating final_reward in teacher.teach()
Returns:
None
"""
self._env = env
self._world = env._world
self._agent = env._agent
self._max_steps = max_steps
self.reward_weight = reward_weight
self.task_vocab = ['hello', 'well', 'done', 'failed', 'to']
@abstractmethod
def run(self):
""" run() use yield to generate TeacherAction.
Structure of run():
```python
def run(self):
...
# agent_sentence is provided by Teacher using send() in TaskGroup.teach()
agent_sentence = yield # the first yielded value is ignored
...
# TeacherAction will be passed to Teacher as the return value of send() in TaskGroup.teach()
agent_sentence = yield TeacherAction(...)
...
agent_sentence = yield TeacherAction(...)
...
yield TeacherAction(done=True)
```
Returns:
A generator of TeacherAction
"""
pass
def task_specific_observation(self, agent):
"""
The extra information needed by the task if sparse states are used.
This can be overridden by the sub task. Note that this is only for the
case where "Agent._use_image_observation" is False. For the image case, the
image from the agent's camera is used. For the case of image with internal
states, Agent.get_internal_states() is used, which only returns
the agent's joint positions and velocities.
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
return np.array([])
def set_agent(self, agent):
""" Set the agent of task.
The agent can be overridden by this function. This might be useful when multiple
agents share the same task or an embodied teacher.
Args:
agent (GazeboAgent): the agent
"""
self._agent = agent
def _get_states_of_model_list(self,
model_list,
including_velocity=True,
including_rotation=False):
""" Get the poses and velocities from a model list.
Args:
model_list (list): a list of model names
including_velocity (bool): if True, the velocity of objects will be included.
including_rotation (bool): if True, the rotation of objects (in roll pitch yaw) will be included.
Returns:
np.array, the poses and velocities of the models
"""
model_states = []
for model_id in range(len(model_list)):
model = self._world.get_model(model_list[model_id])
model_states.append(model.get_pose()[0])
if including_rotation:
model_states.append(model.get_pose()[1])
if including_velocity:
model_states.append(model.get_velocities()[0])
model_states = np.array(model_states).flatten()
return model_states
def _random_move_object(self,
target,
random_range,
center_pos=np.array([0, 0]),
min_distance=0,
height=0):
""" Move an object to a random position.
Args:
target (pygazebo.Model): the target to move
random_range (float): the range of the new position
center_pos (numpy.array): the center coordinates (x, y) of the random range
min_distance (float): the new position will not be closer than this distance
height (float): height offset
Returns:
np.array, the new position
"""
r = random.uniform(min_distance, random_range)
theta = random.random() * 2 * np.pi
loc = (center_pos[0] + r * np.cos(theta),
center_pos[1] + r * np.sin(theta), height)
target.set_pose((loc, (0, 0, 0)))
return np.array(loc)
@gin.configurable
class GoalTask(Task):
"""
A simple teacher task to find a goal.
For this task, the agent will receive reward 1 when it is close enough to the goal.
If it is moving away from the goal too much or still not close to the goal after max_steps,
it will get reward -1.
"""
def __init__(self,
env,
max_steps,
goal_name="ball",
distraction_list=[
'coke_can', 'table', 'car_wheel', 'plastic_cup', 'beer'
],
success_distance_thresh=0.5,
fail_distance_thresh=2.0,
distraction_penalty_distance_thresh=0,
distraction_penalty=0.5,
random_agent_orientation=False,
sparse_reward=True,
random_range=5.0,
polar_coord=True,
random_goal=False,
use_curriculum_training=False,
curriculum_distractions=True,
curriculum_target_angle=False,
switch_goal_within_episode=False,
start_range=0,
increase_range_by_percent=50.,
reward_thresh_to_increase_range=0.4,
percent_full_range_in_curriculum=0.1,
max_reward_q_length=100,
reward_weight=1.0,
move_goal_during_episode=True,
end_episode_after_success=False,
success_with_angle_requirement=True,
additional_observation_list=[],
use_egocentric_states=False,
egocentric_perception_range=0):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end if the goal is not reached in so many steps
goal_name (string): name of the goal in the world
distraction_list (list of string): a list of models. The models should be in the gazebo database
success_distance_thresh (float): the goal is reached if it's within this distance to the agent
fail_distance_thresh (float): if the agent moves away from the goal more than this distance,
it's considered a failure and is given reward -1
distraction_penalty_distance_thresh (float): if positive, penalize agent getting too close
to distraction objects (objects that are not the goal itself)
distraction_penalty (float): positive float of how much to penalize getting too close to
distraction objects
random_agent_orientation (bool): whether to randomize the orientation (yaw) of the agent at the beginning of an
episode.
sparse_reward (bool): if true, the reward is -1/0/1, otherwise the 0 case will be replaced
with the normalized distance by which the agent gets closer to the goal.
random_range (float): the goal's random position range
polar_coord (bool): if True, sample the goal position in polar coordinates within random_range; otherwise use cartesian coordinates.
random_goal (bool): if True, teacher will randomly select goal from the object list each episode
use_curriculum_training (bool): when true, use curriculum in goal task training
curriculum_distractions (bool): move distractions according to curriculum as well
curriculum_target_angle (bool): enlarge angle to target when initializing target according
to curriculum. Only when all angles are satisfied does curriculum try to increase distance.
Uses range of 0-360 degrees, starting from 60 with increments of 20.
switch_goal_within_episode (bool): if random_goal and this are both true, goal will be re-picked
within episode every time target is reached, besides picking after whole episode ends.
start_range (float): for curriculum learning, the starting random_range to set the goal
increase_range_by_percent (float): for curriculum learning, how much to increase random range
every time agent reached the specified amount of reward.
reward_thresh_to_increase_range (float): for curriculum learning, how much reward to reach
before the teacher increases random range.
percent_full_range_in_curriculum (float): if above 0, randomly throw in x% of training examples
where random_range is the full range instead of the easier ones in the curriculum.
max_reward_q_length (int): how many recent rewards to consider when estimating agent accuracy.
reward_weight (float): the weight of the reward, is used in multi-task case
move_goal_during_episode (bool): if True, the goal will be moved during episode, when it has been achieved
end_episode_after_success (bool): if True, the episode will end once the goal is reached. A True value of this
flag will overwrite the effects of flags ``switch_goal_within_episode`` and ``move_goal_during_episode``.
success_with_angle_requirement: if True then calculate the reward considering the angular requirement
additional_observation_list: a list of additional objects to be added to the observation
use_egocentric_states (bool): For the non-image observation case, use the states transformed to
egocentric coordinate, e.g., agent's egocentric distance and direction to goal
egocentric_perception_range (float): the max range in degree to limit the agent's observation.
E.g. 60 means object is only visible when it's within +/-60 degrees in front of the agent's
direction (yaw).
"""
self._max_play_ground_size = 5 # play ground will be (-5, 5) for both x and y axes.
# TODO: Remove the default grey walls in the play ground world file,
# and insert them according to the max_play_ground_size.
# The wall should be lower, and adjustable in length. Add a custom model for that.
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
self._goal_name = goal_name
self._success_distance_thresh = success_distance_thresh
self._fail_distance_thresh = fail_distance_thresh
self._distraction_penalty_distance_thresh = distraction_penalty_distance_thresh
if distraction_penalty_distance_thresh > 0:
assert distraction_penalty_distance_thresh < success_distance_thresh
self._distraction_penalty = distraction_penalty
self._sparse_reward = sparse_reward
self._random_agent_orientation = random_agent_orientation
self._use_curriculum_training = use_curriculum_training
self._curriculum_distractions = curriculum_distractions
self._curriculum_target_angle = curriculum_target_angle
self._switch_goal_within_episode = switch_goal_within_episode
if curriculum_target_angle:
self._random_angle = 60
self._start_range = start_range
self._is_full_range_in_curriculum = False
self._random_goal = random_goal
if random_goal and goal_name not in distraction_list:
distraction_list.append(goal_name)
self._distraction_list = distraction_list
self._object_list = distraction_list
if goal_name and goal_name not in distraction_list:
self._object_list.append(goal_name)
self._goals = self._object_list
self._move_goal_during_episode = move_goal_during_episode
self._end_episode_after_success = end_episode_after_success
self._success_with_angle_requirement = success_with_angle_requirement
if not additional_observation_list:
additional_observation_list = self._object_list
self._additional_observation_list = additional_observation_list
self._pos_list = list(
itertools.product(
range(-self._max_play_ground_size, self._max_play_ground_size),
range(-self._max_play_ground_size,
self._max_play_ground_size)))
self._pos_list.remove((0, 0))
self._polar_coord = polar_coord
self._use_egocentric_states = use_egocentric_states
self._egocentric_perception_range = egocentric_perception_range
if self.should_use_curriculum_training():
self._orig_random_range = random_range
self._random_range = start_range
self._max_reward_q_length = max_reward_q_length
self._q = deque(maxlen=max_reward_q_length)
self._reward_thresh_to_increase_range = reward_thresh_to_increase_range
self._increase_range_by_percent = increase_range_by_percent
self._percent_full_range_in_curriculum = percent_full_range_in_curriculum
angle_str = ""
if curriculum_target_angle:
angle_str = ", start_angle {}".format(self._random_angle)
logging.info(
"start_range %f%s, reward_thresh_to_increase_range %f",
self._start_range, angle_str,
self._reward_thresh_to_increase_range)
else:
self._random_range = random_range
self.task_vocab += self._object_list
self._env.insert_model_list(self._object_list)
def should_use_curriculum_training(self):
return (self._use_curriculum_training
and self._start_range >= self._success_distance_thresh * 1.2)
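# Note on the curriculum logic in _push_reward_queue below: the deque keeps the most
# recent max_reward_q_length pushes (roughly 1 per success, 0 otherwise). When a positive
# value is pushed, the queue is full, and its sum reaches
# max_reward_q_length * reward_thresh_to_increase_range (e.g. 100 * 0.4 = 40 with the
# defaults above), the target angle is widened by 20 degrees; only when angle curriculum
# is off or the widened angle exceeds 360 does random_range get scaled by
# (1 + increase_range_by_percent), capped at the original range, with the queue cleared.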
def _push_reward_queue(self, value):
if (not self.should_use_curriculum_training()
) or self._is_full_range_in_curriculum:
return
self._q.append(value)
if (value > 0 and len(self._q) == self._max_reward_q_length
and sum(self._q) >= self._max_reward_q_length *
self._reward_thresh_to_increase_range):
if self._curriculum_target_angle:
self._random_angle += 20
logging.info("Raising random_angle to %d", self._random_angle)
if (not self._curriculum_target_angle or self._random_angle > 360):
self._random_angle = 60
new_range = min((1. + self._increase_range_by_percent) *
self._random_range, self._orig_random_range)
if self._random_range < self._orig_random_range:
logging.info("Raising random_range to %f", new_range)
self._random_range = new_range
self._q.clear()
def get_random_range(self):
return self._random_range
def pick_goal(self):
if self._random_goal:
random_id = random.randrange(len(self._goals))
self.set_goal_name(self._goals[random_id])
def _get_agent_loc(self):
loc, agent_dir = self._agent.get_pose()
if self._agent.type.find('icub') != -1:
# For agent icub, we need to use the average pos here
loc = ICubAuxiliaryTask.get_icub_extra_obs(self._agent)[:3]
loc = np.array(loc)
return loc, agent_dir
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
self._agent.reset()
if self._random_agent_orientation:
loc, agent_dir = self._agent.get_pose()
self._agent.set_pose((loc, (agent_dir[0], agent_dir[1],
2 * math.pi * random.random())))
loc, agent_dir = self._agent.get_pose()
loc = np.array(loc)
self._random_move_objects()
self.pick_goal()
goal = self._world.get_model(self._goal_name)
self._move_goal(goal, loc, agent_dir)
steps_since_last_reward = 0
prev_min_dist_to_distraction = 100
while steps_since_last_reward < self._max_steps:
steps_since_last_reward += 1
loc, agent_dir = self._get_agent_loc()
goal_loc, _ = goal.get_pose()
goal_loc = np.array(goal_loc)
dist = np.linalg.norm(loc - goal_loc)
# dir from get_pose is (roll, pitch, yaw)
dir = np.array([math.cos(agent_dir[2]), math.sin(agent_dir[2])])
goal_dir = (goal_loc[0:2] - loc[0:2]) / dist
dot = sum(dir * goal_dir)
distraction_penalty, prev_min_dist_to_distraction = (
self._get_distraction_penalty(loc, dot,
prev_min_dist_to_distraction))
if dist < self._success_distance_thresh and (
not self._success_with_angle_requirement or dot > 0.707):
# within 45 degrees of the agent direction
reward = 1.0 - distraction_penalty
self._push_reward_queue(max(reward, 0))
logging.debug("yielding reward: " + str(reward))
agent_sentence = yield TeacherAction(
reward=reward, sentence="well done",
done=self._end_episode_after_success,
success=True)
steps_since_last_reward = 0
if self._switch_goal_within_episode:
self.pick_goal()
goal = self._world.get_agent(self._goal_name)
if self._move_goal_during_episode:
self._agent.reset()
loc, agent_dir = self._get_agent_loc()
self._move_goal(goal, loc, agent_dir)
elif dist > self._initial_dist + self._fail_distance_thresh:
reward = -1.0 - distraction_penalty
self._push_reward_queue(0)
logging.debug(
"yielding reward: {}, farther than {} from goal".format(
str(reward), str(self._fail_distance_thresh)))
yield TeacherAction(
reward=reward, sentence="failed", done=True)
else:
if self._sparse_reward:
reward = 0
else:
reward = (self._prev_dist - dist) / self._initial_dist
reward = reward - distraction_penalty
if distraction_penalty > 0:
logging.debug("yielding reward: " + str(reward))
self._push_reward_queue(0)
self._prev_dist = dist
agent_sentence = yield TeacherAction(
reward=reward, sentence=self._goal_name)
reward = -1.0
logging.debug("yielding reward: {}, took more than {} steps".format(
str(reward), str(self._max_steps)))
self._push_reward_queue(0)
if self.should_use_curriculum_training():
logging.debug("reward queue len: {}, sum: {}".format(
str(len(self._q)), str(sum(self._q))))
yield TeacherAction(reward=reward, sentence="failed", done=True)
def _get_distraction_penalty(self, agent_loc, dot,
prev_min_dist_to_distraction):
"""
Calculate penalty for hitting/getting close to distraction objects
"""
distraction_penalty = 0
if (self._distraction_penalty_distance_thresh > 0
and self._distraction_list):
curr_min_dist = 100
for obj_name in self._distraction_list:
obj = self._world.get_model(obj_name)
if not obj:
continue
obj_loc, _ = obj.get_pose()
obj_loc = np.array(obj_loc)
distraction_dist = np.linalg.norm(agent_loc - obj_loc)
if (distraction_dist >=
self._distraction_penalty_distance_thresh):
continue
if obj_name == self._goal_name and dot > 0.707:
continue # correctly getting to goal, no penalty
if distraction_dist < curr_min_dist:
curr_min_dist = distraction_dist
if (prev_min_dist_to_distraction >
self._distraction_penalty_distance_thresh):
logging.debug("hitting object: " + obj_name)
distraction_penalty += self._distraction_penalty
prev_min_dist_to_distraction = curr_min_dist
return distraction_penalty, prev_min_dist_to_distraction
def _move_goal(self, goal, agent_loc, agent_dir):
"""
Move goal as well as all distraction objects to a random location.
"""
avoid_locations = [agent_loc]
loc = self._move_obj(
obj=goal,
agent_loc=agent_loc,
agent_dir=agent_dir,
is_goal=True,
avoid_locations=avoid_locations)
avoid_locations.append(loc)
distractions = OrderedDict()
for item in self._distraction_list:
if item is not self._goal_name:
distractions[item] = 1
if len(distractions) and self._curriculum_distractions:
for item, _ in distractions.items():
distraction = self._world.get_agent(item)
loc = self._move_obj(
obj=distraction,
agent_loc=agent_loc,
agent_dir=agent_dir,
is_goal=False,
avoid_locations=avoid_locations)
avoid_locations.append(loc)
def _move_obj(self,
obj,
agent_loc,
agent_dir,
is_goal=True,
avoid_locations=[]):
if (self.should_use_curriculum_training()
and self._percent_full_range_in_curriculum > 0
and random.random() < self._percent_full_range_in_curriculum):
range = self._orig_random_range
self._is_full_range_in_curriculum = is_goal
else:
range = self._random_range
self._is_full_range_in_curriculum = False
attempts = 0
while True:
attempts += 1
dist = random.random() * range
if self._curriculum_target_angle:
angle_range = self._random_angle
else:
angle_range = 360
angle = math.radians(
math.degrees(agent_dir[2]) + random.random() * angle_range -
angle_range / 2)
loc = (dist * math.cos(angle), dist * math.sin(angle),
0) + agent_loc
if not self._polar_coord:
loc = np.asarray((random.random() * range - range / 2,
random.random() * range - range / 2, 0))
self._initial_dist = np.linalg.norm(loc - agent_loc)
satisfied = True
if (abs(loc[0]) > self._max_play_ground_size or abs(loc[1]) >
self._max_play_ground_size): # not within walls
satisfied = False
for avoid_loc in avoid_locations:
dist = np.linalg.norm(loc - avoid_loc)
if dist < self._success_distance_thresh:
satisfied = False
break
if satisfied or attempts > 10000:
if not satisfied:
logging.warning(
"Took forever to find satisfying " +
"object location. " +
"agent_loc: {}, range: {}, max_size: {}.".format(
str(agent_loc), str(range),
str(self._max_play_ground_size)))
break
self._prev_dist = self._initial_dist
obj.reset()
obj.set_pose((loc, (0, 0, 0)))
return loc
def _random_move_objects(self, random_range=10.0):
obj_num = len(self._object_list)
obj_pos_list = random.sample(self._pos_list, obj_num)
for obj_id in range(obj_num):
model_name = self._object_list[obj_id]
loc = (obj_pos_list[obj_id][0], obj_pos_list[obj_id][1], 0)
pose = (np.array(loc), (0, 0, 0))
self._world.get_model(model_name).set_pose(pose)
def get_goal_name(self):
"""
Args:
None
Returns:
Goal's name at this episode
"""
return self._goal_name
def set_goal_name(self, goal_name):
"""
Args:
Goal's name
Returns:
None
"""
logging.debug('Setting Goal to %s', goal_name)
self._goal_name = goal_name
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
goal = self._world.get_model(self._goal_name)
goal_first = not agent._with_language
if goal_first: # put goal first
pose = np.array(goal.get_pose()[0]).flatten()
else: # has language input, don't put goal first
pose = None
for name in self._additional_observation_list:
if goal_first and name == self._goal_name:
continue
obj = self._world.get_model(name)
obj_pos = np.array(obj.get_pose()[0]).flatten()
if pose is None:
pose = obj_pos
else:
pose = np.concatenate((pose, obj_pos), axis=0)
agent_pose = np.array(agent.get_pose()).flatten()
if self._use_egocentric_states:
yaw = agent_pose[5]
# adds egocentric velocity input
vx, vy, vz, a1, a2, a3 = np.array(agent.get_velocities()).flatten()
rvx, rvy = agent.get_egocentric_cord_2d(vx, vy, yaw)
obs = [rvx, rvy, vz, a1, a2, a3]
# adds objects' (goal's as well as distractions') egocentric
# coordinates to observation
while len(pose) > 1:
x = pose[0] - agent_pose[0]
y = pose[1] - agent_pose[1]
rotated_x, rotated_y = agent.get_egocentric_cord_2d(x, y, yaw)
if self._egocentric_perception_range > 0:
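# The object offset below is converted into a unit direction (rotated_x, rotated_y)
# plus a 1/dist magnitude; if the direction falls outside +/- egocentric_perception_range
# degrees of the agent's heading (checked against the cosine of the forward component),
# all three features are zeroed so the object is effectively invisible.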
dist = math.sqrt(rotated_x * rotated_x +
rotated_y * rotated_y)
rotated_x /= dist
rotated_y /= dist
magnitude = 1. / dist
if rotated_x < np.cos(
self._egocentric_perception_range / 180. * np.pi):
rotated_x = 0.
rotated_y = 0.
magnitude = 0.
obs.extend([rotated_x, rotated_y, magnitude])
else:
obs.extend([rotated_x, rotated_y])
pose = pose[3:]
obs = np.array(obs)
else:
agent_vel = np.array(agent.get_velocities()).flatten()
joints_states = agent.get_internal_states()
obs = np.concatenate((pose, agent_pose, agent_vel, joints_states),
axis=0)
return obs
@gin.configurable
class ICubAuxiliaryTask(Task):
"""
An auxiliary task specific to iCub, to keep the agent from falling down
and to encourage the agent to walk
"""
def __init__(self,
env,
max_steps,
target=None,
agent_init_pos=(0, 0),
agent_pos_random_range=0,
reward_weight=1.0):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end in so many steps
reward_weight (float): the weight of the reward, should be tuned
accroding to reward range of other tasks
target (string): this is the target icub should face towards, since
you may want the agent to interact with something
agent_init_pos (tuple): the expected initial position of the agent
agent_pos_random_range (float): random range of the initial position
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
self.task_vocab = ['icub']
self._target_name = target
self._pre_agent_pos = np.array([0, 0, 0], dtype=np.float32)
self._agent_init_pos = agent_init_pos
self._random_range = agent_pos_random_range
if self._target_name:
self._target = self._world.get_model(self._target_name)
with open(
os.path.join(social_bot.get_model_dir(), "agent_cfg.json"),
'r') as cfg_file:
agent_cfgs = json.load(cfg_file)
self._joints = agent_cfgs[self._agent.type]['control_joints']
def run(self):
""" Start a teaching episode for this task. """
self._pre_agent_pos = self.get_icub_extra_obs(self._agent)[:3]
agent_sentence = yield
done = False
# set icub random initial pose
x = self._agent_init_pos[0] + random.random() * self._random_range
y = self._agent_init_pos[1] + random.random() * self._random_range
orient = (random.random() - 0.5) * np.pi
if self._target_name and random.randint(0, 1) == 0:
# a trick from roboschool humanoid flag run, important to learn to steer
pos = np.array([x, y, 0.6])
orient = self._get_angle_to_target(
self._agent, pos, self._agent.type + '::root_link', np.pi)
self._agent.set_pose((np.array([x, y, 0.6]), np.array([0, 0, orient])))
while not done:
# reward for not falling (alive reward)
agent_height = np.array(
self._agent.get_link_pose(self._agent.type + '::head'))[0][2]
done = agent_height < 0.7 # fall down
standing_reward = agent_height
# movement cost, to avoid unnecessary movements
joint_pos = []
for joint_name in self._joints:
joint_state = self._agent.get_joint_state(joint_name)
joint_pos.append(joint_state.get_positions())
joint_pos = np.array(joint_pos).flatten()
movement_cost = np.sum(np.abs(joint_pos)) / joint_pos.shape[0]
# orientation cost, the agent should face towards the target
if self._target_name:
agent_pos = self.get_icub_extra_obs(self._agent)[:3]
head_angle = self._get_angle_to_target(
self._agent, agent_pos, self._agent.type + '::head')
root_angle = self._get_angle_to_target(
self._agent, agent_pos, self._agent.type + '::root_link')
l_foot_angle = self._get_angle_to_target(
self._agent, agent_pos,
self._agent.type + '::l_leg::l_foot', np.pi)
r_foot_angle = self._get_angle_to_target(
self._agent, agent_pos,
self._agent.type + '::r_leg::r_foot', np.pi)
orient_cost = (np.abs(head_angle) + np.abs(root_angle) +
np.abs(l_foot_angle) + np.abs(r_foot_angle)) / 4
else:
orient_cost = 0
# sum all
reward = standing_reward - 0.5 * movement_cost - 0.2 * orient_cost
agent_sentence = yield TeacherAction(reward=reward, done=done)
@staticmethod
def get_icub_extra_obs(agent):
"""
Get contacts to the ground, poses of the key points of icub and their center.
A static method; other tasks can use this to get additional icub info.
Args:
agent (GazeboAgent): the agent
Returns:
np.array of the extra observations of icub, including average pos
"""
root_pose = np.array(
agent.get_link_pose(agent.name + '::root_link')).flatten()
chest_pose = np.array(
agent.get_link_pose(agent.name + '::chest')).flatten()
l_foot_pose = np.array(
agent.get_link_pose(agent.name + '::l_leg::l_foot')).flatten()
r_foot_pose = np.array(
agent.get_link_pose(agent.name + '::r_leg::r_foot')).flatten()
foot_contacts = np.array([
agent.get_contacts("l_foot_contact_sensor",
'ground_plane::link::collision'),
agent.get_contacts("r_foot_contact_sensor",
'ground_plane::link::collision')
]).astype(np.float32)
average_pos = np.sum([
root_pose[0:3], chest_pose[0:3], l_foot_pose[0:3], r_foot_pose[0:3]
],
axis=0) / 4.0
obs = np.concatenate((average_pos, root_pose, chest_pose, l_foot_pose,
r_foot_pose, foot_contacts))
return obs
def _get_angle_to_target(self, agent, agent_pos, link_name, offset=0):
""" Get the angle from an icub link to the target.
Args:
agent (GazeboAgent): the agent
agent_pos (numpy array): the pos of the agent
link_name (string): link name of the agent
offset (float): the yaw offset of the link, since some links have an initial internal rotation
Returns:
float, angle to target
"""
yaw = agent.get_link_pose(link_name)[1][2]
yaw = (yaw + offset) % (
2 * np.pi
) - np.pi # model icub has a global built-in 180 degree rotation
target_pos, _ = self._target.get_pose()
walk_target_theta = np.arctan2(target_pos[1] - agent_pos[1],
target_pos[0] - agent_pos[0])
angle_to_target = walk_target_theta - yaw
# wrap the range to [-pi, pi)
angle_to_target = (angle_to_target + np.pi) % (2 * np.pi) - np.pi
return angle_to_target
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
icub_extra_obs = self.get_icub_extra_obs(agent)
if self._target_name:
agent_pos = icub_extra_obs[:3]
# TODO: be compatible with calling multiple times in one env step
agent_speed = (
agent_pos - self._pre_agent_pos) / self._env.get_step_time()
self._pre_agent_pos = agent_pos
yaw = agent.get_link_pose(agent.type + '::root_link')[1][2]
angle_to_target = self._get_angle_to_target(
agent, agent_pos, agent.type + '::root_link')
rot_minus_yaw = np.array([[np.cos(-yaw), -np.sin(-yaw), 0],
[np.sin(-yaw),
np.cos(-yaw), 0], [0, 0, 1]])
vx, vy, vz = np.dot(rot_minus_yaw,
agent_speed) # rotate to agent view
orientation_ob = np.array(
[np.sin(angle_to_target),
np.cos(angle_to_target), vx, vy, vz],
dtype=np.float32)
return np.concatenate([icub_extra_obs] + [orientation_ob])
else:
return icub_extra_obs
@gin.configurable
class KickingBallTask(Task):
"""
A simple task to kick a ball so that it rolls into the gate. An
    optional reward shaping can be used to guide the agent to run to the ball first:
    Agent will receive 100 when it successfully kicks the ball into the gate.
    Agent will receive the speed of getting closer to the ball before touching the
    ball within 45 degrees of the agent direction. The reward is truncated at the
    parameter target_speed.
    Agent will receive the negative normalized distance from ball to gate center
    after touching the ball within that direction. An offset of
    "target_speed + 1" is included since touching the ball must be better
    than not touching.
    If reward shaping is not used, the agent will only get -1/0/1 rewards.
"""
def __init__(self,
env,
max_steps,
gate_distance=5.0,
random_range=4.0,
target_speed=2.0,
reward_weight=1.0,
distraction_list=[
'coke_can', 'table', 'car_wheel',
#'plastic_cup', 'beer'
],
distraction_penalty_distance_thresh=0,
distraction_penalty=0.5,
curriculum_distractions=True,
sparse_reward=False):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end if the task is not achieved in so
many steps
gate_distance (float): the distance from the gate to the ball on
average. A smaller distance makes the kicking task easier.
random_range (float): the ball's random position range
            target_speed (float): the target speed when running to the ball. The agent will receive no
                higher reward when its speed exceeds target_speed.
reward_weight (float): the weight of the reward
sparse_reward (bool): if True, the agent will only get -1/0/1 rewards.
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
self._max_play_ground_size = 5 # play ground will be (-5, 5) for both x and y axes.
self._random_range = random_range
self._target_speed = target_speed
self._sparse_reward = sparse_reward
self._gate_distance = gate_distance
self._distraction_list = distraction_list
self._distraction_penalty_distance_thresh = distraction_penalty_distance_thresh
if distraction_penalty_distance_thresh > 0:
assert distraction_penalty_distance_thresh < success_distance_thresh
self._distraction_penalty = distraction_penalty
self._curriculum_distractions = curriculum_distractions
# By looking up the 'robocup_3Dsim_goal' model file:
self._gate_width = 2.1
self._gate_post_radius = 0.05
self._env.insert_model(
model="robocup_3Dsim_goal",
name="gate",
pose="-%s 0 0 0 -0 3.14159265" % gate_distance)
self._env.insert_model(model="ball", pose="1.50 1.5 0.2 0 -0 0")
self._env.insert_model_list(self._distraction_list)
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
gate = self._world.get_model("gate")
ball = self._world.get_model("ball")
gate_loc, dir = gate.get_pose()
self._move_ball(ball)
agent_loc, dir = self._agent.get_pose()
ball_loc, _ = ball.get_pose()
prev_dist = np.linalg.norm(
np.array(ball_loc)[:2] - np.array(agent_loc)[:2])
init_gate_dist = np.linalg.norm(
np.array(ball_loc)[:2] - np.array(gate_loc)[:2])
steps = 0
hitted_ball = False
prev_min_dist_to_distraction = 100
while steps < self._max_steps:
steps += 1
if not hitted_ball and not self._sparse_reward:
agent_loc, dir = self._agent.get_pose()
if self._agent.type.find('icub') != -1:
# For agent icub, we need to use the average pos here
agent_loc = ICubAuxiliaryTask.get_icub_extra_obs(
self._agent)[:3]
ball_loc, _ = ball.get_pose()
dist = np.linalg.norm(
np.array(ball_loc)[:2] - np.array(agent_loc)[:2])
                # truncate progress_reward to target_speed
progress_reward = min(
self._target_speed,
(prev_dist - dist) / self._env.get_step_time())
prev_dist = dist
if dist < 0.3:
dir = np.array([math.cos(dir[2]), math.sin(dir[2])])
gate_dir = (np.array(ball_loc[0:2]) - np.array(
agent_loc[0:2])) / dist
dot = sum(dir * gate_dir)
if dot > 0.707:
# within 45 degrees of the agent direction
hitted_ball = True
agent_sentence = yield TeacherAction(reward=progress_reward)
else:
agent_loc, dir = self._get_agent_loc()
gate_loc, _ = gate.get_pose()
ball_loc, _ = ball.get_pose()
dist = np.linalg.norm(
np.array(ball_loc)[:2] - np.array(gate_loc)[:2])
dir = np.array([math.cos(dir[2]), math.sin(dir[2])])
gate_dir = (np.array(ball_loc[0:2]) - np.array(
agent_loc[0:2])) / dist
dot = sum(dir * gate_dir)
distraction_penalty, prev_min_dist_to_distraction = (
self._get_distraction_penalty(agent_loc, dot,
prev_min_dist_to_distraction))
if self._in_the_gate(ball_loc):
if self._sparse_reward:
reward = 1.
else:
reward = 100.
reward = reward - distraction_penalty
agent_sentence = yield TeacherAction(
reward=reward, sentence="well done", done=True,
success=True)
else:
if self._sparse_reward:
reward = 0.
else:
reward = self._target_speed + 3 - dist / init_gate_dist
reward = reward - distraction_penalty
agent_sentence = yield TeacherAction(
reward=reward)
yield TeacherAction(reward=-1.0, sentence="failed", done=True)
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
obj_poses = self._get_states_of_model_list(['ball', 'gate'])
agent_pose = np.array(agent.get_pose()).flatten()
agent_vel = np.array(agent.get_velocities()).flatten()
joints_states = agent.get_internal_states()
obs = np.concatenate((obj_poses, agent_pose, agent_vel, joints_states),
axis=0)
return obs
def _in_the_gate(self, ball_loc):
pass_gate_line = (ball_loc[0] < -self._gate_distance)
half_width = self._gate_width / 2 - self._gate_post_radius # =1.0
within_gate = (half_width > ball_loc[1] > -half_width)
return (pass_gate_line and within_gate)
def _move_ball(self, ball):
range = self._random_range
while True:
loc = (random.random() * range - range / 2,
random.random() * range - range / 2, 0)
if not self._in_the_gate(loc):
break
ball.set_pose((loc, (0, 0, 0)))
agent_loc, agent_dir = self._get_agent_loc()
avoid_locations = [agent_loc, np.asarray(loc)]
distractions = OrderedDict()
for item in self._distraction_list:
distractions[item] = 1
if len(distractions) and self._curriculum_distractions:
for item, _ in distractions.items():
distraction = self._world.get_agent(item)
loc = self._move_obj(
obj=distraction,
agent_loc=agent_loc,
agent_dir=agent_dir,
is_goal=False,
avoid_locations=avoid_locations)
avoid_locations.append(loc)
def _get_agent_loc(self):
loc, agent_dir = self._agent.get_pose()
if self._agent.type.find('icub') != -1:
# For agent icub, we need to use the average pos here
loc = ICubAuxiliaryTask.get_icub_extra_obs(self._agent)[:3]
loc = np.array(loc)
return loc, agent_dir
def _move_obj(self,
obj,
agent_loc,
agent_dir,
is_goal=True,
avoid_locations=[]):
range = self._random_range
self._is_full_range_in_curriculum = False
attempts = 0
while True:
attempts += 1
dist = random.random() * range
angle_range = 360
angle = math.radians(
math.degrees(agent_dir[2]) + random.random() * angle_range -
angle_range / 2)
loc = (dist * math.cos(angle), dist * math.sin(angle),
0) + agent_loc
self._initial_dist = np.linalg.norm(loc - agent_loc)
satisfied = True
if (abs(loc[0]) > self._max_play_ground_size or abs(loc[1]) >
self._max_play_ground_size): # not within walls
satisfied = False
for avoid_loc in avoid_locations:
dist = np.linalg.norm(loc - avoid_loc)
if dist < 0.5:
satisfied = False
break
if satisfied or attempts > 10000:
if not satisfied:
logging.warning(
"Took forever to find satisfying " +
"object location. " +
"agent_loc: {}, range: {}, max_size: {}.".format(
str(agent_loc), str(range),
str(self._max_play_ground_size)))
break
self._prev_dist = self._initial_dist
obj.reset()
obj.set_pose((loc, (0, 0, 0)))
return loc
def _get_distraction_penalty(self, agent_loc, dot,
prev_min_dist_to_distraction):
"""
Calculate penalty for hitting/getting close to distraction objects
"""
distraction_penalty = 0
if (self._distraction_penalty_distance_thresh > 0
and self._distraction_list):
curr_min_dist = 100
for obj_name in self._distraction_list:
obj = self._world.get_model(obj_name)
if not obj:
continue
obj_loc, _ = obj.get_pose()
obj_loc = np.array(obj_loc)
distraction_dist = np.linalg.norm(agent_loc - obj_loc)
if (distraction_dist >=
self._distraction_penalty_distance_thresh):
continue
if distraction_dist < curr_min_dist:
curr_min_dist = distraction_dist
if (prev_min_dist_to_distraction >
self._distraction_penalty_distance_thresh):
logging.debug("hitting object: " + obj_name)
distraction_penalty += self._distraction_penalty
prev_min_dist_to_distraction = curr_min_dist
return distraction_penalty, prev_min_dist_to_distraction
@gin.configurable
class Reaching3D(Task):
"""
A task to reach a random 3D position with the end effector of a robot arm.
An optional distance based reward shaping can be used.
This task is only compatible with Agent kuka_lwr_4plus.
"""
compatible_agents = ['kuka_lwr_4plus']
def __init__(self,
env,
max_steps,
random_range=0.65,
success_distance_thresh=0.1,
reward_shaping=True,
reward_weight=1.0):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end if not reaching goal in so many steps
random_range (float): the goal's random position range
success_distance_thresh (float): the goal is reached if it's within this distance to the agent
reward_shaping (bool): if false, the reward is -1/0/1, otherwise the 0 case will be replaced
with negative distance to goal.
reward_weight (float): the weight of the reward
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
assert self._agent.type in self.compatible_agents, "Reaching3D Task only support kuka_lwr_4plus for now"
self._reaching_link = '::lwr_arm_6_link'
self._random_range = random_range
self._success_distance_thresh = success_distance_thresh
self._reward_shaping = reward_shaping
self._env.insert_model(model="goal_indicator")
self._goal = self._world.get_model('goal_indicator')
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
goal_loc, _ = self._goal.get_pose()
reaching_loc, _ = self._agent.get_link_pose(self._agent.type +
self._reaching_link)
self._move_goal(self._goal, np.array(reaching_loc))
steps = 0
while steps < self._max_steps:
steps += 1
reaching_loc, _ = self._agent.get_link_pose(self._agent.type +
self._reaching_link)
goal_loc, _ = self._goal.get_pose()
dist = np.linalg.norm(np.array(goal_loc) - np.array(reaching_loc))
if dist < self._success_distance_thresh:
agent_sentence = yield TeacherAction(
reward=1.0, sentence="well done", done=True, success=True)
else:
reward = (-dist) if self._reward_shaping else 0
agent_sentence = yield TeacherAction(reward=reward, done=False)
yield TeacherAction(reward=-1.0, sentence="failed", done=True)
def _move_goal(self, goal, agent_loc):
while True:
r = 0.15 + random.random() * self._random_range
theta = random.random() * 2 * np.pi
phi = (random.random() - 0.5) * np.pi
loc = (r * np.sin(phi) * np.cos(theta),
r * np.sin(phi) * np.sin(theta), 0.2 + np.cos(phi))
if np.linalg.norm(loc - agent_loc) > self._success_distance_thresh:
break
goal.set_pose((loc, (0, 0, 0)))
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
goal_loc, _ = self._goal.get_pose()
reaching_loc, _ = agent.get_link_pose(self._agent.type +
self._reaching_link)
joints_states = agent.get_internal_states()
#obs = np.concatenate((goal_loc, reaching_loc, joints_states), axis=0)
obs = np.concatenate((goal_loc, joints_states), axis=0)
return obs
@gin.configurable
class PickAndPlace(Task):
"""
    A task to grip an object (a wood cube), move it and then place it at the target position.
    A simple reward shaping can be used to guide the agent to grip the cube and move it to the position:
    If the object is not being gripped, the reward is the gripper contacts, whether the object is off the
    ground, and the negative distance between object and gripper.
    If being gripped, an extra truncated negative distance from object to goal is added.
    If successfully placed, a reward of 100 is given.
This task is only compatible with Agent youbot_noplugin.
"""
compatible_agents = ['youbot_noplugin']
def __init__(self,
env,
max_steps,
object_random_range=0.6,
place_to_random_range=0.6,
min_distance=0.3,
object_half_height=0.025,
success_distance_thresh=0.05,
reward_shaping=False,
reward_weight=1.0):
"""
Args:
env (gym.Env): an instance of Environment
            max_steps (int): episode will end if the task is not completed in so many steps; 150 is
                recommended for agent youbot_noplugin and a 5cm cube object
object_random_range (float): the object's random position range to the agent
place_to_random_range (float): the range of target placing position to the object
min_distance (float): the min_distance of the placing position to the object
            object_half_height (float): Note that the model for the stacking task should have no offset inside the model.
                This means an initial pose of 0 height puts half of the object underground. This specifies the
                initial height of the object's center, e.g., half of the edge length of a cube, or the radius of a ball.
success_distance_thresh (float): consider success if the target is within this distance to the
goal position
            reward_shaping (bool): if false, the reward is -1/0/1, otherwise the 0 case will be replaced
                with the shaped reward.
reward_weight (float): the weight of the reward
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
assert self._agent.type in self.compatible_agents, "PickAndPlace Task \
only support youbot_noplugin for now"
self._palm_link = 'youbot_noplugin::gripper_palm_link'
self._finger_link_l = 'youbot_noplugin::gripper_finger_link_l'
self._finger_link_r = 'youbot_noplugin::gripper_finger_link_r'
self._object_name = 'wood_cube_5cm_without_offset'
self._object_collision_name = 'wood_cube_5cm_without_offset::link::collision'
self._object_half_height = object_half_height
self._object_random_range = object_random_range
self._place_to_random_range = place_to_random_range
self._min_distance = min_distance
self._success_distance_thresh = success_distance_thresh
self._reward_shaping = reward_shaping
self._env.insert_model_list([self._object_name, 'goal_indicator'])
self._goal = self._world.get_model('goal_indicator')
self._object = self._world.get_model(self._object_name)
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
obj_pos = self._random_move_object(
target=self._object,
random_range=self._object_random_range,
center_pos=np.array([0, 0]),
min_distance=self._min_distance,
height=self._object_half_height)
goal_pos = self._random_move_object(
target=self._goal,
random_range=self._place_to_random_range,
center_pos=obj_pos[:2],
min_distance=self._min_distance,
height=self._object_half_height)
steps = 0
while steps < self._max_steps:
steps += 1
# get positions
obj_pos, _ = self._object.get_pose()
obj_height = obj_pos[2]
finger_l_pos, _ = self._agent.get_link_pose(self._finger_link_l)
finger_r_pos, _ = self._agent.get_link_pose(self._finger_link_r)
finger_pos = (
np.array(finger_l_pos) + np.array(finger_r_pos)) / 2.0
# get contacts
l_contact = self._agent.get_contacts('finger_cnta_l',
self._object_collision_name)
r_contact = self._agent.get_contacts('finger_cnta_r',
self._object_collision_name)
# check distance and contacts
obj_dist = np.linalg.norm(np.array(obj_pos) - goal_pos)
obj_dist_xy = np.linalg.norm(np.array(obj_pos)[:2] - goal_pos[:2])
dist_z = abs(obj_height - goal_pos[2])
palm_dist = np.linalg.norm(
np.array(obj_pos) - np.array(finger_pos))
obj_lifted = obj_height / self._object_half_height - 1.0
gripping_feature = 0.25 * l_contact + 0.25 * r_contact + min(
                obj_lifted, 0.5)  # encourage lifting the object by obj_height
gripping = (gripping_feature > 0.99)
# success condition, minus an offset of object height on z-axis
if gripping and obj_dist_xy < self._success_distance_thresh and (
dist_z - self._object_half_height <
self._success_distance_thresh):
logging.debug("object has been successfuly placed")
reward = 200.0 if self._reward_shaping else 1.0
agent_sentence = yield TeacherAction(
reward=reward, sentence="well done", done=True,
success=True)
else:
#shaped reward modification
#0 if gripping else (gripping_feature - palm_dist)
shaped_reward = max(
2.0 - obj_dist / self._place_to_random_range,
1.0) if gripping else (gripping_feature - palm_dist)
reward = shaped_reward if self._reward_shaping else 0
agent_sentence = yield TeacherAction(reward=reward, done=False)
yield TeacherAction(reward=-1.0, sentence="failed", done=True)
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
# Use 3 position of the links to uniquely determine the 6 + 2 DoF gripper
finger_l_pos, _ = agent.get_link_pose(self._finger_link_l)
finger_r_pos, _ = agent.get_link_pose(self._finger_link_r)
palm_pos, _ = agent.get_link_pose(self._palm_link)
# goal pos and object pos
goal_pos, _ = self._goal.get_pose()
obj_pos, obj_rot = self._object.get_pose()
# contacts
finger_contacts = np.array([
agent.get_contacts('finger_cnta_l', self._object_collision_name),
agent.get_contacts('finger_cnta_r', self._object_collision_name)
]).astype(np.float32)
# agent self states
agent_pose = np.array(agent.get_pose()).flatten()
joints_states = agent.get_internal_states()
obs = np.array(
[goal_pos, obj_pos, obj_rot, finger_l_pos, finger_r_pos,
palm_pos]).flatten()
return np.concatenate(
(obs, finger_contacts, agent_pose, joints_states), axis=0)
@gin.configurable
class Stack(Task):
"""
    A task to stack several wood cubes. The agent needs to grasp the cubes and
    stack them one by one, until all of them are stacked together.
The number of cubes can be configured by objects_num. Distribution of
cubes' initial position is configured by average_distance_max,
average_distance_min and objects_range.
    The success condition is that all objects are stacked and the gripper of the agent
    leaves the cubes (no contacts to cubes) for 5 time steps.
    The agent will receive a reward of 1 on success if reward shaping is not
    used. If reward shaping is used, the reward is the stacking number plus:
    if not gripping, the negative distance to the closest obj not being stacked
    if gripping, the distance to the closest stacking candidate, i.e., (x, y,
target_height). target_height is (stacked_num + 1) * object_height,
plus a margin of 0.55 * object_height
This task is only compatible with Agent youbot_noplugin.
"""
compatible_agents = ['youbot_noplugin']
def __init__(self,
env,
max_steps,
average_distance_max=0.5,
average_distance_min=0.3,
objects_num=3,
objects_range=0.25,
object_half_height=0.025,
success_distance_thresh=0.03,
reward_shaping=True,
reward_weight=1.0):
"""
Args:
env (gym.Env): an instance of Environment.
            max_steps (int): episode will end if the task is not completed in so
                many steps.
average_distance_max (float): the max distance from the agent to
the center of the objects' initial position distribution
average_distance_min (float): the min distance from the agent to
the center of the objects' initial position distribution
objects_num (int): the number of objects to stack.
objects_range (float): the range of objects around center position.
object_half_height (float): Note that model for stacking task
should be of no offset inside the model. This means an initial
                pose of 0 height puts half of the object underground. This
specifies the initial height of object's center, e.g, half of
the edge length of a cube, or radius of a ball.
success_distance_thresh (float): consider success if the objects'
                x-y plane distance is within this threshold.
reward_shaping (bool): if false, the reward is -1/0/1, otherwise
                the shaped reward will be used.
reward_weight (float): the weight of the reward.
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
assert self._agent.type in self.compatible_agents, "Stack task only \
support youbot_noplugin for now"
self._reward_shaping = reward_shaping
self._palm_link = 'youbot_noplugin::gripper_palm_link'
self._finger_link_l = 'youbot_noplugin::gripper_finger_link_l'
self._finger_link_r = 'youbot_noplugin::gripper_finger_link_r'
self._object_collision_name = '::wood_cube_5cm_without_offset::link::collision'
self._object_half_height = object_half_height
self._avg_distance_max = average_distance_max
self._avg_distance_min = average_distance_min
self._objects_num = objects_num
self._objects_range = objects_range
self._success_distance_thresh = success_distance_thresh
self._object_names = []
self._objects = []
for obj_index in range(objects_num):
name = 'wood_cube_' + str(obj_index)
self._object_names.append(name)
self._env.insert_model(
model='wood_cube_5cm_without_offset', name=name)
self._objects.append(self._world.get_model(name))
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
# randomly move objects
r = random.uniform(self._avg_distance_min, self._avg_distance_max)
theta = random.random() * 2 * np.pi
stacking_pos = np.array([r * np.cos(theta), r * np.sin(theta)])
for obj_index in range(self._objects_num):
self._random_move_object(
target=self._objects[obj_index],
random_range=self._objects_range,
center_pos=stacking_pos,
min_distance=0,
height=self._object_half_height)
steps = 0
succ_cnt = 0
while steps < self._max_steps:
steps += 1
# get gripper pos
finger_l_pos, _ = self._agent.get_link_pose(self._finger_link_l)
finger_r_pos, _ = self._agent.get_link_pose(self._finger_link_r)
finger_pos = (
np.array(finger_l_pos) + np.array(finger_r_pos)) / 2.0
# get object's position and contacts
obj_positions = []
l_contacts = []
r_contacts = []
for obj_index in range(self._objects_num):
obj_pos, _ = self._objects[obj_index].get_pose()
obj_positions.append(obj_pos)
l_contacts.append(1.0 * self._agent.get_contacts(
'finger_cnta_l', self._object_names[obj_index] +
self._object_collision_name))
r_contacts.append(1.0 * self._agent.get_contacts(
'finger_cnta_r', self._object_names[obj_index] +
self._object_collision_name))
# convert to ndarray
l_contacts = np.array(l_contacts)
r_contacts = np.array(r_contacts)
contacts = l_contacts + r_contacts
obj_positions = np.array(obj_positions)
obj_positions_xy = obj_positions[:, :2]
obj_heights = obj_positions[:, 2]
# get the objects in different stacking states
obj_list = np.arange(self._objects_num)
stacked_candidates = np.where(
(contacts == 0) *
(obj_heights / self._object_half_height > 1.5)
            )[0]  # off the ground and not being grasped, considered as being stacked
stacked_pos = obj_positions[stacked_candidates]
top_index = None
bottom_obj = None
stacked_obj_num = 0
while (len(stacked_pos) > 0):
# find the highest object of the stack
top_index = np.argmax(stacked_pos[:, 2])
# find the bottom one within self._success_distance_thresh
bottom_obj = np.where(
(obj_heights - self._object_half_height < 0.01) *
(np.linalg.norm(
obj_positions_xy - stacked_pos[top_index][:2], axis=1)
< self._success_distance_thresh))[0]
if (len(bottom_obj) == 0):
# can not find an object below, for some reason the object
# is in the air without being grasped or stacked
stacked_pos = np.delete(stacked_pos, top_index, axis=0)
else:
# get the stacked object list in which object is
# within success_distance_thresh and without contacts
stacked_obj_num = len(
np.where((contacts == 0) * (np.linalg.norm(
obj_positions_xy -
obj_positions_xy[bottom_obj[0]][:2],
axis=1) < self._success_distance_thresh))[0]) - 1
break
# check success condition and give returns
# if reward shaping is used, the reward is the stacking number plus:
# if not gripping, - distance to the closest obj not being stacked
            #     if gripping, distance to the closest stacking candidate:
# (x, y, target_height)
# target_height: (stacked_num + 1) * object_height,
# plus a margin 0.55 * object_height
# being_grasped: contacts are True and off the ground
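            # target height in units of half the object height:
            # (stacked_obj_num + 1) * 2 for the stacked cubes plus a 1.1 margin (0.55 * object_height),
            # which gives the 3.1 + 2 * stacked_obj_num used below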
target_height_by_half_obj_height = 3.1 + stacked_obj_num * 2.0
grasped_obj_index = np.where(
(l_contacts * r_contacts) *
(obj_heights / self._object_half_height > 2.0))[0]
# success flag: all objects are stacked and no contacts to gripper
succ_flag = (stacked_obj_num == self._objects_num -
1) and np.sum(contacts) == 0
succ_cnt = succ_cnt + 1 if succ_flag else 0
# give returns
if succ_cnt >= 5: # successfully stacked and leave the objects for 5 steps
logging.debug("object has been successfuly placed")
reward = 200.0 * self._objects_num if self._reward_shaping else 1.0
agent_sentence = yield TeacherAction(
reward=reward, sentence="well done", done=True)
elif len(grasped_obj_index) == 0: # nothing is being grasped
if stacked_obj_num == 0:
unstacked_obj_list = obj_list
else:
unstacked_obj_list = np.where(
np.linalg.norm(
obj_positions_xy -
obj_positions_xy[bottom_obj[0]][:2],
axis=1) >= self._success_distance_thresh)[0]
if len(unstacked_obj_list) == 0:
                    # all are stacked, this can happen during the last steps before success
stage_reward = 0.5
else:
closest_obj_in_unstacked = np.argmin(
np.linalg.norm(
obj_positions[unstacked_obj_list] - finger_pos,
axis=1))
closest_obj = unstacked_obj_list[closest_obj_in_unstacked]
distance_to_closest_obj = np.linalg.norm(
obj_positions[closest_obj] - finger_pos)
lifted = obj_heights[
closest_obj] / self._object_half_height - 1.0
stage_reward = (0.5 * contacts[closest_obj] + max(
1.0 - distance_to_closest_obj / self._avg_distance_max,
0) + min(lifted, 1.0)) / 3.0
reward = stacked_obj_num + 0.5 * stage_reward if self._reward_shaping else 0
agent_sentence = yield TeacherAction(reward=reward, done=False)
else: # an object is being grasped
if stacked_obj_num == 0: # any target on the ground is fine, prefer the closest one
target_list = np.delete(obj_list, grasped_obj_index[0])
target_id = np.argmin(
np.linalg.norm(
obj_positions[target_list] -
obj_positions[grasped_obj_index[0]],
axis=1))
target_pos = obj_positions[target_list][target_id]
else:
target_id = bottom_obj[0]
target_pos = obj_positions[target_id]
dist_xy = np.linalg.norm(
obj_positions[grasped_obj_index[0]][:2] - target_pos[:2])
dist_z = abs((obj_positions[grasped_obj_index[0]][2] /
self._object_half_height) /
target_height_by_half_obj_height - 1.0)
stage_reward = 1.0 - min(
dist_xy / self._objects_range + dist_z, 2.0) / 2.0
reward = stacked_obj_num + 0.5 + 0.5 * stage_reward if self._reward_shaping else 0
agent_sentence = yield TeacherAction(reward=reward, done=False)
yield TeacherAction(reward=-1.0, sentence="failed", done=True)
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
# object poses and contacts
obj_poses = []
l_contacts = []
r_contacts = []
for obj_index in range(self._objects_num):
# get object's position
obj_pos, obj_rot = self._objects[obj_index].get_pose()
obj_poses.append(obj_pos)
obj_poses.append(obj_rot)
# get contacts
l_contacts.append(1.0 * self._agent.get_contacts(
'finger_cnta_l',
self._object_names[obj_index] + self._object_collision_name))
r_contacts.append(1.0 * self._agent.get_contacts(
'finger_cnta_r',
self._object_names[obj_index] + self._object_collision_name))
obj_poses = np.array(obj_poses).flatten()
l_contacts = np.array(l_contacts)
r_contacts = np.array(r_contacts)
contacts = l_contacts + r_contacts
# Use 3 points to uniquely determine the 6 + 2 DoF gripper
finger_l_pos, _ = agent.get_link_pose(self._finger_link_l)
finger_r_pos, _ = agent.get_link_pose(self._finger_link_r)
palm_pos, _ = agent.get_link_pose(self._palm_link)
gripper_states = np.array([finger_l_pos, finger_r_pos,
palm_pos]).flatten()
# agent self states
agent_pose = np.array(agent.get_pose()).flatten()
joints_states = agent.get_internal_states()
return np.concatenate(
(agent_pose, joints_states, gripper_states, contacts, obj_poses),
axis=0)
```
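The angle handling in `_get_angle_to_target` above hinges on wrapping an angle difference into the half-open interval [-pi, pi). The snippet below is a standalone sketch of that wrapping arithmetic using only NumPy; the helper name `wrap_to_pi` and the sample values are illustrative and not part of the original code.
```python
import numpy as np
def wrap_to_pi(angle):
    """Wrap an angle in radians into the half-open interval [-pi, pi)."""
    return (angle + np.pi) % (2 * np.pi) - np.pi
# Values outside the interval wrap back into it; values already inside are unchanged.
for raw in [0.0, np.pi, -np.pi, 1.5 * np.pi, -2.5 * np.pi]:
    wrapped = wrap_to_pi(raw)
    assert -np.pi <= wrapped < np.pi
    print("%+.3f rad -> %+.3f rad" % (raw, wrapped))
```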
|
{
"source": "jesbu1/spinningup",
"score": 2
}
|
#### File: envs/metaworld/MT10.py
```python
from metaworld.benchmarks import MT10
from gym.envs.metaworld.base import MTEnv
import gym
def MT10HelperEnv():
return MTEnv(MT10.get_train_tasks())
```
|
{
"source": "jesbu1/viper",
"score": 2
}
|
#### File: viper/pong/karel.py
```python
from karel_env.karel_gym_env import KarelGymEnv
from ..core.rl import *
from .karel import *
from ..core.dt import *
from ..util.log import *
#from .custom_dqn import DQN
from .ppo import PPO
from collections.abc import Iterable
import random
from itertools import product
import gym
from gym.spaces import Box
import sys
class KarelEnvWrapper(gym.Wrapper):
def __init__(self, env=None, op=[2, 0, 1]):
"""
Transpose observation space for images
"""
gym.Wrapper.__init__(self, env)
assert len(op) == 3, "Error: Operation, " + str(op) + ", must be dim3"
self.op = op
obs_shape = self.observation_space.shape
if len(obs_shape) == 3:
self.observation_space = Box(
self.observation_space.low[0, 0, 0],
self.observation_space.high[0, 0, 0], [
obs_shape[self.op[0]], obs_shape[self.op[1]],
obs_shape[self.op[2]]
],
dtype=self.observation_space.dtype)
def step(self, action):
ob, reward, done, info = self.env.step(action)
perception_vector = self.env._world.get_perception_vector()
return (self.observation(ob.astype(np.float32)), np.array(perception_vector, dtype=np.float32)), float(reward), done, {}
def reset(self):
ob = self.observation(np.array(self.env.reset(), dtype=np.float32))
perception_vector = np.array(self.env._world.get_perception_vector(), np.float32)
return ob, perception_vector
def observation(self, ob):
if len(self.observation_space.shape) == 3:
return np.transpose(ob, (self.op[0], self.op[1], self.op[2]))
return ob
environments = [
'cleanHouse',
'fourCorners',
'harvester',
'randomMaze',
'stairClimber_sparse',
'topOff',
]
env_to_hw = dict(
cleanHouse=(14, 22),
fourCorners=(12, 12),
harvester=(8, 8),
randomMaze=(8, 8),
stairClimber_sparse=(12, 12),
topOff=(12, 12),
)
env_to_time = dict(
cleanHouse=300,
fourCorners=100,
harvester=100,
randomMaze=100,
stairClimber_sparse=100,
topOff=100,
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def _generate_run_name(parameters,
id,
repeat,
token_len=20,
max_len=255):
"""Generate a run name by writing abbr parameter key-value pairs in it,
for an easy curve comparison between different search runs without going
into the gin texts.
Args:
parameters (dict): a dictionary of parameter configurations
id (int): an integer id of the run
repeat (int): an integer id of the repeats of the run
token_len (int): truncate each token for so many chars
max_len (int): the maximal length of the generated name; make sure
that this value won't exceed the max allowed filename length in
the OS
Returns:
str: a string with parameters abbr encoded
"""
def _abbr_single(x):
def _initials(t):
words = [w for w in t.split('_') if w]
len_per_word = max(token_len // len(words), 1)
return '_'.join([w[:len_per_word] for w in words])
if isinstance(x, str):
tokens = x.replace("/", "_").split(".")
tokens = [_initials(t) for t in tokens]
return ".".join(tokens)
else:
return str(x)
def _abbr(x):
if isinstance(x, Iterable) and not isinstance(x, str):
strs = []
for key in x:
try:
val = x.get(key)
strs.append("%s=%s" % (_abbr(key), _abbr(val)))
except:
strs.append("%s" % _abbr(key))
return "+".join(strs)
else:
return _abbr_single(x)
name = "%04dr%d" % (id, repeat)
abbr = _abbr(parameters)
if abbr:
name += "+" + abbr
# truncate the entire string if it's beyond the max length
return name[:max_len]
def learn_dt(input_args):
# Parameters
env_task = input_args.env_task
topOff_config=0.05
harvester_config=0.1
#extra_suffix = f"{topOff_config}"
extra_suffix = f"{topOff_config}"
env_task_metadata = {"mode": "train", "marker_prob": harvester_config, "hash_info": 'viper/pong/pytorch-a2c-ppo-acktr-gail/tasks/run2_topOff_all_states_w_12.pkl', 'train_configs': topOff_config, 'test_configs': 1 - topOff_config}
args = dict(task_definition='custom_reward',
env_task=env_task,
max_episode_steps=env_to_time[env_task],
obv_type='global',
wall_prob=0.25,
height=env_to_hw[env_task][0],
width=env_to_hw[env_task][1],
incorrect_marker_penalty=True,
delayed_reward=True,
perception_noise_prob=0,
action_noise_prob=0,
env_task_metadata=env_task_metadata,
seed=random.randint(0, 100000000))
config = AttrDict()
config.update(args)
env = KarelGymEnv(config)
env._max_episode_steps = config.max_episode_steps
env = KarelEnvWrapper(env)
custom_args = AttrDict()
id=input_args.pop("id")
repeat=input_args.pop("repeat")
custom_args.update(input_args)
max_depth = custom_args.max_depth
n_batch_rollouts = custom_args.n_batch_rollouts
max_samples = custom_args.max_samples
max_iters = custom_args.max_iters
train_frac = custom_args.train_frac
is_reweight = custom_args.is_reweight
run_name = _generate_run_name(custom_args, id, repeat) + extra_suffix
if not os.path.exists(f"../data/karel/ppo/{run_name}"):
os.makedirs(f"../data/karel/ppo/{run_name}")
log_fname = f'../data/karel/ppo/{run_name}/karel_dt.log'
#model_path = f'../data/saved_dqn/karel/{env_task}/saved'
model_path = f'../data/saved_ppo/karel/{env_task}{extra_suffix}/saved_conv'
n_test_rollouts = 50
save_dirname = f'../data/karel/ppo/{run_name}{extra_suffix}'
save_fname = 'dt_policy.pk'
save_viz_fname = 'dt_policy.dot'
is_train = True
# Logging
set_file(log_fname)
# Data structures
teacher = PPO(env, model_path, train=False)
student = DTPolicy(max_depth)
state_transformer = lambda x: x
# Train student
if is_train:
student = train_dagger(env, teacher, student, state_transformer, max_iters, n_batch_rollouts, max_samples, train_frac, is_reweight, n_test_rollouts)
save_dt_policy(student, save_dirname, save_fname)
save_dt_policy_viz(student, save_dirname, save_viz_fname)
else:
student = load_dt_policy(save_dirname, save_fname)
# Test student
rew = test_policy(env, student, state_transformer, n_test_rollouts)
log('Final reward: {}'.format(rew), INFO)
log('Number of nodes: {}'.format(student.tree.tree_.node_count), INFO)
if __name__ == '__main__':
#max_depth = [6, 12, 15]
max_depth = [15]
n_batch_rollouts = [10]
#max_samples = [100000, 200000, 400000]
max_samples = [100000]
#is_reweight = [False, True]
is_reweight = [False]
#grid_search = product(*(environments, max_depth, n_batch_rollouts, max_samples, is_reweight))
grid_search = product(*(max_depth, n_batch_rollouts, max_samples, is_reweight))
for id, param_config in enumerate(grid_search):
for repeat in range(5):
d, n, s, i = param_config
input_args = AttrDict(
env_task = sys.argv[1],
max_depth = d,
n_batch_rollouts = n,
max_samples = s,
max_iters = 80,
train_frac = 0.8,
is_reweight = i,
id=id,
repeat=repeat,
)
learn_dt(input_args)
```
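The `op=[2, 0, 1]` default in `KarelEnvWrapper` moves the channel axis to the front, turning an (H, W, C) observation into (C, H, W) as convolutional policies in PyTorch typically expect. Below is a standalone NumPy sketch of that transpose; the observation shape is made up for illustration.
```python
import numpy as np
op = [2, 0, 1]                                 # same default as KarelEnvWrapper
ob = np.zeros((12, 12, 16), dtype=np.float32)  # hypothetical (H, W, C) Karel observation
transposed = np.transpose(ob, (op[0], op[1], op[2]))
print(ob.shape, "->", transposed.shape)        # (12, 12, 16) -> (16, 12, 12)
```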
|
{
"source": "jescap/multiuav_collision_avoidance",
"score": 3
}
|
#### File: collision_free_vel/scripts/algorithms.py
```python
from random import shuffle
from auxiliar_functions import provoke_collisions
# Compute all feasible solutions and choose the optimal one according to the criterion of minimizing the maximum deviation
def bf_minimize_max_deviation(UAVs, directions_list, cost_function, detect_collision_method):
def criteria(uavs):
return max([cost_function(uav.direction, d) for uav, d in uavs])
result_list = optimize_brute_force(UAVs, directions_list, 0, [], [], detect_collision_method)
if not len(result_list): return False
result_list.sort(key= lambda x: criteria(x))
result, value = [result_list[0]], criteria(result_list[0])
for i in range(1, len(result_list)):
if criteria(result_list[i]) != value:
break
result.append(result_list[i])
def criteria2(uavs):
return sum([cost_function(uav.direction, d) for uav, d in uavs])
return select_optimum(result, criteria2), len(result_list)
def optimize_brute_force(UAVs, directions_list, index, result, result_list, detect_collision_method):
if index == len(UAVs):
result_list.append(result)
return result_list
for k in directions_list[index]:
if not provoke_collisions(UAVs[index], k, result, detect_collision_method):
result_list = optimize_brute_force(UAVs, directions_list, index+1, result + [(UAVs[index], k)], result_list, detect_collision_method)
return result_list
def select_optimum(result_list, cost):
if not result_list: return False
result_list.sort(key=lambda x: cost(x))
result, value = [result_list[0]], cost(result_list[0])
for i in range(1, len(result_list)):
if cost(result_list[i]) != value:
break
result.append(result_list[i])
shuffle(result)
return result[0]
```
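The two functions above implement a two-stage selection: first keep only the assignments whose worst (maximum) deviation is minimal, then break ties by the summed deviation, shuffling before the final pick so equal optima are chosen at random. The sketch below reproduces that criterion on plain lists of deviations, without the UAV and collision machinery; the candidate names and numbers are illustrative only.
```python
from random import shuffle
# Each candidate assignment is scored by a list of per-UAV deviations.
candidates = {
    "a": [0.9, 0.1, 0.1],
    "b": [0.5, 0.5, 0.4],
    "c": [0.5, 0.3, 0.2],
}
# Stage 1: keep only the assignments whose worst deviation is minimal.
best_max = min(max(devs) for devs in candidates.values())
tied = [name for name, devs in candidates.items() if max(devs) == best_max]
# Stage 2: among those, prefer the smallest summed deviation (remaining ties broken randomly).
shuffle(tied)
winner = min(tied, key=lambda name: sum(candidates[name]))
print(winner)  # "c": same worst-case deviation as "b" but a smaller total
```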
|
{
"source": "jeschaef/Graph4Med",
"score": 3
}
|
#### File: jeschaef/Graph4Med/connect.py
```python
import logging
from os import getenv
from typing import List
from dotenv import load_dotenv
from neo4j import GraphDatabase
# Logger
log = logging.getLogger()
# Load env variables
load_dotenv()
neo4j_server = getenv("NEO4J_SERVER")
neo4j_user = getenv("NEO4J_USER")
neo4j_password = getenv("NEO4J_PASSWORD")
# Connection handles
class Neo4jConnection:
"""Neo4J connection handle
"""
def __init__(self, uri, user, password) -> None:
"""Establish a connection to a Neo4J server
Args:
uri (str): Connection URI for the driver
user (str): User name
password (str): Password
"""
self.driver = GraphDatabase.driver(uri, auth=(user, password))
log.debug(f'Connected to Neo4J database')
def close(self) -> None:
"""Close the connection
"""
self.driver.close()
        log.debug('Disconnected from Neo4J database')
def version(self) -> List[str]:
"""Return the database version as string to verify connection
Returns:
List[str]: Name, version and edition of the Neo4J database
"""
with self.driver.session() as session:
result = session.run(('call dbms.components() '
'yield name, versions, edition '
'unwind versions as version '
'return name, version, edition;'))
return result.single().values()
```
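A minimal usage sketch of the `Neo4jConnection` handle defined above. The URI is a placeholder and whether `NEO4J_SERVER` already carries the `bolt://` scheme is an assumption; the module-level credentials loaded from `.env` are reused.
```python
from connect import Neo4jConnection, neo4j_user, neo4j_password
conn = Neo4jConnection("bolt://localhost:7687", neo4j_user, neo4j_password)  # placeholder URI
try:
    name, version, edition = conn.version()  # e.g. ('Neo4j Kernel', '4.x.y', 'community')
    print(f"Connected to {name} {version} ({edition})")
finally:
    conn.close()
```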
#### File: jeschaef/Graph4Med/create.py
```python
import json
import random
from datetime import date, datetime
import pandas as pd
from faker import Faker
from neomodel import db
from connect import *
from model import *
from model import _Neodash_Dashboard
from utils import read_resource, read_csv
# Specify Neo4J url for neomodel
db.set_connection(f"bolt://{neo4j_user}:{neo4j_password}@{neo4j_server}")
# Logger
log = logging.getLogger()
def hash_patient_id(id):
"""Hash a patient id (created by synthea) to a smaller integer"""
return abs(hash(id) % 1000000)
class Control:
"""Handle the deletion and creation of the Neo4J database.
"""
def __init__(self) -> None:
"""Constructor"""
pass
def deleteData(self):
"""Delete all the nodes and relationships.
"""
log.info('Deleting all nodes and relationships...')
query = 'MATCH (n) WHERE NOT n:_Neodash_Dashboard DETACH DELETE n'
db.cypher_query(query)
def create_nodes(self):
"""Create nodes and their relationships in neo4j
"""
log.info('Creating nodes ...')
self.create_patients()
self.create_families()
self.create_projects()
self.create_diagnoses()
self.create_orders()
self.create_materials()
self.create_analysis()
self.create_fusions_rnaseq()
self.create_fusions_array_cgh()
self.merge_fusions()
self.create_results()
@db.transaction
def create_patients(self, n=100):
"""Create one Patient node with attributes
[patient_id, name, birthday, gender] per patient.
"""
log.info('Creating patient nodes ...')
# Load patient data from csv
patients = read_csv('patients.csv', nrows=n)
patients['NAME'] = patients['LAST'] + ', ' + patients['FIRST']
patients['BIRTHDATE'] = pd.to_datetime(patients['BIRTHDATE'])
# Generate patients from records
for idx, row in patients.iterrows():
p = Patient(patient_id=hash_patient_id(row['Id']),
name=row['NAME'],
birthday=row['BIRTHDATE'].date(),
gender=row['GENDER'])
p.save()
num_patients = len(Patient.nodes)
log.info(f'Created {num_patients} Patients')
@db.transaction
def create_families(self):
"""Create some Family nodes and link some patient
to the family he/she belongs to.
"""
log.info('Creating family nodes ...')
# Iterate over patients
for p in Patient.nodes.all():
# Get/create a family
id = int(random.uniform(0, 0.8 * len(Patient.nodes)))
fam = Family.get_or_create({'family_id': id})[0]
# Create relationship between family and patient
p.in_family.connect(fam)
@db.transaction
def create_projects(self):
"""Create some project nodes (attributes [name]) and link
patients to the projects they attended.
"""
log.info('Creating project nodes ...')
for p in Patient.nodes.all():
# Get/create a project
id = int(random.uniform(0, 0.1 * len(Patient.nodes)))
prj = Project.get_or_create({'name': f'Project #{id}'})[0]
# Create relationship between project and patient
p.in_project.connect(prj)
@db.transaction
def create_diagnoses(self, limit=80):
"""Create all Diagnosis nodes and he had with attributes [diagnosis_id, icd, name].
Link each patient to his diagnoses where the relationship contains the properties
[date (of diagnosis), age_at_diagnosis, addition].
"""
log.info('Creating diagnosis nodes ...')
conds = read_csv('conditions.csv')
conds = conds.drop(['STOP', 'ENCOUNTER'], axis=1)
fake = Faker()
# Create diagnoses nodes
for i, ((code, name), group) in enumerate(conds.groupby(['CODE', 'DESCRIPTION']), start=1):
if i >= limit:
break
diag = Diagnosis(name=name, icd=code)
diag.save()
# Link patients
for _, row in group.iterrows():
try:
p = Patient.nodes.get(patient_id=hash_patient_id(row['PATIENT']))
except Patient.DoesNotExist:
continue
# Create structured relationship between diagnosis and patient (n:m)
today = date.today()
age = today.year - p.birthday.year - ((today.month, today.day) < (p.birthday.month, p.birthday.day))
args = {
'date': datetime.strptime(row['START'], "%Y-%m-%d"),
'age_at_diagnosis': int(random.random() * age),
'addition': fake.pystr()}
p.has_diagnoses.connect(diag, args)
diag.has_patients.connect(p, args)
# Create ALL-Diagnosis
diag = Diagnosis(name='Lymphatische Vorläuferneoplasien_B lymphoblastische Leukämie', icd='1234567')
diag.save()
# Link all patients
for p in Patient.nodes.all():
# Create structured relationship between diagnosis and patient (n:m)
today = date.today()
age = today.year - p.birthday.year - ((today.month, today.day) < (p.birthday.month, p.birthday.day))
args = {
'date': datetime.strptime(row['START'], "%Y-%m-%d"),
'age_at_diagnosis': int(random.random() * age),
'addition': fake.pystr()}
p.has_diagnoses.connect(diag, args)
diag.has_patients.connect(p, args)
num_diag = len(Diagnosis.nodes)
log.info(f'Created {num_diag} Diagnoses')
@db.transaction
def create_orders(self):
"""Create an Order node for each order and connect
orders and patients with a relationship.
"""
log.info('Creating order nodes ...')
fake = Faker()
# For each patient add each of his orders
for p in Patient.nodes:
for i in range(random.randint(1,2)):
# Create an Order
study_id = random.randint(0, 10)
o = Order(order_id=f'{p.patient_id}#{i}',
date=fake.date_between(),
type=f'OrderType #{random.randint(0, 4)}',
study_id=f'Study #{study_id}',
study_name=f'Study #{study_id}')
o.save()
# Create order-patient relationship (n:1)
o.for_patient.connect(p)
p.has_orders.connect(o)
num_orders = len(Order.nodes)
log.info(f'Created {num_orders} Orders')
@db.transaction
def create_materials(self):
"""Create a Material node for each material and submaterial.
        Connect it to the corresponding patient it comes from.
"""
log.info('Creating material nodes ...')
# Iterate over all patients
for p in Patient.nodes:
# Create 3 main materials for each patient
materials = [None, None, None]
for i in range(3):
# Create a material
m = Material(material_id=f"{p.patient_id}#Mat{i}",
description="DNA,RNA,etc.",
type_id=f"Type {i}")
m.save()
p.has_materials.connect(m)
m.of_patient.connect(p)
materials[i] = m
# Create 0-2 submaterials for each material
for m2 in materials:
for i in range(random.randint(0, 2)):
m = Material(material_id=f"{m2.material_id}-{i}",
description=f"{m2.description}",
type_id=f"{m2.type_id}-Sub{i}")
m.save()
m.created_from.connect(m2)
num_materials = len(Material.nodes)
log.info(f'Created {num_materials} Materials')
@db.transaction
def create_analysis(self):
"""Create an Analysis node for each analysis instance with the attributes
[analysis_id, qsv_question,qsv_analysis_assessment, analytical_result].
Create analysis-order relationships between the analysis and the corresponding order (n:1)
and connect each analysis to the specific type of analysis.
"""
log.info('Creating analysis nodes...')
fake = Faker()
# Iterate over all orders
for o in Order.nodes:
# Create 0-1 RNASeqAnalysis nodes from records
if random.random() > 0.5:
a = RNASeqAnalysis(analysis_id=f"{o.order_id}#RNASeq",
analytical_result=fake.pystr(),
qsv_question=fake.pystr(),
qsv_analysis_assessment=fake.pystr())
a.save()
# Create analysis-order relationships (n:1)
a.for_order.connect(o)
o.has_analyses.connect(a)
# Create analysis-material relationship (1:n)
p = o.for_patient.single()
materials = p.has_materials.all()
m = random.choice(materials) # choose material randomly
# Check if there is a more specific submaterial
m2 = m.created_from.get_or_none()
if m2 is not None:
m = m2
a.on_material.connect(m)
m.used_in_analyses.connect(a)
# Create 0-1 ArrayCGHAnalysis nodes from records
if random.random() > 0.5:
a = ArrayCGHAnalysis(analysis_id=f"{o.order_id}#ArrayCGH",
analytical_result=fake.pystr(),
qsv_question=fake.pystr(),
qsv_analysis_assessment=fake.pystr())
a.save()
# Create analysis-order relationships (n:1)
a.for_order.connect(o)
o.has_analyses.connect(a)
# Create analysis-material relationship (1:n)
p = o.for_patient.single()
materials = p.has_materials.all()
m = random.choice(materials) # choose material randomly
# Check if there is a more specific submaterial
m2 = m.created_from.get_or_none()
if m2 is not None:
m = m2
a.on_material.connect(m)
m.used_in_analyses.connect(a)
# Create 0-1 karyotype analysis nodes
if random.random() > 0.5:
                a = KaryotypeAnalysis(analysis_id=f"{o.order_id}#Karyotype",
analytical_result=fake.pystr(),
qsv_question=fake.pystr(),
qsv_analysis_assessment=fake.pystr())
a.save()
# Create analysis-order relationships (n:1)
a.for_order.connect(o)
o.has_analyses.connect(a)
# Create analysis-material relationship (1:n)
p = o.for_patient.single()
materials = p.has_materials.all()
m = random.choice(materials) # choose material randomly
# Check if there is a more specific submaterial
m2 = m.created_from.get_or_none()
if m2 is not None:
m = m2
a.on_material.connect(m)
m.used_in_analyses.connect(a)
num_analyses = len(Analysis.nodes)
log.info(f'Created {num_analyses} Analyses')
@db.transaction
def create_fusions_rnaseq(self):
log.info('Creating fusions RNASeq...')
fake = Faker()
# RNASeq fusions
fusions = read_csv('fusions.csv')
for _, row in fusions.iterrows():
f = Fusion(name=row['fusion_gene'])
f.save()
# Relate analyses to fusions
for a in RNASeqAnalysis.nodes:
# Add 0-2 fusions
samples = fusions.sample(n=random.randint(0,2))
for _, s in samples.iterrows():
f = Fusion.get_or_create({'name':s['fusion_gene']})[0]
a.showed_fusions.connect(f, {'text': fake.pystr()})
num_fusions = len(Fusion.nodes)
log.info(f'Created {num_fusions} Fusions (RNASeq)')
@db.transaction
def create_fusions_array_cgh(self):
log.info('Creating fusions Array CGH...')
fake = Faker()
# CGH Array fusions
f = Fusion.nodes.get(name='P2RY8-CRLF2')
log.info(f'Created {0} new Fusions (Array CGH)')
# Hypo-/Hyperdiploidy & Normal (Aneuploidy)
hypo = Aneuploidy(name='Hypodiploidy')
hypo.save()
hyper = Aneuploidy(name='Hyperdiploidy')
hyper.save()
# Relate analyses to fusions and hypo/hyperdiploidy
num_analyses = int(len(ArrayCGHAnalysis.nodes) / 3)
samples = random.sample(ArrayCGHAnalysis.nodes.all(), num_analyses)
for a in samples:
a.showed_fusions.connect(f, {'text': fake.pystr()})
# Update analysis with chromosomes
chromosomes = random.choice(['<44 Chr.', '<45 Chr.', '>50 Chr.', '45-50 Chr.', '46,XX', '46,XY'])
a.chromosomes = chromosomes
a.save()
# Assign hypo-/hyperdiploidy relation
if chromosomes in ['<44 Chr.', '<45 Chr.']:
a.has_aneuploidy.connect(hypo)
elif chromosomes in ['>50 Chr.']:
a.has_aneuploidy.connect(hyper)
@db.transaction
def merge_fusions(self):
"""Merge fusions nodes with different names that refer to
the same mutation, e.g. "CRLF2-P2RY8" and "P2RY8-CRLF2".
"""
log.info("Merging fusion nodes...")
# Prepare queries
queries = [
read_resource('cypher/merge_gene_name_inverted.cypher'),
read_resource('cypher/merge_bcr_abl.cypher'),
read_resource('cypher/merge_etv6_runx1.cypher'),
read_resource('cypher/merge_tcf3_pbx1.cypher'),
read_resource('cypher/merge_kmt2a_group.cypher'),
read_resource('cypher/merge_pax5_group.cypher'),
read_resource('cypher/merge_znf384_group.cypher'),
read_resource('cypher/merge_mef2d_group.cypher'),
]
for q in queries:
log.debug(f'Running query: {q}')
results, meta = db.cypher_query(q)
log.debug(f'Executed query: {results}, {meta}')
@db.transaction
def create_results(self):
log.info('Creating result nodes ...')
fake = Faker()
# Link each analysis to one result
for a in Analysis.nodes:
r = Result(name=fake.pystr())
r.save()
a.has_results.connect(r, {
'result_id': f'{a.analysis_id}#Res',
'description': fake.pystr(),
'value': fake.pystr(),
})
# Output number of result nodes
num_results = len(Result.nodes)
log.info(f'Created {num_results} Results')
@db.transaction
def add_dashboard(self):
log.info('Adding dashboard ...')
# Load json
res = read_resource('dashboard.json')
dashboard_json = json.loads(res)
        # Query an existing patient id
patient_id = Patient.nodes[0].patient_id
dashboard_json['settings']['parameters']['neodash_patient_patient_id'] = patient_id
# Create node
d = _Neodash_Dashboard(title='ALL Dashboard',
date=datetime.now(),
user='neo4j',
version='2.0',
content=dashboard_json)
d.save()
log.info('Added neodash dashboard node')
```
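An end-to-end usage sketch of the `Control` class defined above; the logging setup is illustrative and not part of the file.
```python
import logging
from create import Control
logging.basicConfig(level=logging.INFO)
ctl = Control()
ctl.deleteData()     # wipe all nodes/relationships except saved dashboards
ctl.create_nodes()   # patients, families, projects, diagnoses, orders, materials, analyses, ...
ctl.add_dashboard()  # store the NeoDash dashboard node
```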
#### File: jeschaef/Graph4Med/utils.py
```python
import logging
from pathlib import Path
import pandas as pd
# Logger
log = logging.getLogger()
def get_project_root() -> Path:
"""Return the path of the project root folder.
Returns:
Path: Path to project root
"""
return Path(__file__).parent
def read_resource(file):
"""Read a resource file located in directory 'res/' in project root.
Args:
file (str): Filename
Returns:
AnyStr: The content of the resource file.
"""
res_path = get_project_root() / 'res'
with open(res_path / file, 'r', encoding='utf-8') as fd:
return fd.read()
def read_csv(file, **kwargs):
"""Read csv data (located in resources folder res/csv/) into a pandas data frame.
Args:
file (str): Filename
Returns:
DataFrame: DataFrame containing the data read from the csv file
"""
log.debug(f'Reading csv data from file "{file}" in "res/csv/"...')
file_path = ((get_project_root() / "res") / "csv") / file
return pd.read_csv(file_path, **kwargs)
```
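A short sketch of how these helpers resolve paths relative to the project root; the file names are ones referenced elsewhere in the repository.
```python
from utils import get_project_root, read_csv, read_resource
print(get_project_root())                             # repository root folder
patients = read_csv('patients.csv', nrows=5)          # resolved to res/csv/patients.csv
query = read_resource('cypher/merge_bcr_abl.cypher')  # resolved to res/cypher/merge_bcr_abl.cypher
print(patients.shape, len(query))
```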
|
{
"source": "JeschkeLab/DeerLab",
"score": 2
}
|
#### File: DeerLab/deerlab/classes.py
```python
import numpy as np
from deerlab.utils import Jacobian, nearest_psd
from scipy.stats import norm
from scipy.signal import fftconvolve
from scipy.linalg import block_diag
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import copy
class FitResult(dict):
# ========================================================================
r""" Represents the results of a fit.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
cost : float
Value of the cost function at the solution.
residuals : ndarray
Vector of residuals at the solution.
stats : dict
Goodness of fit statistical estimators:
* ``stats['chi2red']`` - Reduced \chi^2 test
* ``stats['r2']`` - R^2 test
* ``stats['rmsd']`` - Root-mean squared deviation (RMSD)
* ``stats['aic']`` - Akaike information criterion
* ``stats['aicc']`` - Corrected Akaike information criterion
* ``stats['bic']`` - Bayesian information criterion
Methods
-------
plot()
Display the fit results on a Matplotlib window. The script returns a
`matplotlib.axes <https://matplotlib.org/api/axes_api.html>`_ object.
All graphical parameters can be adjusted from this object.
Notes
-----
    There may be additional attributes not listed above depending on the
specific fit function. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
# =========================================================================
class UQResult:
# =========================================================================
r""" Represents the uncertainty quantification of fit results.
Attributes
----------
type : string
Uncertainty quantification approach:
* 'covariance' - Covariance-based uncertainty analysis
* 'bootstrap' - Bootstrapped uncertainty analysis
mean : ndarray
Mean values of the uncertainty distribution of the parameters.
median : ndarray
Median values of the uncertainty distribution of the parameters.
std : ndarray
Standard deviations of the uncertainty distribution of the parameters.
covmat : ndarray
Covariance matrix
nparam : int scalar
Number of parameters in the analysis.
Methods
-------
"""
def __init__(self,uqtype,data=None,covmat=None,lb=None,ub=None,threshold=None,profiles=None,noiselvl=None):
#Parse inputs schemes
if uqtype=='covariance':
# Scheme 1: UQResult('covariance',parfit,covmat,lb,ub)
self.type = uqtype
parfit = data
nParam = len(parfit)
elif uqtype == 'profile':
# Scheme 2: UQResult('profile',profiles)
if not isinstance(profiles,list):
profiles = [profiles]
self.type = uqtype
self.__parfit = data
self.__noiselvl = noiselvl
self.profile = profiles
self.threshold = threshold
nParam = len(np.atleast_1d(data))
elif uqtype == 'bootstrap':
# Scheme 2: UQResult('bootstrap',samples)
self.type = uqtype
samples = data
self.samples = samples
nParam = np.shape(samples)[1]
elif uqtype=='void':
# Scheme 2: UQResult('void')
self.type = uqtype
self.mean, self.median, self.std, self.covmat, self.nparam = ([] for _ in range(5))
return
else:
            raise NameError("uqtype not found. Must be: 'covariance', 'profile', 'bootstrap' or 'void'.")
if lb is None:
lb = np.full(nParam, -np.inf)
if ub is None:
ub = np.full(nParam, np.inf)
# Set private variables
self.__lb = lb
self.__ub = ub
self.nparam = nParam
# Create confidence intervals structure
if uqtype=='covariance':
self.mean = parfit
self.median = parfit
self.std = np.sqrt(np.diag(covmat))
self.covmat = covmat
# Profile-based CI specific fields
elif uqtype == 'profile':
xs = [self.pardist(n)[0] for n in range(nParam)]
pardists = [self.pardist(n)[1] for n in range(nParam)]
means = [np.trapz(pardist*x,x) for x,pardist in zip(xs,pardists)]
std = [np.sqrt(np.trapz(pardist*(x-mean)**2,x)) for x,pardist,mean in zip(xs,pardists,means)]
self.mean = means
self.median = self.percentile(50)
self.std = std
self.covmat = np.diag(np.array(std)**2)
# Bootstrap-based CI specific fields
elif uqtype == 'bootstrap':
means = np.mean(samples,0)
            covmat = np.squeeze(samples).T@np.squeeze(samples)/np.shape(samples)[0] - means*means.T
self.mean = means
self.median = self.percentile(50)
self.std = np.squeeze(np.std(samples,0))
self.covmat = covmat
# Gets called when an attribute is accessed
#--------------------------------------------------------------------------------
def __getattribute__(self, attr):
try:
# Calling the super class to avoid recursion
if attr!='type' and super(UQResult, self).__getattribute__('type') == 'void':
# Check if the uncertainty quantification has been done, if not report that there is nothing in the object
raise ValueError('The requested attribute/method is not available. Uncertainty quantification has not been calculated during the fit by using the `uq=None` keyword.')
except AttributeError:
# Catch cases where 'type' attribute has still not been defined (e.g. when using copy.deepcopy)
pass
# Otherwise return requested attribute
return super(UQResult, self).__getattribute__(attr)
#--------------------------------------------------------------------------------
# Combination of multiple uncertainties
#--------------------------------------------------------------------------------
def join(self,*args):
"""
Combine multiple uncertainty quantification instances.
Parameters
----------
uq : any number of :ref:`UQResult`
Uncertainty quantification objects with ``N1,N2,...,Nn`` parameters to be joined
to the object calling the method with ``M`` parameters.
Returns
-------
uq_joined : :ref:`UQResult`
Joined uncertainty quantification object with a total of ``M + N1 + N2 + ... + Nn`` parameters.
            The parameter vectors are concatenated in the order they are passed.
"""
# Original metadata
mean = self.mean
covmat = self.covmat
lbm = self.__lb
ubm = self.__ub
for uq in args:
if not isinstance(uq, UQResult):
raise TypeError('Only UQResult objects can be joined.')
if uq.type=='void':
raise TypeError('Void UQResults cannot be joined.')
# Concatenate metadata of external UQResult objects
mean = np.concatenate([mean, uq.mean])
covmat = block_diag(covmat, uq.covmat)
lbm = np.concatenate([lbm, uq.__lb])
ubm = np.concatenate([ubm, uq.__ub])
# Return new UQResult object with combined information
return UQResult('covariance',mean,covmat,lbm,ubm)
#--------------------------------------------------------------------------------
# Parameter distributions
#--------------------------------------------------------------------------------
def pardist(self,n=0):
"""
Generate the uncertainty distribution of the n-th parameter
Parameters
----------
n : int scalar
Index of the parameter
Returns
-------
ax : ndarray
Parameter values at which the distribution is evaluated
pdf : ndarray
Probability density function of the parameter uncertainty.
"""
if n > self.nparam or n < 0:
raise ValueError('The input must be a valid integer number.')
if self.type == 'covariance':
# Generate Gaussian distribution based on covariance matrix
sig = np.sqrt(self.covmat[n,n])
xmean = self.mean[n]
x = np.linspace(xmean-4*sig,xmean+4*sig,500)
pdf = 1/sig/np.sqrt(2*np.pi)*np.exp(-((x-xmean)/sig)**2/2)
if self.type == 'bootstrap':
# Get bw using silverman's rule (1D only)
samplen = self.samples[:, n].real
if np.all(samplen == samplen[0]):
# Dirac's delta distribution
x = np.array([0.9*samplen[0],samplen[0],1.1*samplen[0]])
pdf = np.array([0,1,0])
else:
sigma = np.std(samplen, ddof=1)
bw = sigma*(len(samplen)*3/4.0)**(-1/5)
# Make histogram
maxbin = np.maximum(np.max(samplen),np.mean(samplen)+3*sigma)
minbin = np.minimum(np.min(samplen),np.mean(samplen)-3*sigma)
bins = np.linspace(minbin,maxbin, 2**10 + 1)
count, edges = np.histogram(samplen, bins=bins)
# Generate kernel
delta = np.maximum(np.finfo(float).eps,(edges.max() - edges.min()) / (len(edges) - 1))
kernel_x = np.arange(-4*bw, 4*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(count, kernel, mode='same')
# Set x coordinate of pdf to midpoint of bin
x = edges[:-1] + delta
if self.type=='profile':
if not isinstance(self.profile,list) and n==0:
profile = self.profile
else:
profile = self.profile[n]
σ = self.__noiselvl
obj2likelihood = lambda f: 1/np.sqrt(σ*2*np.pi)*np.exp(-1/2*f/σ**2)
profileinterp = interp1d(profile['x'], profile['y'], kind='slinear', fill_value=1e6,bounds_error=False)
x = np.linspace(np.min(profile['x']), np.max(profile['x']), 2**10 + 1)
pdf = obj2likelihood(profileinterp(x))
# Generate kernel
sigma = np.sum(x*pdf/np.sum(pdf))
bw = sigma*(1e12*3/4.0)**(-1/5)
delta = np.maximum(np.finfo(float).eps,(x.max() - x.min()) / (len(x) - 1))
kernel_x = np.arange(-5*bw, 5*bw + delta, delta)
kernel = norm(0, bw).pdf(kernel_x)
# Convolve
pdf = fftconvolve(pdf, kernel, mode='same')
# Clip the distributions outside the boundaries
pdf[x < self.__lb[n]] = 0
pdf[x > self.__ub[n]] = 0
# Enforce non-negativity (takes care of negative round-off errors)
pdf = np.maximum(pdf,0)
# Ensure normalization of the probability density function
pdf = pdf/np.trapz(pdf, x)
return x, pdf
#--------------------------------------------------------------------------------
# Parameter percentiles
#--------------------------------------------------------------------------------
def percentile(self,p):
"""
Compute the p-th percentiles of the parameters uncertainty distributions
Parameters
----------
p : float scalar
Percentile (between 0-100)
Returns
-------
prctiles : ndarray
Percentile values of all parameters
"""
if p>100 or p<0:
raise ValueError('The input must be a number between 0 and 100')
x = np.zeros(self.nparam)
for n in range(self.nparam):
# Get parameter PDF
values,pdf = self.pardist(n)
# Compute corresponding CDF
cdf = np.cumsum(pdf)
cdf /= max(cdf)
# Eliminate duplicates
cdf, index = np.lib.arraysetops.unique(cdf,return_index=True)
# Interpolate requested percentile
x[n] = np.interp(p/100,cdf,values[index])
return x
#--------------------------------------------------------------------------------
# Covariance-based confidence intervals
#--------------------------------------------------------------------------------
def ci(self,coverage):
"""
Compute the confidence intervals for the parameters.
Parameters
----------
coverage : float scalar
Coverage (confidence level) of the confidence intervals (between 0-100)
Returns
-------
ci : 2D-ndarray
Confidence intervals for the parameters:
* ``ci[:,0]`` - Lower confidence intervals
* ``ci[:,1]`` - Upper confidence intervals
"""
if coverage>100 or coverage<0:
raise ValueError('The input must be a number between 0 and 100')
value = self.mean if hasattr(self,'mean') else self.__parfit
iscomplex = np.iscomplexobj(value)
alpha = 1 - coverage/100
p = 1 - alpha/2 # percentile
confint = np.zeros((self.nparam,2))
if iscomplex: confint = confint.astype(complex)
if self.type=='covariance':
# Compute covariance-based confidence intervals
# Clip at specified box boundaries
standardError = norm.ppf(p)*np.sqrt(np.diag(self.covmat))
confint[:,0] = np.maximum(self.__lb, self.mean.real - standardError)
confint[:,1] = np.minimum(self.__ub, self.mean.real + standardError)
if iscomplex:
confint[:,0] = confint[:,0] + 1j*np.maximum(self.__lb, self.mean.imag - standardError)
confint[:,1] = confint[:,1] + 1j*np.minimum(self.__ub, self.mean.imag + standardError)
elif self.type=='bootstrap':
# Compute bootstrap-based confidence intervals
# Clip possible artifacts from the percentile estimation
confint[:,0] = np.minimum(self.percentile((1-p)*100), np.amax(self.samples))
confint[:,1] = np.maximum(self.percentile(p*100), np.amin(self.samples))
elif self.type=='profile':
# Compute likelihood-profile-based confidence intervals
for n,profile in enumerate(self.profile):
# Construct interpolator for the profile
profileinterp = interp1d(profile['x'], profile['y'], kind='slinear', fill_value=1e6,bounds_error=False)
#-----------------------------------------------------------------
def getCIbound(boundary,optimum):
def getprofile_at(value):
return profileinterp(value) - self.threshold(coverage/100)
# Evaluate the profile function
fbound = getprofile_at(boundary)
f0 = getprofile_at(optimum)
# Check the signs of the shifted profile
if np.sign(fbound)==np.sign(f0):
# If both edges have the same sign return one of the edges
ci_bound = boundary
else:
searchrange = [boundary,optimum] if boundary<optimum else [optimum,boundary]
ci_bound = brentq(getprofile_at, *searchrange,maxiter=int(1e4))
return ci_bound
#-----------------------------------------------------------------
# Get the upper and lower bounds of the confidence interval
confint[n,0] = getCIbound(profile['x'].min(),self.__parfit[n])
confint[n,1] = getCIbound(profile['x'].max(),self.__parfit[n])
# Remove singleton dimensions
confint = np.squeeze(confint)
return confint
# Error Propagation (covariance-based only)
#--------------------------------------------------------------------------------
def propagate(self,model,lb=None,ub=None,samples=None):
"""
        Uncertainty propagation. This function takes the uncertainty analysis of the
        parameters and propagates it to another function that depends on those parameters.
        Parameters
        ----------
        model : callable
            Callable model function taking an array of ``nparam`` parameters.
        lb : ndarray
            Lower bounds of the values returned by ``model``, by default assumed unconstrained.
        ub : ndarray
            Upper bounds of the values returned by ``model``, by default assumed unconstrained.
        samples : int scalar, optional
            Number of samples used for bootstrap-based propagation, by default 1000.
        Returns
        -------
        modeluq : :ref:`UQResult`
            New uncertainty quantification analysis for the outputs of ``model``.
"""
parfit = self.mean
# Evaluate model with fit parameters
modelfit = model(parfit)
iscomplex = np.iscomplexobj(modelfit)
# Validate input boundaries
if lb is None:
lb = np.full(np.size(modelfit), -np.inf)
if ub is None:
ub = np.full(np.size(modelfit), np.inf)
lb,ub = (np.atleast_1d(var) for var in [lb,ub])
if np.size(modelfit)!=np.size(lb) or np.size(modelfit)!=np.size(ub):
raise IndexError ('The 2nd and 3rd input arguments must have the same number of elements as the model output.')
if samples is None:
Nsamples = 1000
else:
Nsamples = samples
if self.type=='covariance':
if iscomplex:
model_ = model
model = lambda p: np.concatenate([model_(p).real,model_(p).imag])
# Get jacobian of model to be propagated with respect to parameters
J = Jacobian(model,parfit,self.__lb,self.__ub)
# Clip at boundaries
modelfit = np.maximum(modelfit,lb)
modelfit = np.minimum(modelfit,ub)
            # Error propagation: covmat_model = J @ covmat_param @ J.T
            modelcovmat = nearest_psd(J@self.covmat@J.T)
if iscomplex:
N = modelcovmat.shape[0]
Nreal = np.arange(0,N/2).astype(int)
Nimag = np.arange(N/2,N).astype(int)
modelcovmat = modelcovmat[np.ix_(Nreal,Nreal)] + 1j* modelcovmat[np.ix_(Nimag,Nimag)]
# Construct new uncertainty object
return UQResult('covariance',modelfit,modelcovmat,lb,ub)
elif self.type=='bootstrap':
sampled_parameters = [[]]*self.nparam
for n in range(self.nparam):
# Get the parameter uncertainty distribution
values,pdf = self.pardist(n)
                # Random sampling from the uncertainty distribution
sampled_parameters[n] = [np.random.choice(values, p=pdf/sum(pdf)) for _ in range(Nsamples)]
# Convert to matrix
sampled_parameters = np.atleast_2d(sampled_parameters)
# Bootstrap sampling of the model response
sampled_model = [model(sampled_parameters[:,n]) for n in range(Nsamples)]
# Convert to matrix
sampled_model = np.atleast_2d(sampled_model)
# Construct new uncertainty object
return UQResult('bootstrap',data=sampled_model,lb=lb,ub=ub)
#--------------------------------------------------------------------------------
# =========================================================================
```
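The `UQResult` class above supports three construction schemes ('covariance', 'profile', 'bootstrap') plus a 'void' placeholder. The following is a minimal, hypothetical usage sketch (not part of the original repository), assuming the class and its helpers (`Jacobian`, `nearest_psd`, `norm`, `fftconvolve`) are in scope as in the module above; it builds a covariance-based uncertainty object for two parameters and queries its main methods.
```python
import numpy as np

# Illustrative fit result for two parameters (values are made up for the example)
parfit = np.array([1.5, 0.3])
covmat = np.array([[0.04, 0.01],
                   [0.01, 0.09]])
lb = np.array([0.0, 0.0])
ub = np.array([10.0, 10.0])

# Scheme 1: covariance-based uncertainty quantification
uq = UQResult('covariance', parfit, covmat, lb, ub)

ci95 = uq.ci(95)             # 95% confidence intervals, clipped at the box boundaries
medians = uq.percentile(50)  # medians of the parameter uncertainty distributions
x, pdf = uq.pardist(0)       # Gaussian uncertainty distribution of the first parameter

# Propagate the uncertainty to a derived scalar quantity (here the product p0*p1)
product_uq = uq.propagate(lambda p: np.atleast_1d(p[0]*p[1]), lb=np.array([0.0]))
print(ci95)
print(product_uq.ci(95))
```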
#### File: DeerLab/test/test_model_class.py
```python
from collections import namedtuple
from deerlab.whitegaussnoise import whitegaussnoise
from deerlab.model import Model, fit
import numpy as np
# Simple non-linear function for testing
x = np.linspace(0,5,100)
def gauss(mean,width):
return np.exp(-(x-mean)**2/width**2/2)
# Non-linear definition
def gauss2(mean1,mean2,width1,width2,amp1,amp2):
return amp1*gauss(mean1,width1) + amp2*gauss(mean2,width2)
# Linear + Non-linear definition
def gauss2_design(mean1,mean2,width1,width2):
return np.atleast_2d([gauss(mean1,width1), gauss(mean2,width2)]).T
# Linear + Non-linear definition
def gauss2_identity():
return np.eye(len(x))
# Linear + Non-linear definition
def gauss2_scaled(scale):
return scale*np.eye(len(x))
mock_data = gauss2(mean1=3,mean2=4,width1=0.5,width2=0.2,amp1=0.5,amp2=0.6)
def test_construction_length():
#================================================================
"Check that the model is contructed correctly with the appropiate number of parameters"
model = Model(gauss)
assert model.Nparam==2
#================================================================
def test_construction_names():
#================================================================
"Check that the model is contructed correctly with the correct parameter names"
model = Model(gauss)
assert 'mean' in model.__dict__ and 'width' in model.__dict__
#================================================================
def test_parameters_set():
#================================================================
"Check that attributes of the parameters are editable"
model = Model(gauss)
model.mean.set(lb=0,ub=10)
assert getattr(model.mean,'lb')==0 and getattr(model.mean,'ub')==10
#================================================================
def test_call_keywords():
#================================================================
"Check that calling the model with parameter returns the correct response"
model = Model(gauss)
response = model(mean=3,width=0.5)
reference = gauss(mean=3,width=0.5)
assert np.allclose(response,reference)
#================================================================
def test_call_positional():
#================================================================
"Check that calling the model with parameter returns the correct response"
model = Model(gauss)
response = model(3,0.5)
reference = gauss(3,0.5)
assert np.allclose(response,reference)
#================================================================
def test_call_mixed():
#================================================================
"Check that calling the model with parameter returns the correct response"
model = Model(gauss)
response = model(3,width=0.5)
reference = gauss(3,0.5)
assert np.allclose(response,reference)
#================================================================
def test_addlinear_length():
#================================================================
"Check that the model is contructed correctly with the appropiate number of parameters"
model = Model(gauss2_design)
model.addlinear('amp1',lb=0)
model.addlinear('amp2',lb=0)
assert model.Nparam==6
#================================================================
def test_addlinear_names():
#================================================================
"Check that linear parameters can be properly added"
model = Model(gauss2_design)
model.addlinear('amp1',lb=0)
model.addlinear('amp2',lb=0)
assert 'amp1' in model.__dict__ and 'amp2' in model.__dict__
#================================================================
def test_addlinear_set():
#================================================================
"Check that attributes of the linear parameters are editable"
model = Model(gauss2_design)
model.addlinear('amp1')
model.amp1.set(lb=0,ub=10)
assert getattr(model.amp1,'lb')==0 and getattr(model.amp1,'ub')==10
#================================================================
def test_addlinear_call_keywords():
#================================================================
"Check that calling the model with parameters returns the correct response"
model = Model(gauss2_design)
model.addlinear('amp1')
model.addlinear('amp2')
response = model(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
reference = gauss2(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
assert np.allclose(response,reference)
#================================================================
def test_addlinear_call_positional():
#================================================================
"Check that calling the model with parameters returns the correct response"
model = Model(gauss2_design)
model.addlinear('amp1')
model.addlinear('amp2')
response = model(3,4,0.2,0.3,0.5,0.4)
reference = gauss2(3,4,0.2,0.3,0.5,0.4)
assert np.allclose(response,reference)
#================================================================
def test_addlinear_call_mixed():
#================================================================
"Check that calling the model with parameters returns the correct response"
model = Model(gauss2_design)
model.addlinear('amp1')
model.addlinear('amp2')
response = model(3,4,0.2,width2=0.3,amp1=0.5,amp2=0.4)
reference = gauss2(3,4,0.2,width2=0.3,amp1=0.5,amp2=0.4)
assert np.allclose(response,reference)
#================================================================
#================================================================
def test_addlinear_vector_length():
"Check that linear parameters can be defined as vectors"
model = Model(gauss2_identity)
model.addlinear('gaussian', vec=100)
assert model.Nparam==100
#================================================================
#================================================================
def test_addlinear_vector_names():
"Check that linear parameters can be defined as vectors"
model = Model(gauss2_identity)
model.addlinear('gaussian', vec=100)
assert 'gaussian' in model.__dict__
#================================================================
def test_addlinear_vector_set():
#================================================================
"Check that attributes of the vector linear parameters are editable"
model = Model(gauss2_identity)
model.addlinear('gaussian', vec=100)
model.gaussian.set(lb=np.zeros(100))
assert np.allclose(getattr(model.gaussian,'lb'),np.zeros(100))
#================================================================
def test_addlinear_vector_call_keywords():
#================================================================
"Check that calling the model with scalar and vector parameters returns the correct response"
model = Model(gauss2_identity)
model.addlinear('gaussian', vec=len(x))
reference = gauss2(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
response = model(gaussian=reference)
assert np.allclose(response,reference)
#================================================================
def test_addlinear_vector_call_positional():
#================================================================
"Check that calling the model with scalar and vector parameters returns the correct response"
model = Model(gauss2_identity)
model.addlinear('gaussian', vec=len(x))
reference = gauss2(3,4,0.2,0.3,0.5,0.4)
response = model(reference)
assert np.allclose(response,reference)
#================================================================
def test_mixed_vector_length():
#================================================================
"Check the definition of scalar nonlinear parameters and vector linear parameters"
model = Model(gauss2_scaled)
model.addlinear('gaussian', vec=100)
assert model.Nparam==101
#================================================================
def test_mixed_vector_names():
#================================================================
"Check the definition of scalar nonlinear parameters and vector linear parameters"
model = Model(gauss2_scaled)
model.addlinear('gaussian', vec=100)
assert 'gaussian' in model.__dict__ and 'scale' in model.__dict__
#================================================================
def test_mixed_vector_call_keywords():
#================================================================
"Check that calling the model with scalar and vector parameters returns the correct response"
model = Model(gauss2_scaled)
model.addlinear('gaussian', vec=len(x))
reference = 5*gauss2(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4)
response = model(scale=5,gaussian=gauss2(mean1=3,mean2=4,width1=0.2,width2=0.3,amp1=0.5,amp2=0.4))
assert np.allclose(response,reference)
#================================================================
def test_mixed_vector_call_positional():
#================================================================
"Check that calling the model with scalar and vector parameters returns the correct response"
model = Model(gauss2_scaled)
model.addlinear('gaussian', vec=len(x))
reference = 5*gauss2(3,4,0.2,0.3,0.5,0.4)
response = model(5,gauss2(3,4,0.2,0.3,0.5,0.4))
assert np.allclose(response,reference)
#================================================================
def test_addnonlinear_length():
#================================================================
"Check that the model is contructed correctly with the appropiate number of parameters"
model = Model(gauss)
model.addnonlinear('trivial1',lb=0)
model.addnonlinear('trivial2',lb=0)
assert model.Nparam==4
#================================================================
def test_addnonlinear_names():
#================================================================
"Check that linear parameters can be properly added"
model = Model(gauss)
model.addnonlinear('trivial1',lb=0)
model.addnonlinear('trivial2',lb=0)
assert hasattr(model,'trivial1') and hasattr(model,'trivial2')
#================================================================
def test_addnonlinear_set():
#================================================================
"Check that attributes of the linear parameters are editable"
model = Model(gauss)
model.addnonlinear('trivial1')
model.trivial1.set(lb=0,ub=10)
assert getattr(model.trivial1,'lb')==0 and getattr(model.trivial1,'ub')==10
#================================================================
def test_addnonlinear_call_keywords():
#================================================================
"Check that calling the model with parameters returns the correct response"
model = Model(gauss)
model.addnonlinear('trivial1')
model.addnonlinear('trivial2')
response = model(mean=3,width=0.2,trivial1=1,trivial2=1)
reference = gauss(mean=3,width=0.2)
assert np.allclose(response,reference)
#================================================================
def test_addnonlinear_call_positional():
#================================================================
"Check that calling the model with parameters returns the correct response"
model = Model(gauss)
model.addnonlinear('trivial1')
model.addnonlinear('trivial2')
response = model(3,0.2,1,1)
reference = gauss(3,0.2)
assert np.allclose(response,reference)
#================================================================
#----------------------------------------------------------------
def _getmodel(type):
if type=='parametric':
model = Model(gauss2)
model.mean1.set(lb=0, ub=10, par0=2)
model.mean2.set(lb=0, ub=10, par0=4)
model.width1.set(lb=0.01, ub=5, par0=0.2)
model.width2.set(lb=0.01, ub=5, par0=0.2)
model.amp1.set(lb=0, ub=5, par0=1)
model.amp2.set(lb=0, ub=5, par0=1)
elif type=='semiparametric':
model = Model(gauss2_design)
model.mean1.set(lb=0, ub=10, par0=2)
model.mean2.set(lb=0, ub=10, par0=4)
model.width1.set(lb=0.01, ub=5, par0=0.2)
model.width2.set(lb=0.01, ub=5, par0=0.2)
model.addlinear('amp1',lb=0, ub=5)
model.addlinear('amp2',lb=0, ub=5)
elif type=='nonparametric':
model = Model(gauss2_design(3,4,0.5,0.2))
model.addlinear('amp1',lb=0)
model.addlinear('amp2',lb=0)
return model
#----------------------------------------------------------------
# ======================================================================
def test_preserve_original():
"Check that the original model is not changed by the function"
model = Model(gauss)
model.mean.par0 = 3
model.width.par0 = 0.2
_ = fit(model,mock_data)
assert model._parameter_list() == ['mean','width']
# ======================================================================
def test_fit_parametric():
#================================================================
"Check that a parametric model can be correctly fitted"
model = _getmodel('parametric')
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data)
#================================================================
def test_fit_semiparametric():
#================================================================
"Check that a semiparametric model can be correctly fitted"
model = _getmodel('semiparametric')
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data)
#================================================================
def test_fit_nonparametric():
#================================================================
"Check that a semiparametric model can be correctly fitted"
model = _getmodel('nonparametric')
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data,atol=1e-3)
#================================================================
def test_freeze():
#================================================================
"Check that a model parameter can be frozen to a fixed value"
model = Model(gauss)
model.mean.freeze(3)
assert model.mean.value==3 and model.mean.frozen==True
#================================================================
def test_unfreeze():
#================================================================
"Check that a model parameter can be frozen and then reversed"
model = Model(gauss)
model.mean.freeze(3)
model.mean.unfreeze()
assert model.mean.frozen==False and model.mean.value==None
#================================================================
def test_fit_parametric_frozen():
#================================================================
"Check that a parametric model can be correctly fitted"
model = _getmodel('parametric')
model.mean1.freeze(3)
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data)
#================================================================
def test_fit_semiparametric_frozen():
#================================================================
"Check that a semiparametric model can be correctly fitted"
model = _getmodel('semiparametric')
model.mean1.freeze(3)
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data)
#================================================================
def test_fit_nonparametric_frozen():
#================================================================
"Check that a semiparametric model can be correctly fitted"
model = _getmodel('nonparametric')
model.amp1.freeze(0.5)
fitResult = fit(model,mock_data)
assert np.allclose(fitResult.model,mock_data)
#================================================================
#----------------------------------------------------------------
def assert_attributes_cis(fitobject,attributes):
for attr in attributes:
parfit = getattr(fitobject,attr)
parci = getattr(fitobject,f'{attr}Uncert').ci(95)
ci_lower = parci[0]
ci_upper = parci[1]
if getattr(fitobject,f'{attr}Uncert').type=='bootstrap':
assert np.allclose(parfit,getattr(fitobject,f'{attr}Uncert').median)
assert parfit<=ci_upper and parfit>=ci_lower
#----------------------------------------------------------------
def test_CIs_parametric():
#================================================================
"Check the default confidence intervals of the fitted parameters"
model = _getmodel('parametric')
fitResult = fit(model,mock_data)
assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])
#================================================================
def test_CIs_semiparametric():
#================================================================
"Check the default confidence intervals of the fitted parameters"
model = _getmodel('semiparametric')
fitResult = fit(model,mock_data)
assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])
#================================================================
def test_CIs_nonparametric():
#================================================================
"Check the default confidence intervals of the fitted parameters"
model = _getmodel('semiparametric')
fitResult = fit(model,mock_data)
assert_attributes_cis(fitResult,['amp1','amp2'])
#================================================================
def test_bootCIs_parametric():
#================================================================
"Check the bootstrapped confidence intervals of the fitted parameters"
model = _getmodel('parametric')
noisydata = mock_data + whitegaussnoise(0.01,seed=1)
fitResult = fit(model,noisydata,bootstrap=3)
assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])
#================================================================
def test_bootCIs_semiparametric():
#================================================================
"Check the bootstrapped confidence intervals of the fitted parameters"
model = _getmodel('semiparametric')
noisydata = mock_data + whitegaussnoise(0.01,seed=1)
fitResult = fit(model,noisydata,bootstrap=3)
assert_attributes_cis(fitResult,['mean1','mean2','width1','width2','amp1','amp2'])
#================================================================
def test_bootCIs_nonparametric():
#================================================================
"Check the bootstrapped confidence intervals of the fitted parameters"
model = _getmodel('semiparametric')
noisydata = mock_data + whitegaussnoise(0.01,seed=1)
fitResult = fit(model,noisydata,bootstrap=3)
assert_attributes_cis(fitResult,['amp1','amp2'])
#================================================================
# Simple non-linear function for testing
def gauss_axis(axis,mean,width):
return np.exp(-(axis-mean)**2/width**2/2)
# Non-linear definition
def gauss2_axis(axis,mean1,mean2,width1,width2,amp1,amp2):
return amp1*gauss_axis(axis,mean1,width1) + amp2*gauss_axis(axis,mean2,width2)
# Linear + Non-linear definition
def gauss2_design_axis(axis,mean1,mean2,width1,width2):
return np.atleast_2d([gauss_axis(axis,mean1,width1), gauss_axis(axis,mean2,width2)]).T
# Linear + Non-linear definition
def gauss2_identity_axis(axis):
return np.eye(len(axis))
mock_data_fcn = lambda axis: gauss2_axis(axis,mean1=3,mean2=4,width1=0.5,width2=0.2,amp1=0.5,amp2=0.6)
def test_model_with_constant_positional():
#================================================================
"Check that a model with axis can be defined and called"
model = Model(gauss_axis,constants='axis')
x = np.linspace(0,10,300)
reference = gauss_axis(x,3,0.5)
response = model(x,3,0.5)
assert np.allclose(reference,response)
#================================================================
def test_model_with_constant_keywords():
#================================================================
"Check that a model with axis can be defined and called via keywords"
model = Model(gauss_axis,constants='axis')
x = np.linspace(0,10,300)
reference = gauss_axis(x,3,0.5)
response = model(axis=x, mean=3, width=0.5)
assert np.allclose(reference,response)
#================================================================
def test_model_with_constant_mixed():
#================================================================
"Check that a model with axis can be defined and called via keywords"
model = Model(gauss_axis,constants='axis')
x = np.linspace(0,10,300)
reference = gauss_axis(x,3,0.5)
response = model(x, mean=3, width=0.5)
assert np.allclose(reference,response)
#================================================================
#----------------------------------------------------------------
def _getmodel_axis(type):
if type=='parametric':
model = Model(gauss2_axis,constants='axis')
model.mean1.set(lb=0, ub=10, par0=2)
model.mean2.set(lb=0, ub=10, par0=4)
model.width1.set(lb=0.01, ub=5, par0=0.2)
model.width2.set(lb=0.01, ub=5, par0=0.2)
model.amp1.set(lb=0, ub=5, par0=1)
model.amp2.set(lb=0, ub=5, par0=1)
elif type=='semiparametric':
model = Model(gauss2_design_axis,constants='axis')
model.mean1.set(lb=0, ub=10, par0=2)
model.mean2.set(lb=0, ub=10, par0=4)
model.width1.set(lb=0.01, ub=5, par0=0.2)
model.width2.set(lb=0.01, ub=5, par0=0.2)
model.addlinear('amp1',lb=0, ub=5)
model.addlinear('amp2',lb=0, ub=5)
elif type=='nonparametric':
model = Model(lambda x: gauss2_design_axis(x,3,4,0.5,0.2),constants='x')
model.addlinear('amp1',lb=0)
model.addlinear('amp2',lb=0)
return model
#----------------------------------------------------------------
def test_fit_parametric_constant():
#================================================================
"Check that a parametric model can be correctly fitted while specifying an axis"
model = _getmodel_axis('parametric')
x = np.linspace(0,10,200)
fitResult = fit(model,mock_data_fcn(x),x)
assert np.allclose(fitResult.model,mock_data_fcn(x))
#================================================================
def test_fit_semiparametric_constant():
#================================================================
"Check that a semiparametric model can be correctly fitted while specifying an axis"
model = _getmodel_axis('semiparametric')
x = np.linspace(0,10,200)
fitResult = fit(model,mock_data_fcn(x),x)
assert np.allclose(fitResult.model,mock_data_fcn(x))
#================================================================
def test_fit_nonparametric_constant():
#================================================================
"Check that a semiparametric model can be correctly fitted while specifying an axis"
model = _getmodel_axis('nonparametric')
x = np.linspace(0,10,200)
fitResult = fit(model,mock_data_fcn(x),x)
assert np.allclose(fitResult.model,mock_data_fcn(x),atol=1e-3)
#================================================================
def gauss_multiaxis(axis1,axis2,mean,width):
return np.exp(-(axis1-mean)**2/width**2/2)
def gauss2_multiaxis(axis1,axis2,mean1,mean2,width1,width2,amp1,amp2):
return amp1*gauss_axis(axis1,mean1,width1) + amp2*gauss_axis(axis2,mean2,width2)
def test_model_with_multiple_constants():
#================================================================
"Check that a model with axis can be defined and called"
model = Model(gauss_multiaxis,constants=['axis1','axis2'])
x1 = np.linspace(0,5,300)
x2 = np.linspace(5,10,300)
reference = gauss_multiaxis(x1,x2,3,0.5)
response = model(x1,x2,3,0.5)
assert np.allclose(reference,response)
#================================================================
def test_model_with_multiple_constants_fit():
#================================================================
"Check that a model with axis can be defined and called"
model = Model(gauss2_multiaxis,constants=['axis1','axis2'])
model.mean1.set(lb=0, ub=10, par0=2)
model.mean2.set(lb=0, ub=10, par0=4)
model.width1.set(lb=0.01, ub=5, par0=0.2)
model.width2.set(lb=0.01, ub=5, par0=0.2)
model.amp1.set(lb=0, ub=5, par0=1)
model.amp2.set(lb=0, ub=5, par0=1)
x = np.linspace(0,10,300)
x1 = np.linspace(0,10,300)
x2 = np.linspace(0,10,300)
fitResult = fit(model,mock_data_fcn(x),x1,x2)
assert np.allclose(fitResult.model,mock_data_fcn(x))
#================================================================
def test_model_constant_same_values_keywords():
#================================================================
"Check that a model with axis can be defined and called with same values as parameters"
model = Model(gauss_axis,constants='axis')
reference = gauss_axis(axis=3,mean=3,width=0.5)
response = model(axis=3,mean=3,width=0.5)
assert np.allclose(reference,response)
#================================================================
def test_model_constant_same_values_positional():
#================================================================
"Check that a model with axis can be defined and called with same values as parameters"
model = Model(gauss_axis,constants='axis')
reference = gauss_axis(3,3,0.5)
response = model(3,3,0.5)
assert np.allclose(reference,response)
#================================================================
def model(phase, center, width):
y = gauss(center, width)
y = y*np.exp(-1j*phase)
return y
mymodel = Model(model)
mymodel.phase.set(par0=2*np.pi/5, lb=-np.pi, ub=np.pi)
mymodel.center.set(par0=4, lb=1, ub=6)
mymodel.width.set(par0=0.2, lb=0.05, ub=5)
y = mymodel(phase=np.pi/5, center=3, width=0.5)
def test_complex_model_complex_data():
# ======================================================================
"Check the fit of a complex-valued model to complex-valued data"
fitResult = fit(mymodel,y)
assert np.allclose(fitResult.model.real,y.real) and np.allclose(fitResult.model.imag,y.imag)
# ======================================================================
def test_complex_model_real_data():
# ======================================================================
"Check the fit of a complex-valued model to real-valued data"
fitResult = fit(mymodel,y.real)
assert np.allclose(fitResult.model.real,y.real) and np.allclose(fitResult.model.imag,np.zeros_like(y.real))
# ======================================================================
def test_real_model_complex_data():
# ======================================================================
"Check the fit of a real-valued model to complex-valued data"
mymodel = Model(gauss)
mymodel.mean.set(par0=4, lb=1, ub=6)
mymodel.width.set(par0=0.2, lb=0.05, ub=5)
fitResult = fit(mymodel,y.real + 1j*np.zeros_like(y.imag))
assert np.allclose(fitResult.model.real,y.real) and np.allclose(fitResult.model.imag,np.zeros_like(y.real))
# ======================================================================
```
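As a compact summary of the workflow exercised by these tests, the sketch below (hypothetical, with illustrative parameter values) builds a `Model` from a plain callable, constrains its parameters, fits it to synthetic data, and reads back the fitted values and their confidence intervals through the `<parameter>Uncert` attributes used in `assert_attributes_cis` above.
```python
import numpy as np
from deerlab.model import Model, fit

x = np.linspace(0, 5, 100)

def gauss(mean, width):
    return np.exp(-(x - mean)**2/width**2/2)

# Build a model from the callable and constrain its two nonlinear parameters
model = Model(gauss)
model.mean.set(lb=0, ub=10, par0=2)
model.width.set(lb=0.05, ub=5, par0=0.3)

# Fit against noiseless synthetic data, as the tests above do
data = gauss(mean=3, width=0.5)
result = fit(model, data)

print(result.mean, result.width)   # fitted parameter values
print(result.meanUncert.ci(95))    # 95% confidence interval of 'mean'
print(np.allclose(result.model, data))
```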
|
{
"source": "JeschkeLab/messtechnikpy",
"score": 4
}
|
#### File: messtechnikpy/tutorial_2_linearity_in_spectroscopy/Bloch.py
```python
import numpy as np
from numpy import pi
def Bloch_stationary(frq_axis,frq,T1,T2,nu1 = 0.002):
"""
    Computes the spectrum of a single spectral line on a given frequency axis,
    based on the stationary solutions of the Bloch equations.
Input parameters:
frq_axis frequency axis
frq frequency offset from carrier (resonance offset)
T1 longitudinal relaxation time
T2 transverse relaxation time
Optional parameters:
nu1 irradiation amplitude (default=0.002, linear regime)
Output parameters:
spectrum complex spectrum, real part absorption, imaginary part dispersion
<NAME>, 2020 adapted from <NAME>, 2011, for lecture course Messtechnik
"""
ndefault = 1024 # default number of points of frequency axis
verbose = True # warnings are output, if true and suppressed, if false
    # linewidth (FWHM) of the unsaturated Lorentzian line
linewidth = 1/(pi*T2)
# determine if irradiation amplitude leads to saturation
S = (2*pi*nu1)**2*T1*T2
# determine range of significant line intensity
minfrq = frq - 10*linewidth
maxfrq = frq + 10*linewidth
if min(frq_axis)>minfrq and verbose:
print('Warning: Spectral line extends beyond minimum of frequency axis.\n')
if max(frq_axis)<maxfrq and verbose:
print('Warning: Spectral line extends beyond maximum of frequency axis.\n')
arg = (2*pi*(frq_axis - frq)*T2)**2
denom = arg + 1 + S
dispersion = -2*pi*nu1*(2*pi*(frq_axis - frq)*T2**2)/denom
absorption = 2*pi*nu1*T2*np.ones_like(frq_axis)/denom
spectrum = absorption + 1j*dispersion
return spectrum
```
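A short usage sketch for `Bloch_stationary` (hypothetical values, assuming the function is imported from the module above): it evaluates the stationary spectrum on an explicit frequency axis, once in the linear regime (default `nu1`) and once with a stronger irradiation amplitude, where the saturation parameter S = (2πν₁)²T₁T₂ prevents the absorption from growing linearly with ν₁.
```python
import numpy as np

T1, T2 = 1.0, 0.1   # relaxation times (arbitrary units)
frq = 0.0           # on-resonance line

# Frequency axis spanning +/- 20 linewidths around the line, linewidth = 1/(pi*T2)
linewidth = 1/(np.pi*T2)
frq_axis = np.linspace(-20*linewidth, 20*linewidth, 1024)

spectrum_linear = Bloch_stationary(frq_axis, frq, T1, T2)              # default nu1, linear regime
spectrum_saturated = Bloch_stationary(frq_axis, frq, T1, T2, nu1=0.5)  # partially saturated

# Peak absorption per unit nu1: constant in the linear regime, reduced under saturation
print(spectrum_linear.real.max()/0.002, spectrum_saturated.real.max()/0.5)
```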
|
{
"source": "jeschkies/marathon",
"score": 2
}
|
#### File: system/fixtures/__init__.py
```python
import common
import os.path
import pytest
import shakedown
from datetime import timedelta
def fixtures_dir():
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="function")
def wait_for_marathon_and_cleanup():
print("entering wait_for_marathon_and_cleanup fixture")
shakedown.wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds())
yield
shakedown.wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds())
common.clean_up_marathon()
print("exiting wait_for_marathon_and_cleanup fixture")
@pytest.fixture(scope="function")
def wait_for_marathon_user_and_cleanup():
print("entering wait_for_marathon_user_and_cleanup fixture")
shakedown.wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds())
with shakedown.marathon_on_marathon():
yield
shakedown.wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds())
common.clean_up_marathon()
print("exiting wait_for_marathon_user_and_cleanup fixture")
@pytest.fixture(scope="function")
def events_to_file():
print("entering events_to_file fixture")
shakedown.run_command_on_master('rm events.txt')
shakedown.run_command_on_master(
'curl --compressed -H "Cache-Control: no-cache" -H "Accept: text/event-stream" '
'-o events.txt leader.mesos:8080/v2/events &')
yield
shakedown.kill_process_on_host(shakedown.master_ip(), '[c]url')
shakedown.run_command_on_master('rm events.txt')
print("exiting events_to_file fixture")
@pytest.fixture(scope="function")
def user_billy():
print("entering user_billy fixture")
shakedown.add_user('billy', 'billy')
shakedown.set_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
shakedown.set_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
yield
shakedown.remove_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
shakedown.remove_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
shakedown.remove_user('billy')
print("exiting user_billy fixture")
```
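A hypothetical example of how a system test could consume these fixtures (the test bodies are illustrative only; pytest resolves the fixtures by name, e.g. when they are exposed through a conftest module). Only calls already used in the fixtures above are assumed.
```python
from datetime import timedelta

import shakedown


def test_marathon_endpoint_is_healthy(wait_for_marathon_and_cleanup):
    # The fixture has already waited for the 'marathon' service endpoint and
    # will clean up Marathon state once this test body returns.
    shakedown.wait_for_service_endpoint('marathon', timedelta(minutes=1).total_seconds())


def test_event_stream_is_recorded(events_to_file):
    # While this test runs, the fixture streams /v2/events into events.txt on
    # the master; the file is removed again when the fixture finalizes.
    shakedown.run_command_on_master('wc -l events.txt')
```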
|
{
"source": "jeschkies/nyan",
"score": 2
}
|
#### File: nyan/feature_extractor/main.py
```python
from feature_extractor.extractors import EsaFeatureExtractor
import json
import logging
import socket
import sys
import time
from utils.daemon import Daemon
import stomp #needs to be after daemon for some reason
import yaml
"""
Receives news articles in a STOMP message from the feed crawler.
Text features are then extracted based on a feature model learned in an offline
process. The articles are then send on to the article ranker.
"""
class StompListener(object):
def __init__(self, config):
self.config_ = config
self.logger_ = logging.getLogger("main")
self.extractor = EsaFeatureExtractor(prefix = config['prefix'])
def __extract_features(self, message):
'''
Extracts features from clean content and sends it on
'''
self.logger_.debug("Got article '%s'" % message['headline'])
features = self.extractor.get_features(message['clean_content'])
version = self.extractor.get_version()
#add features to json representation of article
message['features'] = {'version': version,
'data': features}
#send message on to Article Ranker
try:
self.conn_.send(json.dumps(message), destination="queue/features")
except Exception as inst:
self.logger_.error("Could not send message to feature queue. "
"Unknown Error %s: %s" % (type(inst), inst))
    def on_error(self, headers, message):
        self.logger_.error('received an error %s' % message)
def on_message(self, headers, message):
received_message = json.loads(message)
self.__extract_features(received_message)
def set_stomp_connection(self, connection):
self.conn_ = connection
class FeatureExtractorDaemon(Daemon):
def __init__(self, pidfile, config_file = None, log_file = None):
logging.basicConfig(format='-' * 80 + '\n' +
'%(asctime)s : %(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80,
level=logging.DEBUG,
filename= log_file)
try:
if config_file != None:
stream = file(config_file, 'r')
self.config_ = yaml.load(stream)
stream.close()
else:
self.config_ = None
except IOError as e:
print "Could not open %s: %s" % (config_file, e)
sys.exit(1)
except Exception as inst:
print "Unknown error %s: %s" % (type(inst), inst)
sys.exit(1)
super(FeatureExtractorDaemon, self).__init__(pidfile)
def run(self):
logger = logging.getLogger("main")
if self.config_ == None:
logger.error("No config.")
sys.exit(1)
hosts = [('localhost', 61613)]
connected = False
trys = 5
while not connected:
try:
trys = trys-1
listener = StompListener(self.config_)
conn = stomp.Connection()
conn.set_listener('', listener)
conn.start()
conn.connect()
conn.subscribe(destination='queue/rawarticles', ack='auto')
connected = True
listener.set_stomp_connection(conn)
except stomp.exception.ConnectFailedException:
if trys > 0:
pass
else:
logger.error("Could not connect to STOMP broker")
sys.exit(1)
except socket.error:
pass
if connected:
logger.info("connected to STOMP broker")
while True:
time.sleep(20)
if __name__ == "__main__":
from optparse import OptionParser
p = OptionParser()
p.add_option('-c', '--config', action="store", dest='config',
help="specify config file")
p.add_option('-d', action="store_true", dest='daemonize',
help="run the server as a daemon")
p.add_option('-l', '--log', action="store", dest='log',
help="specify log file")
p.add_option('-p', '--pidfile', dest='pidfile',
default='/tmp/daemon-feature-extractor.pid',
help="store the process id in the given file. Default is "
"/tmp/daemon-feature-extractor.pid")
(options, args) = p.parse_args()
daemon = FeatureExtractorDaemon(options.pidfile, options.config, options.log)
if len(sys.argv) >= 2:
if 'start' == sys.argv[1]:
if not options.config or not options.log:
print "No config or logfile set."
sys.exit(2)
elif options.daemonize:
daemon.start()
else:
daemon.run()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
if not options.config or not options.log:
print "No config or logfile set."
sys.exit(2)
else:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart options" % sys.argv[0]
sys.exit(2)
```
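For reference, a hypothetical producer sketch for the queue this daemon listens on: it publishes one raw article (with the `headline` and `clean_content` fields the listener reads) to `queue/rawarticles`, mirroring the stomp.py calls and default broker address (localhost:61613) used above.
```python
import json
import stomp

article = {
    'headline': 'Example headline',
    'clean_content': 'Plain text body of the article ...',
}

conn = stomp.Connection()   # defaults match the hosts used in run(): localhost:61613
conn.start()
conn.connect()
conn.send(json.dumps(article), destination='queue/rawarticles')
conn.disconnect()
```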
#### File: nyan/frontend/frontend.py
```python
from appuser import AppUser
from datetime import datetime, timedelta
from flask import (Flask, abort, redirect, url_for, render_template, request,
flash, session)
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin, AnonymousUser,
confirm_login, fresh_login_required)
from gensim.corpora import Dictionary
import hashlib
from jinja2 import Environment, FileSystemLoader
import jinja2_filters
import logging
from models.mongodb_models import (Vendor, User, Article, Feedback,
                                   ReadArticleFeedback, LearnedProfile)
from mongoengine import *
from nltk.tokenize import sent_tokenize
import os.path
import sys
import time
from utils.helper import load_config
import yaml
#Configure logger
logging.basicConfig(format='-' * 80 + '\n' +
'%(asctime)s : %(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80,
level=logging.DEBUG,
filename= "log.txt")
#Flask app
app = Flask(__name__)
#salt for hashing etc.
SALT = u""
#Load non-FLASK config
config = load_config("config.yaml", app.logger)
#Flask config
try:
SECRET_KEY = config['flask']['secret_key']
DEBUG = config['flask']['debug']
except KeyError as e:
app.logger.error("Malformed config." +
"Could not get flask secret key and debug option: %s"
% (e))
sys.exit(1)
app.config.from_object(__name__)
#Login manager
login_manager = LoginManager()
login_manager.login_view = "login"
login_manager.login_message = u"Please log in to access this page."
#login_manager.refresh_view = "reauth"
@login_manager.user_loader
def load_user(user_id):
'''
Loads user from Database
'''
try:
user = User.objects(id = user_id).first()
except Exception as inst:
app.logger.error("Could not login user %s: %s" % (type(inst), type))
return None
if user == None:
app.logger.error("No user found for %s" % user_id)
return None
return AppUser(user)
login_manager.setup_app(app)
#Connect to mongo database
connect(config['database']['db-name'],
username= config['database']['user'],
password= config['database']['passwd'],
port = config['database']['port'])
#jinja2 filter to test if vendor is in given subscription
def is_subscribed(vendor):
if not current_user.is_authenticated():
return False
try:
for v in current_user.mongodb_user.subscriptions:
if v == vendor:
return True
return False
except Exception as inst:
app.logger.error("Error when checking subscription %s: %s" % (type(inst), inst))
return False
#register jinja2 filters
app.jinja_env.filters['datetimeformat'] = jinja2_filters.datetimeformat
app.jinja_env.filters['datetimeformat_read'] = jinja2_filters.datetimeformat_read
app.jinja_env.filters['firstparagraph'] = jinja2_filters.firstparagraph
app.jinja_env.filters['prevdate'] = jinja2_filters.prevdate
app.jinja_env.filters['nextdate'] = jinja2_filters.nextdate
app.jinja_env.filters['start_timer'] = jinja2_filters.start_timer
app.jinja_env.filters['end_timer'] = jinja2_filters.end_timer
#jinja2 filter to get the range of a list
app.jinja_env.filters['range'] = lambda l, start, stop: l[start:stop]
#register jinja2 filters
app.jinja_env.tests['today'] = jinja2_filters.is_today
app.jinja_env.filters['is_subscriped'] = is_subscribed
#Dictionary
app.logger.debug("Load dictionary.")
try:
dictionary_ = Dictionary.load(config["dictionary"])
except IOError as ioe:
app.logger.error("Could not load dictionary %s: "
"%s" % (config["dictionary"], ioe))
dictionary_ = None
except KeyError as e:
app.logger.error("Malformed config. Could not get path to dictionary: %s"
% (e))
dictionary_ = None
except Exception as inst:
app.logger.error("Could not load dictionary %s. "
"Unknown error %s: %s" %
(config["dictionary"], type(inst), inst))
dictionary_ = None
@app.route('/')
def index():
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated():
return redirect(request.args.get("next") or url_for('top'))
if request.method == 'POST':
e_mail = request.form['e_mail'].lower()
password = request.form['password']
        if e_mail != None and password != None:
#get user from database
try:
start = time.time()
users = User.objects(email = e_mail)
user = users.first()
end = time.time()
app.logger.info("Getting user took %f.5 seconds." % (end-start))
except Exception as inst:
app.logger.error("Could not login user %s: %s" %
(type(inst), type))
raise abort(500)
if user == None:
app.logger.error("No user found for %s" % e_mail)
flash('Username or password are not correct.', 'error')
else:
#check password
m = hashlib.sha256()
m.update(password.encode("UTF-8"))
m.update(SALT.encode("UTF-8"))
if m.hexdigest() == user.password:
app.logger.debug("Login %s" % e_mail)
login_user(AppUser(user))
return redirect(request.args.get("next") or url_for('top'))
else:
flash('Username or password are not correct.', 'error')
return render_template('login.html')
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/all/')
@app.route('/all/<date>')
@login_required
def all(date=None):
if date == None:
date_ = datetime.now()
else:
date_ = datetime.fromtimestamp(time.mktime(time.strptime(date, u'%d-%m-%Y')))
#check if user has any subscriptions
if len(current_user.get_subscriptions()) == 0:
return render_template('no_subscriptions.html',date=date_,
tab="all", user=current_user.get_user_data)
#get articles
articles_ = current_user.get_articles(date=date_)
read_articles_ = current_user.get_read_articles(date=date_)
if len(articles_) == 0:
return render_template('no_news.html',date=date_,
tab="all", user=current_user.get_user_data)
#render template
return render_template('overview.html',
date=date_, tab="all",
articles= articles_,
read_articles = read_articles_)
@app.route('/read/<key>')
@login_required
def read(key):
try:
article_ = Article.objects(id = key).first()
except ValidationError as ve:
app.logger.error("Error on reading %s (%s): %s" % (key, type(ve), ve))
article_ = None
if article_ == None:
return render_template('no_article.html',
date=datetime.now())
#save user feedback
current_user.save_read_article_feedback(article = article_,
score = 1.0)
#render read article view
return render_template('read.html',
article= article_,
date=datetime.now())
@app.route('/top/')
@app.route('/top/<date>')
@login_required
def top(date=None):
if date == None:
date_ = datetime.now()
else:
date_ = datetime.fromtimestamp(time.mktime(time.strptime(date, u'%d-%m-%Y')))
#check if user has any subscriptions
if len(current_user.mongodb_user.subscriptions) == 0:
return render_template('no_subscriptions.html',
date=date_,
tab="all", user=current_user.mongodb_user)
#get articles
articles_ = current_user.get_top_articles(date = date_,
min_rating = config['rating'])
if len(articles_) == 0:
return render_template('no_news.html',date=date_,
tab="top", user=current_user.mongodb_user)
#render template
return render_template('top_overview.html',
date=date_, tab="top",
articles= articles_)
@app.route('/register')
@login_required
def register():
'''
Registers a new user to service.
'''
#only Karsten is allowed to add a new user
if current_user.get_email() != "<EMAIL>":
return redirect(url_for('index'))
return render_template('add_user.html',
tab="", date = datetime.now())
@app.route('/subscriptions')
@login_required
def subscriptions():
return render_template('subscriptions.html',
tab="subscriptions", date = datetime.now(),
vendors = Vendor.objects())
@app.route('/profile')
@login_required
def profile():
return render_template('profile.html',
tab="profile",
date = datetime.now(),
user_model = current_user.get_trained_profile(),
dictionary = dictionary_)
@app.route('/ajax_change_password', methods = ['POST'])
def ajax_change_password():
if not current_user.is_authenticated():
abort(403)
old_password = request.form['old_password']
new_password = request.form['new_password']
new_password_repeat = request.form['new_password_repeat']
#check old password
m = hashlib.sha256()
m.update(old_password.encode("UTF-8"))
m.update(SALT.encode("UTF-8"))
#old password is wrong
if m.hexdigest() != current_user.mongodb_user['password']:
abort(403)
    if new_password != new_password_repeat:
abort(403)
if new_password == "":
abort(400)
#change password
m = hashlib.sha256()
m.update(new_password.encode("UTF-8"))
m.update(SALT.encode("UTF-8"))
try:
        current_user.set_password(new_password = m.hexdigest())
except OperationError as e:
app.logger.error("Could not save password to database")
abort(500)
except Exception as inst:
app.logger.error("Could not change password %s: %s" % (type(inst), type))
abort(500)
return ""
@app.route('/ajax_subscripe', methods = ['POST'])
def ajax_subscripe():
'''
Called remotely to subscripe current user to a vendor
'''
if not current_user.is_authenticated():
abort(403)
vendor_id = request.form['vendor_id']
app.logger.error("Subscribe user to %s" % vendor_id)
try:
new_vendor = Vendor.objects(id=vendor_id).first()
current_user.add_vendor_to_subscriptions(new_vendor)
except Exception as inst:
app.logger.error("Could not subscribe user %s: %s" % (type(inst), type))
abort(500)
return ""
@app.route('/ajax_unsubscripe', methods = ['POST'])
def ajax_unsubscripe():
'''
Called remotely to unsupscribe current user from vendor
'''
if not current_user.is_authenticated():
abort(403)
vendor_id = request.form['vendor_id']
app.logger.error("Unsubscribe user from %s" % vendor_id)
try:
vendor = Vendor.objects(id=vendor_id).first()
current_user.remove_vendor_from_subscriptions(vendor)
except Exception as inst:
app.logger.error("Could not unsubscribe user %s: %s" % (type(inst), type))
abort(500)
return ""
@app.route('/ajax_add_user', methods = ['POST'])
def ajax_add_user():
'''
Called remotely to add a new user.
'''
if not current_user.is_authenticated():
abort(403)
name = request.form['name']
email = request.form['email'].lower()
new_password = request.form['new_password']
new_password_repeat = request.form['new_password_repeat']
if current_user.mongodb_user.email != "<EMAIL>":
abort(403)
#check passwords
if new_password != new_password_repeat:
abort(400)
if new_password == "":
abort(400)
#hash password
m = hashlib.sha256()
m.update(new_password.encode("UTF-8"))
m.update(SALT.encode("UTF-8"))
#check if user with email address already exists
users_with_same_email = User.objects(email = email)
if len(users_with_same_email) > 0:
abort(400)
try:
app.logger.debug("Adding new user %s" % name)
#just pick the first article as feedback
first_article = Article.objects().first()
first_profile = LearnedProfile(features = first_article.features)
        new_user = User(name = name, password = m.hexdigest(),
email = email,
learned_profile = [first_profile])
new_user.save(safe=True)
first_feedback = ReadArticleFeedback(user_id = new_user.id,
article = first_article,
score = 1.0)
first_feedback.save()
app.logger.debug("...done.")
except Exception as inst:
app.logger.error("Could not add new user: %s: %s" % (type(inst), type))
abort(500)
return ""
#Start app
if __name__ == '__main__':
app.run()
```
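The login, password-change and user-creation views above all repeat the same salted SHA-256 hashing steps; a small helper capturing that pattern could look like this (hypothetical refactoring sketch, not part of the original code, assuming it lives in the same module so `SALT` is in scope):
```python
import hashlib

def hash_password(password, salt=SALT):
    """Reproduce the salted SHA-256 scheme used by the views above."""
    m = hashlib.sha256()
    m.update(password.encode("UTF-8"))
    m.update(salt.encode("UTF-8"))
    return m.hexdigest()

# e.g. during login:  hash_password(request.form['password']) == user.password
```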
#### File: feature_extractor/esa/file_wikicorpus.py
```python
import errno
import exceptions
from gensim import utils, corpora, models
import itertools
import logging
from esamodel import EsaModel, DocumentTitles
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
import multiprocessing
import os
import sys
# Wiki is first scanned for all distinct word types (~7M). The types that appear
# in more than 10% of articles (supposedly stop words) are removed and
# from the rest, the DEFAULT_DICT_SIZE most frequent types are kept.
DEFAULT_DICT_SIZE = 50000
# No word which appear less then NO_BELOW times are kept
NO_BELOW = 20
#Number of topics to create for lda model
NUM_TOPICS = 500
class CleanDocument(object):
"""
Takes a document as a string.
    Tokenizes the document and lemmatizes its words.
    Does not remove stop words because all stop words will be removed later when
    the dictionary is filtered.
Resulting document will be a list of tokens.
Needs to be converted with dictionary.
"""
def __init__(self, document):
'''
:param document: A string with the content of the document.
'''
#use pattern lemmatizer. see gensim.utils.lemmatizer.
#Note: len(words) < 15 are filtered out
self.clean_document_ = utils.lemmatize(document)
def get(self):
return self.clean_document_
def __iter__(self):
'''
Iters through words of document.
'''
for word in self.clean_document_:
yield word
def process_file_path(file_path):
with open(file_path, "r") as file:
#last character is a trailing newline
article_name = file.readline()[:-1]
#remaining lines is doc
doc = " ".join(file.readlines())
lemmatized_doc = utils.lemmatize(doc)
return (article_name, lemmatized_doc)
class CleanCorpus(corpora.TextCorpus):
'''
Loads all documents in a directory from a file system. Each file in a dir
is regarded as a document. It should be a text file.
The first line is the article name.
Stems all words and removes stop words. Tokenizes each document
'''
def __init__(self, fname, no_below=NO_BELOW, keep_words=DEFAULT_DICT_SIZE,
dictionary=None):
'''
See gensim.corpora.textcorpus for details.
:param fname: The path to scan for documents.
'''
self.fname = fname
self.article_names = []
if keep_words is None:
keep_words = DEFAULT_DICT_SIZE
if no_below is None:
no_below = NO_BELOW
self.file_paths = [os.path.join(self.fname, name) for name in os.listdir(self.fname)
if os.path.isfile(os.path.join(self.fname, name))]
self.processes = 2
#each file is considered an article
self.total_articles = len(self.file_paths)
if dictionary is None:
self.dictionary = corpora.Dictionary(self.get_texts())
self.dictionary.filter_extremes(no_below=no_below, no_above=0.1,
keep_n=keep_words)
else:
self.dictionary = dictionary
def get_texts(self):
'''
Files are processed in parallel.
See wikicorpus.py by <NAME>
'''
logger = logging.getLogger("feature_extractor")
logger.info("Scanning %d files." % self.total_articles)
articles_processed = 0
pool = multiprocessing.Pool(self.processes)
for group in utils.chunkize_serial(self.file_paths,
chunksize=10*self.processes):
for article_name, tokens in pool.imap(process_file_path, group):
articles_processed += 1
try:
name = article_name.strip("\n").decode("UTF-8")
except UnicodeDecodeError as e:
logger.error("Could not decode %s: %s" % (article_name, e))
exit(1)
self.article_names.append(name)
yield tokens
pool.terminate()
logger.info("Processed %d articles." % articles_processed)
def save_article_names(self, file_path):
logger.info("Saving article names to %s" % file_path)
with open(file_path, "wb") as fout:
for name in self.article_names:
fout.write("%s\n" % name.encode("UTF-8"))
def load_article_names(self, file_path):
logger.info("Loading article names from %s" % file_path)
#clear old list
self.article_names[:] = []
with open(file_path, "r") as file:
for line in file:
article_name = line.strip("\n").decode("UTF-8")
self.article_names.append(article_name)
def save(save_func, path):
try:
save_func(path)
except IOError as e:
logger.error("Could not save to %s: %s" % (path, e))
answer = raw_input("Do you want to try with a different path? (yes/no)")
if answer != "yes":
raise e
else:
new_path = raw_input("Enter the new path:")
save(save_func, new_path)
except Exception as inst:
logger.error("Unknown error on saving \"%s\" %s: %s" %
(file_path, type(inst), inst))
raise
if __name__ == "__main__":
from optparse import OptionParser
p = OptionParser()
p.add_option('-p', '--path', action="store", dest='doc_path',
help="specify path of wiki documents")
p.add_option('-o', '--output-prefix', action="store", dest='prefix',
help="specify path prefix where everything should be saved")
(options, args) = p.parse_args()
logger = logging.getLogger("feature_extractor")
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
#corpus = CleanCorpus(options.doc_path)
#save dictionary: word <-> token id map
#corpus.dictionary.save(options.prefix + "_wordids.dict")
#save(lambda path: corpus.dictionary.save(path),
# options.prefix + "_wordids.dict")
#corpus.dictionary.save_as_text(options.prefix + "_wordids.dict.txt")
#del corpus
'''Bag-of-Words'''
#init corpus reader and word -> id map
#id2token = corpora.Dictionary.load(options.prefix + "_wordids.dict")
#new_corpus = CleanCorpus(options.doc_path, dictionary = id2token)
#create and save bow-representation of corpus
#corpora.MmCorpus.serialize(options.prefix + '_bow_corpus.mm', new_corpus,
# progress_cnt=10000)
#save article names
#new_corpus.save_article_names(options.prefix + "_articles.txt")
#new_corpus.load_article_names(options.prefix + "_articles.txt")
#del new_corpus
#init corpus reader and word -> id map
id2token = corpora.Dictionary.load(options.prefix + "_wordids.dict")
#mm_bow = corpora.MmCorpus(options.prefix + '_bow_corpus.mm')
'''TFIDF Model creation'''
#build tfidf model
#tfidf = models.TfidfModel(mm_bow, id2word=id2token, normalize=True)
#save tfidf model
#tfidf.save(options.prefix + '_tfidf.model')
#save corpus as tfidf vectors in matrix market format
#corpora.MmCorpus.serialize(options.prefix + '_tfidf_corpus.mm', tfidf[mm_bow],
# progress_cnt=10000)
#init tfidf-corpus reader
#mm_tfidf = corpora.MmCorpus(options.prefix + '_tfidf_corpus.mm')
'''LDA Model creation'''
#build lda model
#lda = models.LdaModel(corpus=mm_tfidf, id2word=id2token,
# num_topics=NUM_TOPICS, update_every=1,
# chunksize=10000, passes=2)
#save trained model
#lda.save(options.prefix + '_lda.model')
#save corpus as lda vectors in matrix market format
#corpora.MmCorpus.serialize(options.prefix + '_lda_corpus.mm', lda[mm_tfidf],
# progress_cnt=10000)
#init lda-corpus reader
mm_lda = corpora.MmCorpus(options.prefix + '_lda_corpus.mm')
'''ESA Model creation'''
#document titles
article_titles = DocumentTitles.load(options.prefix + "_articles.txt")
#build esa model
esa = EsaModel(mm_lda, num_clusters = 10000,
document_titles = article_titles,
num_features = NUM_TOPICS)
esa.save(options.prefix + "_esa_on_lda.model")
logger.info("finished transforming")
```
#### File: shared_modules/unit_tests/extractors_test.py
```python
'''
Created on 22.11.2012
@author: <NAME> <<EMAIL>>
'''
from feature_extractor.extractors import (EsaFeatureExtractor,
TfidfFeatureExtractor,
LdaFeatureExtractor)
import logging
import unittest
from utils.helper import load_config
logger = logging.getLogger("unittesting")
class LDAFeatureExtractorTest(unittest.TestCase):
def setUp(self):
self.config = load_config(file_path = "/home/karten/Programmierung/frontend/config.yaml",
logger = logger,
exit_with_error = True)
def tearDown(self):
pass
def test_get_feature_number(self):
feature_extractor = LdaFeatureExtractor(prefix = self.config['prefix'])
num_topics = feature_extractor.get_feature_number()
self.assertEqual(500, num_topics)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
#### File: shared_modules/unit_tests/test_kmedoids.py
```python
'''
@author: <NAME> <<EMAIL>>
The unittests are not complete.
'''
from kmedoids import KMedoids
from gensim.corpora import Dictionary, MmCorpus
from gensim.models import tfidfmodel
import logging
from profilehooks import profile
import unittest
logger = logging.getLogger("unittesting")
class TestKMedoids(unittest.TestCase):
def setUp(self):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.DEBUG)
def tearDown(self):
pass
@profile
def get_kmedoids(self, corpus, num_features, num_clusters, max_iterations):
return KMedoids(corpus = corpus, num_features = num_features,
num_clusters = num_clusters,
max_iterations = max_iterations)
@profile
def cluster(self, clusterer):
return clusterer.cluster()
def test_cluster(self):
#load tf-idf corpus
tfidf_corpus = MmCorpus('/media/sdc1/test_dump/result/test_tfidf_corpus.mm')
#load dictionary
id2token = Dictionary.load("/media/sdc1/test_dump/result/test_wordids.dict")
kmedoids = self.get_kmedoids(tfidf_corpus, len(id2token),
num_clusters = 15,
max_iterations = 5)
clusters = self.cluster(kmedoids)
print clusters
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
```
|
{
"source": "JesDanis/Sweaden_Prueba",
"score": 2
}
|
#### File: doctype/prospectos_socio/prospectos_socio.py
```python
import frappe
from frappe.model.document import Document
class prospectos_socio(Document):
pass
# def validate(self):
# if len(self.nombres)<3:
# frappe.throw("Nombre inválido")
```
|
{
"source": "jesearl/theverse",
"score": 3
}
|
#### File: theverse/classes/refstr.py
```python
from typing import Optional
from ..err import TheVerseError
class RefStr(str):
'''
String that also provides reference information.
'''
def __new__(cls,
string: str,
*,
name=None,
reference: Optional[str]=None,
reference_url: Optional[str]=None):
inst = super().__new__(cls, string)
if name is not None and not isinstance(name, str):
raise TypeError
inst._name = name
inst._object = None
if reference is None and reference_url is None:
raise TypeError('At least one of "reference" and "reference_url" must be given')
if any(x is not None and not isinstance(x, str) for x in (reference, reference_url)):
raise TypeError
inst._reference = reference
inst._reference_url = reference_url
return inst
@property
def name(self):
return self._name
@property
def reference(self):
return self._reference
@property
def reference_url(self):
return self._reference_url
def link_object(self, object):
if self._object is not None:
raise TheVerseError(f'"{self.name}" ({self.__class__.__name__}) is already linked to '
f'"{self._object.name}" ({self._object.__class__.__name__})')
self._object = object
def unlink_object(self, object):
if self._object is object:
if not object.unlinking:
raise TheVerseError('Can only unlink an object by calling its ".unlink()" method')
self._object = None
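# Minimal usage sketch (hypothetical values): RefStr behaves like a plain str but
# carries reference metadata; at least one of reference/reference_url is required.
#   age = RefStr('13.8 Gyr', name='age of the universe',
#                reference='Planck Collaboration 2018',
#                reference_url='https://example.org/planck2018')
#   age.reference_url  # 'https://example.org/planck2018'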
```
|
{
"source": "jeseem/mesos",
"score": 2
}
|
#### File: mesos/support/test-upgrade.py
```python
import argparse
import os
import subprocess
import sys
import tempfile
import time
DEFAULT_PRINCIPAL = 'foo'
DEFAULT_SECRET = 'bar'
# Helper class to keep track of process lifecycles, i.e., starting processes,
# capturing output, and checking liveness during delays/sleep.
class Process:
def __init__(self, args, environment=None):
outfile = tempfile.mktemp()
fout = open(outfile, 'w')
print 'Run %s, output: %s' % (args, outfile)
# TODO(nnielsen): Enable glog verbose logging.
self.process = subprocess.Popen(args,
stdout=fout,
stderr=subprocess.STDOUT,
env=environment)
# Polls the process for the specified number of seconds, returning the
# process's return value if it ends during that time. Returns `True` if the
# process is still running after that time period.
def sleep(self, seconds):
poll_time = 0.1
while seconds > 0:
seconds -= poll_time
time.sleep(poll_time)
poll = self.process.poll()
if poll != None:
return poll
return True
def __del__(self):
if self.process.poll() == None:
self.process.kill()
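# Rough usage sketch (hypothetical command): Process captures stdout/stderr in a
# temp file, and sleep() doubles as a liveness check.
#   p = Process(['sleep', '5'])
#   still_running = p.sleep(1)   # True while the child is alive,
#                                # otherwise the child's exit code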
# Class representing an agent process.
class Agent(Process):
def __init__(self, path, work_dir, credfile):
Process.__init__(self, [os.path.join(path, 'bin', 'mesos-agent.sh'),
'--master=127.0.0.1:5050',
'--credential=' + credfile,
'--work_dir=' + work_dir,
'--resources=disk:2048;mem:2048;cpus:2'])
pass
# Class representing a master process.
class Master(Process):
def __init__(self, path, work_dir, credfile):
Process.__init__(self, [os.path.join(path, 'bin', 'mesos-master.sh'),
'--ip=127.0.0.1',
'--work_dir=' + work_dir,
'--authenticate',
'--credentials=' + credfile,
'--roles=test'])
pass
# Class representing a framework instance (the test-framework for now).
#
# TODO(greggomann): Add support for multiple frameworks.
class Framework(Process):
def __init__(self, path):
# The test-framework can take these parameters as environment variables,
# but not as command-line parameters.
environment = {
# In Mesos 0.28.0, the `MESOS_BUILD_DIR` environment variable in the
# test framework was changed to `MESOS_HELPER_DIR`, and the '/src'
# subdirectory was added to the variable's path. Both are included
# here for backwards compatibility.
'MESOS_BUILD_DIR': path,
'MESOS_HELPER_DIR': os.path.join(path, 'src'),
'MESOS_AUTHENTICATE': '1',
'DEFAULT_PRINCIPAL': DEFAULT_PRINCIPAL,
'DEFAULT_SECRET': DEFAULT_SECRET
}
Process.__init__(self, [os.path.join(path, 'src', 'test-framework'),
'--master=127.0.0.1:5050'], environment)
pass
# Convenience function to get the Mesos version from the built executables.
def version(path):
p = subprocess.Popen([os.path.join(path, 'bin', 'mesos-master.sh'),
'--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = p.communicate()
rc = p.returncode
if rc != 0:
return False
return output[:-1]
# Script to test the upgrade path between two versions of Mesos.
#
# TODO(nnielsen): Add support for zookeeper and failover of master.
# TODO(nnielsen): Add support for testing scheduler live upgrade/failover.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test upgrade path between two mesos builds')
parser.add_argument('--prev',
type=str,
help='Build path to mesos version to upgrade from',
required=True)
parser.add_argument('--next',
type=str,
help='Build path to mesos version to upgrade to',
required=True)
args = parser.parse_args()
prev_path = args.prev
next_path = args.next
# Get the version strings from the built executables.
prev_version = version(prev_path)
next_version = version(next_path)
if prev_version == False or next_version == False:
print 'Could not get mesos version numbers'
sys.exit(1)
# Write credentials to temporary file.
credfile = tempfile.mktemp()
with open(credfile, 'w') as fout:
fout.write(DEFAULT_PRINCIPAL + ' ' + DEFAULT_SECRET)
# Create a work directory for the master.
master_work_dir = tempfile.mkdtemp()
# Create a work directory for the agent.
agent_work_dir = tempfile.mkdtemp()
print 'Running upgrade test from %s to %s' % (prev_version, next_version)
print """\
+--------------+----------------+----------------+---------------+
| Test case | Framework | Master | Agent |
+--------------+----------------+----------------+---------------+
| #1 | %s\t| %s\t | %s\t |
| #2 | %s\t| %s\t | %s\t |
| #3 | %s\t| %s\t | %s\t |
| #4 | %s\t| %s\t | %s\t |
+--------------+----------------+----------------+---------------+
NOTE: live denotes that master process keeps running from previous case.
""" % (prev_version, prev_version, prev_version,
prev_version, next_version, prev_version,
prev_version, next_version, next_version,
next_version, next_version, next_version)
print ''
print 'Test case 1 (Run of previous setup)'
print '##### Starting %s master #####' % prev_version
prev_master = Master(prev_path, master_work_dir, credfile)
if prev_master.sleep(0.5) != True:
print '%s master exited prematurely' % prev_version
sys.exit(1)
print '##### Starting %s agent #####' % prev_version
prev_agent = Agent(prev_path, agent_work_dir, credfile)
if prev_agent.sleep(0.5) != True:
print '%s agent exited prematurely' % prev_version
sys.exit(1)
print '##### Starting %s framework #####' % prev_version
print 'Waiting for %s framework to complete (10 sec max)...' % prev_version
prev_framework = Framework(prev_path)
if prev_framework.sleep(10) != 0:
print '%s framework failed' % prev_version
sys.exit(1)
print ''
print 'Test case 2 (Upgrade master)'
# NOTE: Need to stop and start the agent because standalone detector does
# not detect master failover.
print '##### Stopping %s agent #####' % prev_version
prev_agent.process.kill()
print '##### Stopping %s master #####' % prev_version
prev_master.process.kill()
print '##### Starting %s master #####' % next_version
next_master = Master(next_path, master_work_dir, credfile)
if next_master.sleep(0.5) != True:
print '%s master exited prematurely' % next_version
sys.exit(1)
print '##### Starting %s agent #####' % prev_version
prev_agent = Agent(prev_path, agent_work_dir, credfile)
if prev_agent.sleep(0.5) != True:
print '%s agent exited prematurely' % prev_version
sys.exit(1)
print '##### Starting %s framework #####' % prev_version
print 'Waiting for %s framework to complete (10 sec max)...' % prev_version
prev_framework = Framework(prev_path)
if prev_framework.sleep(10) != 0:
print '%s framework failed with %s master and %s agent' % (prev_version,
next_version,
prev_version)
sys.exit(1)
print ''
print 'Test case 3 (Upgrade agent)'
print '##### Stopping %s agent #####' % prev_version
prev_agent.process.kill()
print '##### Starting %s agent #####' % next_version
next_agent = Agent(next_path, agent_work_dir, credfile)
if next_agent.sleep(0.5) != True:
print '%s agent exited prematurely' % next_version
sys.exit(1)
print '##### Starting %s framework #####' % prev_version
print 'Waiting for %s framework to complete (10 sec max)...' % prev_version
prev_framework = Framework(prev_path)
if prev_framework.sleep(10) != 0:
print '%s framework failed with %s master and %s agent' % (prev_version,
next_version,
next_version)
sys.exit(1)
print ''
print 'Test case 4 (Run of next setup)'
print '##### Starting %s framework #####' % next_version
print 'Waiting for %s framework to complete (10 sec max)...' % next_version
next_framework = Framework(next_path)
if next_framework.sleep(10) != 0:
print '%s framework failed with %s master and %s agent' % (next_version,
next_version,
next_version)
sys.exit(1)
# Test passed.
sys.exit(0)
```
|
{
"source": "jesenzhang/UnityMisc",
"score": 2
}
|
#### File: Tools/python/texture_compress.py
```python
import re
import os
import sys
import os.path
import argparse
import platform
from PIL import Image
import subprocess
import time # import the time module
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--path', type=str, default = None)
parser.add_argument('--out', type=str, default = None)
args = parser.parse_args()
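# Example invocation (hypothetical paths), compressing every png/jpg under a
# folder into a separate output folder:
#   python texture_compress.py --path D:\textures --out D:\textures_out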
path = args.path.replace("\\","/")
try:
path = unicode(path, 'GB2312') # decode from GB2312 (Python 2 only)
except:
pass # Python 3 removed unicode() and defaults to UTF-8, so no conversion is needed
out=None
if args.out!=None:
out = args.out.replace("\\","/")
try:
out = unicode(out, 'GB2312') # decode from GB2312 (Python 2 only)
except:
pass # Python 3 removed unicode() and defaults to UTF-8, so no conversion is needed
# check whether the output target is a directory
print("out :"+ out)
if not os.path.isfile(out):
if (os.path.exists(out)==False):
os.makedirs(out)
abs_file=__file__
print("abs path is %s" %(__file__))
abs_dir=abs_file[:abs_file.rfind("\\")] # Windows uses \\ as the path separator, Linux uses /
abs_dir = os.path.abspath(sys.argv[0])
abs_dir=abs_dir[:abs_dir.rfind("\\")]
print("abs path is %s" %(os.path.abspath(sys.argv[0])))
# get the process's current working directory
currentpath = abs_dir;# os.getcwd().replace("\\","/")
systemType = platform.system()
pngquant = systemType=="Windows" and "../pngquant/pngquant.exe" or systemType=="Mac" and "../pngquant/pngquant" or "../pngquant/pngquant.exe"
pngquant = currentpath+"/"+pngquant
print("pngquant "+pngquant)
guetzli = systemType=="Windows" and "../guetzli/guetzli_windows_x86-64.exe" or systemType=="Mac" and "../guetzli/guetzli_darwin_x86-64" or "../guetzli/guetzli_windows_x86-64.exe"
guetzli = currentpath+"/"+guetzli
print("guetzli "+guetzli)
# lossy JPEG compression
cjpeg = systemType=="Windows" and "../mozjpeg/windows3.3.1/cjpeg-static.exe" or systemType=="Mac" and "../mozjpeg/mac3.3.1/cjpeg" or "../mozjpeg/windows3.3.1/cjpeg-static.exe"
cjpeg = currentpath+"/"+cjpeg
print("cjpeg "+cjpeg)
# lossless JPEG compression
jpegtran = systemType=="Windows" and "../mozjpeg/windows3.3.1/jpegtran-static.exe" or systemType=="Mac" and "../mozjpeg/mac3.3.1/jpegtran" or "../mozjpeg/windows3.3.1/jpegtran-static.exe"
jpegtran = currentpath+"/"+jpegtran
print("jpegtran "+jpegtran)
# helper function: create the directory for an output path
def makeDir(outpath):
(dirPath, file) = os.path.split(outpath)
if not os.path.isfile(dirPath):
if (os.path.exists(dirPath)==False):
os.makedirs(dirPath)
# helper function: compress a single image
def compressTexture( filePath ,fileName , outpath ):
if os.path.isfile(filePath):
# regex to match image file names
# pat = "^[a-z0-9_]+\.(png|jpg)"
pat = "(.*)\.(png|jpg)$"
# run the match
matchObj = re.match(pat, fileName)
if matchObj!=None:
img = Image.open(filePath)
#format JPEG PNG
imgFormat = img.format
print("fileName "+ fileName + " imgFormat "+ imgFormat)
oldname = fileName.lower()
oldname = re.sub(' ','',oldname)
# strip the file extension
imagename= os.path.splitext(oldname)[0]
suffix= os.path.splitext(oldname)[1]
print(suffix)
img.close()
if imgFormat=="JPEG":
# print(os.system(guetzli +" --quality 84 --verbose "+ filePath + " " + outpath))
#print(os.system(jpegtran + " -outfile " + outpath +" "+ filePath ))
tempPath = outpath
needRename = False
if outpath==filePath:
needRename = True
outpath=filePath.replace(fileName,imagename+suffix)
tempPath=filePath.replace(fileName,imagename+"_c"+suffix)
makeDir(outpath)
print("tempPath " + tempPath + " outpath "+outpath)
cmd = cjpeg + " -quality 75 -smooth 0 -baseline -sample 2x2 -quant-table 3 -outfile " + tempPath +" "+ filePath
ret = subprocess.run(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8",timeout=1)
if ret.returncode == 0:
print("success:",ret)
else:
print("error:",ret)
if ret.returncode ==0 and needRename:
print("rename")
# print(os.access(outpath, os.X_OK)) # is the file executable
os.remove(outpath)
os.rename(tempPath , outpath)
elif imgFormat=="PNG":
makeDir(outpath)
print("filePath " +filePath)
print("outpath " +outpath)
print(os.system(pngquant +" --force --skip-if-larger --verbose --speed=1 --quality=45-85 "+ filePath + " --output "+ outpath))
print(os.system(pngquant +" --force --skip-if-larger --verbose --ordered --speed=1 --quality=50-90 --ext=.png "+ outpath))
print(fileName + " Done")
if os.path.isfile(path):
# parent_path = os.path.dirname(path)
(parent_path, file) = os.path.split(path)
# pattern = re.compile(r'([^<>/\\\|:""\*\?]+)\.\w+$')
# filename = pattern.findall(path)
fileList = [file]
# 将当前工作目录修改为待修改文件夹的位置
os.chdir(parent_path)
print("parent_path "+parent_path)
filePath = path.replace("\\","/")
outFile = os.path.isfile(out)
if out==None or (outFile==False):
out=parent_path
outpath = filePath.replace(parent_path,out).replace("\\","/")
elif os.path.isfile(out):
outpath = out
print("filePath "+filePath)
print("file "+file)
print("outpath "+outpath)
compressTexture(path,file,outpath)
else:
parent_path = path
fileList = os.listdir(parent_path)
# 将当前工作目录修改为待修改文件夹的位置
os.chdir(parent_path)
print("parent_path "+parent_path)
for root, dirs, files in os.walk(path):
print('root_dir:', root) # 当前目录路径
print('sub_dirs:', dirs) # 当前路径下所有子目录
print('files:', files) # 当前路径下所有非目录子文件
for fileName in files:
filePath = os.path.join(root,fileName).replace("\\","/")
if out==None:
out=path
outpath = filePath.replace(path,out).replace("\\","/")
print("filePath "+filePath)
print("outpath "+outpath)
compressTexture(filePath,fileName,outpath)
# # iterate over all files in the folder
# for fileName in fileList:
# if os.path.isfile(fileName):
# # regex to match image file names
# # pat = "^[a-z0-9_]+\.(png||jpg)"
# pat = "(.*)\.(png||jpg)"
# # run the match
# matchObj = re.match(pat, fileName)
# print(fileName)
# if matchObj!=None:
# oldname = fileName.lower()
# oldname = re.sub(' ','',oldname)
# # strip the file extension
# imagename= os.path.splitext(oldname)[0]
# suffix= os.path.splitext(oldname)[1]
# print(suffix)
# if suffix==".jpg":
# if out==None:
# outpath =os.path.join(parent_path,fileName).replace("\\","/")
# else:
# outpath =os.path.join(out,fileName).replace("\\","/")
# print(os.system(guetzli +" --quality 84 --verbose "+ fileName + " " + outpath))
# elif suffix==".png":
# outpath =os.path.join(out,fileName).replace("\\","/")
# if out==None:
# print(os.system(pngquant +" --force --skip-if-larger --verbose --speed=1 --quality=45-85 --ext=.png "+ fileName))
# print(os.system(pngquant +" --force --skip-if-larger --verbose --ordered --speed=1 --quality=50-90 --ext=.png "+ fileName))
# else:
# print(os.system(pngquant +" --force --skip-if-larger --verbose --speed=1 --quality=45-85 "+ fileName + " --output "+ outpath))
# print(os.system(pngquant +" --force --skip-if-larger --verbose --ordered --speed=1 --quality=50-90 --ext=.png "+ outpath))
# print(fileName + " Done")
sys.stdin.flush()
os.chdir(currentpath)
print("Done")
```
|
{
"source": "Jesewe/jpower-main",
"score": 3
}
|
#### File: Jesewe/jpower-main/__init__.py
```python
import ctypes
import webbrowser
# Functions
def window_title(title):
ctypes.windll.kernel32.SetConsoleTitleW(title)
def url_open(title):
webbrowser.open(title)
def msgbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
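# Usage sketch (assumed values): style follows the WinAPI MessageBox flags,
# e.g. 0 shows a plain OK box and 1 shows OK/Cancel.
#   window_title("My Tool")
#   msgbox("Hello", "Jpower says hi", 0)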
# Information about Module #
__version__ = '1.1'
__description__ = 'Jpower - module for python.\nIt adds some commands and notifications.'
```
|
{
"source": "Jesewe/Twitch-Channel-Points-Miner---Russian-Version",
"score": 2
}
|
#### File: Jesewe/Twitch-Channel-Points-Miner---Russian-Version/login.py
```python
import getpass
import os
import browser_cookie3
import requests
from cookies import get_cookies_path, save_cookies_to_file
from exceptions import StreamerDoesNotExistException
from twitch_data import get_client_id, get_channel_id
# Based on https://github.com/derrod/twl.py
# Original Copyright (c) 2020 Rodney
# The MIT License (MIT)
def check_login():
if not os.path.exists(get_cookies_path()):
twitch_login = TwitchLogin()
success = twitch_login.login_flow()
if success:
twitch_login.save_cookies()
print("Авторизация успешна!")
return success
else:
return True
class TwitchLogin:
def __init__(self):
self.token = None
self.login_check_result = False
self.session = requests.session()
self.session.headers.update({'Client-ID': get_client_id()})
self.username = None
self.user_id = None
self.email = None
def login_flow(self):
print("Вам нужно будет войти в Twitch!")
post_data = {
'client_id': get_client_id(),
'undelete_user': False,
'remember_me': True
}
use_backup_flow = False
while True:
self.username = input('Введите имя пользователя Twitch:')
password = getpass.getpass('Введите пароль Twitch: ')
post_data['username'] = self.username
post_data['password'] = password
while True:
# Try login without 2FA
login_response = self.send_login_request(post_data)
if 'captcha_proof' in login_response:
post_data['captcha'] = dict(proof=login_response['captcha_proof'])
if 'error_code' in login_response:
err_code = login_response['error_code']
if err_code == 3011 or err_code == 3012: # missing 2fa token
if err_code == 3011:
print('Включена двухфакторная аутентификация, введите токен ниже.')
else:
print('Неверный двухфакторный токен. Повторите попытку.')
twofa = input('2FA token: ')
post_data['authy_token'] = twofa.strip()
continue
elif err_code == 3022 or err_code == 3023: # login verification code required
if err_code == 3022:
print('Требуется код подтверждения входа.')
self.email = login_response['obscured_email']
else:
print('Введен неверный код подтверждения входа, попробуйте еще раз.')
twofa = input(f'Пожалуйста, введите 6-значный код, отправленный на {self.email}: ')
post_data['twitchguard_code'] = twofa.strip()
continue
elif err_code == 3001: # invalid password
print('Неправильное имя пользователя или пароль, пожалуйста, попробуйте снова.')
break
elif err_code == 1000:
print('Вход в консоль недоступен (требуется разгадывание CAPTCHA).')
use_backup_flow = True
break
else:
print(f'Неизвестная ошибка: {login_response}')
raise NotImplementedError(f'Unknown TwitchAPI error code: {err_code}')
if 'access_token' in login_response:
self.set_token(login_response['access_token'])
return self.check_login()
if use_backup_flow:
break
if use_backup_flow:
self.set_token(self.login_flow_backup())
return self.check_login()
return False
def set_token(self, new_token):
self.token = new_token
self.session.headers.update({'Authorization': f'Bearer {self.token}'})
def send_login_request(self, json_data):
r = self.session.post('https://passport.twitch.tv/login', json=json_data)
j = r.json()
return j
def login_flow_backup(self):
"""Backup OAuth login flow in case manual captcha solving is required"""
browser = input("Какой браузер вы используете? Chrome (1), Firefox (2), Другое (3):").strip()
if browser not in ("1", "2"):
print("Ваш браузер не поддерживается, извините.")
return None
input("Пожалуйста, войдите в выбранный вами браузер (НЕ в режиме инкогнито) и нажмите Enter...")
print("Загрузка файлов cookie, сохраненных на вашем компьютере...")
twitch_domain = ".twitch.tv"
if browser == "1": # chrome
cookie_jar = browser_cookie3.chrome(domain_name=twitch_domain)
else:
cookie_jar = browser_cookie3.firefox(domain_name=twitch_domain)
cookies_dict = requests.utils.dict_from_cookiejar(cookie_jar)
self.username = cookies_dict.get("login")
return cookies_dict.get("auth-token")
def check_login(self):
if self.login_check_result:
return self.login_check_result
if self.token is None:
return False
try:
self.user_id = get_channel_id(self.username)
self.login_check_result = True
except StreamerDoesNotExistException:
self.login_check_result = False
return self.login_check_result
def save_cookies(self):
cookies_dict = self.session.cookies.get_dict()
cookies_dict["auth-token"] = self.token
if "persistent" not in cookies_dict: # saving user id cookies
cookies_dict["persistent"] = self.user_id
save_cookies_to_file(cookies_dict)
```
#### File: Jesewe/Twitch-Channel-Points-Miner---Russian-Version/raid.py
```python
from claim_bonus import post_gql_request
class Raid:
def __init__(self, raid_id, target_login):
self.raid_id = raid_id
self.target_login = target_login
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.raid_id == other.raid_id
else:
return False
raid_id_for_streamer = {}
# streamer_login is going to raid someone
def update_raid(streamer_login, raid):
if raid_id_for_streamer.get(streamer_login) != raid:
raid_id_for_streamer[streamer_login] = raid
post_gql_request(
{"operationName": "JoinRaid",
"variables": {"input": {"raidID": raid.raid_id}},
"extensions": {"persistedQuery": {"version": 1, "sha256Hash": "c6a332a86d1087fbbb1a8623aa01bd1313d2386e7c63be60fdb2d1901f01a4ae"}}})
print(f"Присоединяюсь к рейду от {streamer_login} до {raid.target_login}!")
```
|
{
"source": "jesford/clusterlensing",
"score": 3
}
|
#### File: clusterlensing/clusterlensing/clusters.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from astropy import units
import astropy.cosmology
from clusterlensing.nfw import SurfaceMassDensity
from clusterlensing import cofm
from clusterlensing import utils
try:
from IPython.display import display
notebook_display = True
except:
notebook_display = False
def calc_delta_c(c200):
"""Calculate characteristic overdensity from concentration.
Parameters
----------
c200 : ndarray or float
Cluster concentration parameter.
Returns
----------
ndarray or float
Cluster characteristic overdensity, of same type as c200.
"""
top = (200. / 3.) * c200**3.
bottom = np.log(1. + c200) - (c200 / (1. + c200))
return (top / bottom)
def richness_to_mass(richness, norm=2.7e13, slope=1.4):
"""Calculate mass from richness.
Mass-richness relation assumed is:
mass = norm * (richness / 20) ^ slope.
Parameters
----------
richness : ndarray or float
Cluster richness value(s).
norm : float, optional
Normalization of mass-richness relation in units of solar masses,
defaults to 2.7e13.
slope : float, optional
Slope of mass-richness relation in units of solar masses, defaults
to 1.4.
Returns
----------
ndarray or float
Cluster mass(es) in units of solar masses, of same type as
richness.
See Also
----------
mass_to_richness : The inverse of this function.
"""
mass = norm * ((richness / 20.) ** slope)
return mass
def mass_to_richness(mass, norm=2.7e13, slope=1.4):
"""Calculate richness from mass.
Mass-richness relation assumed is:
mass = norm * (richness / 20) ^ slope.
Parameters
----------
mass : ndarray or float
Cluster mass value(s) in units of solar masses.
norm : float, optional
Normalization of mass-richness relation in units of solar masses,
defaults to 2.7e13.
slope : float, optional
Slope of mass-richness relation in units of solar masses, defaults
to 1.4.
Returns
----------
ndarray or float
Cluster richness(es), of same type as mass.
See Also
----------
richness_to_mass : The inverse of this function.
"""
richness = 20. * (mass / norm)**(1. / slope)
return richness
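# Quick sanity check of the power law (default norm=2.7e13, slope=1.4): a
# richness of 20 maps to the normalization mass, and the two functions invert
# each other, e.g.
#   richness_to_mass(20.0)                     # 2.7e13
#   mass_to_richness(richness_to_mass(35.0))   # ~35.0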
class ClusterEnsemble(object):
"""Ensemble of galaxy clusters and their properties.
The ClusterEnsemble object contains parameters and calculated
values for every individual cluster in a sample. Initializing with
a collection of redshifts z will fix the number of clusters
described by the object. Setting the n200 values will then populate
the object with the full set of available attributes (except for
the NFW profiles). The values of z, n200, massrich_norm, and
massrich_slope can be altered and these changes will propogate to
the other (dependent) attributes.
In order to generate the attributes containing the NFW halo
profiles, sigma_nfw and deltasigma_nfw, pass the desired radial
bins (and additional optional parameters) to the calc_nfw method.
Parameters
----------
z : array_like
Redshifts for each cluster in the sample. Should be 1D.
Attributes
----------
z
n200
m200
c200
delta_c
r200
rs
Dang_l
dataframe
massrich_norm
massrich_slope
describe : str
Short description of the ClusterEnsemble object.
number : int
Number of clusters in the sample.
sigma_nfw : Quantity
Surface mass density of every cluster, (1D ndarray) with
astropy.units of Msun/pc/pc. Generated for each element of rbins
by running calc_nfw(rbins) method.
deltasigma_nfw : Quantity
Differential surface mass density of every cluster, (1D ndarray)
with astropy.units of Msun/pc/pc. Generated for each element of
rbins by running calc_nfw(rbins) method.
Methods
----------
calc_nfw(rbins, offsets=None, numTh=200, numRoff=200, numRinner=20,
factorRouter=3)
Generate Sigma and DeltaSigma NFW profiles for each cluster,
optionally with miscentering offsets included.
show(notebook=True)
Display table of cluster information and mass-richness
scaling relation in use.
massrich_parameters()
Print a string showing the mass-richness scaling relation and
current values of the normalization and slope.
Other Parameters
----------------
cosmology : astropy.cosmology instance, optional
Cosmology to use in calculations, default astropy.cosmology.Planck13.
Must be an instance of astropy.cosmology with 'h' and 'Om0' attributes.
cm : str, optional
Concentration-mass relation to use, default 'DuttonMaccio'. Other
choices are 'Prada' and 'Duffy'.
"""
def __init__(self, redshifts, cosmology=astropy.cosmology.Planck13,
cm='DuttonMaccio'):
if type(redshifts) != np.ndarray:
redshifts = np.array(redshifts)
if redshifts.ndim != 1:
raise ValueError("Input redshift array must have 1 dimension.")
if np.sum(redshifts < 0.) > 0:
raise ValueError("Redshifts cannot be negative.")
if hasattr(cosmology, 'h') and hasattr(cosmology, 'Om0'):
self._cosmo = cosmology
else:
raise TypeError("Input cosmology must be an instance of \
astropy.cosmology")
if cm == 'DuttonMaccio':
self._cm = 'DuttonMaccio'
elif cm == 'Prada':
self._cm = 'Prada'
elif cm == 'Duffy':
self._cm = 'Duffy'
else:
raise ValueError('Input concentration-mass relation must be \
one of: DuttonMaccio, Prada, Duffy.')
self.describe = "Ensemble of galaxy clusters and their properties."
self.number = redshifts.shape[0]
self._z = redshifts
self._rho_crit = self._cosmo.critical_density(self._z)
self._massrich_norm = 2.7 * (10**13) * units.Msun
self._massrich_slope = 1.4
self._df = pd.DataFrame(self._z, columns=['z'])
self._Dang_l = self._cosmo.angular_diameter_distance(self._z)
self._m200 = None
self._n200 = None
self._r200 = None
self._rs = None
self._c200 = None
self._deltac = None
@property
def n200(self):
"""Cluster richness values.
If n200 is set directly, then mass m200 is calculated from n200
using the mass-richness scaling relation specified by the
parameters massrich_norm and massrich_slope. If m200 is set
directly, then n200 is calculated from m200 using the same scaling
relation. Changes to n200 will propagate to all mass-dependent
variables.
:property: Returns cluster richness values
:property type: ndarray
:setter: Sets cluster richness values
:setter type: array_like
"""
if self._n200 is None:
raise AttributeError('n200 has not yet been initialized.')
else:
return self._n200
@n200.setter
def n200(self, richness):
# Creates/updates values of cluster N200s & dependant variables.
self._n200 = utils.check_units_and_type(richness, None,
num=self.number)
self._df['n200'] = pd.Series(self._n200, index=self._df.index)
self._richness_to_mass()
@property
def m200(self):
"""Cluster masses.
Mass interior to a sphere of radius r200. If m200 is set directly,
then richness n200 is calculated from m200 using the mass-richness
scaling relation specified by the parameters massrich_norm and
massrich_slope. If n200 is set directly, then m200 is calculated
from n200 using the same scaling relation. Changes to m200 will
propagate to all mass-dependant variables.
:property: Returns cluster masses in Msun
:property type: Quantity
1D ndarray, with astropy.units of Msun.
:setter: Sets cluster mass values in Msun
:setter type: array_like
Should be 1D array or list, optionally with units.
"""
if self._m200 is None:
raise AttributeError('Attribute has not yet been initialized.')
else:
return self._m200
@m200.setter
def m200(self, mass):
# Creates/updates values of cluster M200s & dependant variables.
self._m200 = utils.check_units_and_type(mass, units.Msun,
num=self.number)
self._df['m200'] = pd.Series(self._m200, index=self._df.index)
self._mass_to_richness()
def _richness_to_mass(self):
# Calculates M_200 for simple power-law scaling relation
# (with default parameters from arXiv:1409.3571)
m200 = richness_to_mass(self._n200, norm=self._massrich_norm.value,
slope=self._massrich_slope)
self._m200 = m200 * units.Msun
self._df['m200'] = pd.Series(self._m200, index=self._df.index)
self._update_dependant_variables()
def _mass_to_richness(self):
# Calculates N_200 for simple power-law scaling relation.
# Inverse of _richness_to_mass() function.
n200 = mass_to_richness(self._m200.value,
norm=self._massrich_norm.value,
slope=self._massrich_slope)
# note: units cancel but n200 is still a Quantity
self._n200 = n200
self._df['n200'] = pd.Series(self._n200, index=self._df.index)
self._update_dependant_variables()
@property
def z(self):
"""Cluster redshifts.
:property: Returns cluster redshifts
:property type: ndarray
:setter: Sets cluster redshifts
:setter type: array_like
"""
return self._z
@z.setter
def z(self, redshifts):
# Changes the values of the cluster z's and z-dependant variables.
self._z = utils.check_units_and_type(redshifts, None, num=self.number)
self._Dang_l = self._cosmo.angular_diameter_distance(self._z)
self._df['z'] = pd.Series(self._z, index=self._df.index)
self._rho_crit = self._cosmo.critical_density(self._z)
if self._n200 is not None:
self._update_dependant_variables()
def _update_dependant_variables(self):
self._calculate_r200()
self._calculate_concentrations()
self._calculate_rs()
# what else depends on z or m or?
@property
def Dang_l(self):
"""Angular diameter distances to clusters.
:property: Returns distances in Mpc
:type: Quantity (1D ndarray, with astropy.units of Mpc)
"""
return self._Dang_l
@property
def dataframe(self):
"""Pandas DataFrame of cluster properties.
:property: Returns DataFrame
:type: pandas.core.frame.DataFrame
"""
return self._df
@property
def massrich_norm(self):
"""Normalization of Mass-Richness relation:
M200 = norm * (N200 / 20) ^ slope.
Changes to massrich_norm will propagate to all mass-dependent
variables. (This will take current n200 values and convert them to
m200; in order to retain original values of m200, save them in a
temporary variable and reset them after this change).
:property: Returns normalization in Msun
:property type: Quantity
float, with astropy.units of Msun. Default is 2.7e+13 Msun.
:setter: Sets normalization in Msun
:setter type: float (optionally in astropy.units of Msun)
"""
return self._massrich_norm
@massrich_norm.setter
def massrich_norm(self, norm):
self._massrich_norm = utils.check_units_and_type(norm, units.Msun,
is_scalar=True)
# behavior is to convert current n200 -> new m200
if hasattr(self, 'n200'):
self._richness_to_mass()
@property
def massrich_slope(self):
"""Slope of Mass-Richness relation:
M200 = norm * (N200 / 20) ^ slope.
Changes to massrich_slope will propagate to all mass-dependent
variables. (This will take current n200 values and convert them to
m200; in order to retain original values of m200, save them in a
temporary variable and reset them after this change).
:property: Returns slope
:property type: float
Default value is 1.4.
:setter: Sets slope
:setter type: float
"""
return self._massrich_slope
@massrich_slope.setter
def massrich_slope(self, slope):
if type(slope) == float:
self._massrich_slope = slope
else:
raise TypeError('Expecting input type as float')
# behavior is to convert current n200 -> new m200
if hasattr(self, 'n200'):
self._richness_to_mass()
def massrich_parameters(self):
"""Print values of M200-N200 scaling relation parameters."""
print("\nMass-Richness Power Law: M200 = norm * (N200 / 20) ^ slope")
print(" norm:", self._massrich_norm)
print(" slope:", self._massrich_slope)
def show(self, notebook=notebook_display):
"""Display cluster properties and scaling relation parameters."""
print("\nCluster Ensemble:")
if notebook is True:
display(self._df)
elif notebook is False:
print(self._df)
self.massrich_parameters()
@property
def r200(self):
"""Cluster Radii.
r200 is the cluster radius within which the mean density is 200
times the critical energy density of the universe at that z.
:property: Returns r200 in Mpc
:type: Quantity (1D ndarray, in astropy.units of Mpc)
"""
if self._r200 is None:
raise AttributeError('Attribute has not yet been initialized.')
else:
return self._r200
@property
def c200(self):
"""Cluster concentration parameters.
c200 is calculated from m200 and z using the mass-concentration
relation specified when ClusterEnsemble object was created (default
is relation from Dutton & Maccio 2015). Note that c200 = r200/rs.
:property: Returns c200
:type: ndarray
"""
if self._c200 is None:
raise AttributeError('Attribute has not yet been initialized.')
else:
return self._c200
@property
def rs(self):
"""Cluster scale radii.
:property: Returns scale radius in Mpc
:type: Quantity (1D ndarray, in astropy.units of Mpc)
"""
if self._rs is None:
raise AttributeError('Attribute has not yet been initialized.')
else:
return self._rs
@property
def delta_c(self):
"""Characteristic overdensities of the cluster halos.
:property: Returns characteristic overdensity
:type: ndarray
"""
if self._deltac is None:
raise AttributeError('Attribute has not yet been initialized.')
else:
return self._deltac
def _calculate_r200(self):
# calculate r200 from m200
radius_200 = (3. * self._m200 / (800. * np.pi *
self._rho_crit))**(1. / 3.)
self._r200 = radius_200.to(units.Mpc)
self._df['r200'] = pd.Series(self._r200, index=self._df.index)
def _calculate_concentrations(self):
if self._cm == 'DuttonMaccio':
self._c200 = cofm.c_DuttonMaccio(self._z, self._m200,
h=self._cosmo.h)
elif self._cm == 'Prada':
self._c200 = cofm.c_Prada(self._z, self._m200, h=self._cosmo.h,
Om_M=self._cosmo.Om0,
Om_L=1 - self._cosmo.Om0)
elif self._cm == 'Duffy':
self._c200 = cofm.c_Duffy(self._z, self._m200, h=self._cosmo.h)
self._df['c200'] = pd.Series(self._c200, index=self._df.index)
self._calculate_deltac()
def _calculate_rs(self):
# cluster scale radius
self._rs = self._r200 / self._c200
self._df['rs'] = pd.Series(self._rs, index=self._df.index)
def _calculate_deltac(self):
# calculate concentration parameter from c200
self._deltac = calc_delta_c(self._c200)
self._df['delta_c'] = pd.Series(self._deltac, index=self._df.index)
def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200,
numRinner=20, factorRouter=3):
"""Calculates Sigma and DeltaSigma profiles.
Generates the surface mass density (sigma_nfw attribute of parent
object) and differential surface mass density (deltasigma_nfw
attribute of parent object) profiles of each cluster, assuming a
spherical NFW model. Optionally includes the effect of cluster
miscentering offsets.
Parameters
----------
rbins : array_like
Radial bins (in Mpc) for calculating cluster profiles. Should
be 1D, optionally with astropy.units of Mpc.
offsets : array_like, optional
Parameter describing the width (in Mpc) of the Gaussian
distribution of miscentering offsets. Should be 1D, optionally
with astropy.units of Mpc.
Other Parameters
-------------------
numTh : int, optional
Parameter to pass to SurfaceMassDensity(). Number of bins to
use for integration over theta, for calculating offset profiles
(no effect for offsets=None). Default 200.
numRoff : int, optional
Parameter to pass to SurfaceMassDensity(). Number of bins to
use for integration over R_off, for calculating offset profiles
(no effect for offsets=None). Default 200.
numRinner : int, optional
Parameter to pass to SurfaceMassDensity(). Number of bins at
r < min(rbins) to use for integration over Sigma(<r), for
calculating DeltaSigma (no effect for Sigma ever, and no effect
for DeltaSigma if offsets=None). Default 20.
factorRouter : int, optional
Parameter to pass to SurfaceMassDensity(). Factor increase over
number of rbins, at min(r) < r < max(r), of bins that will be
used at for integration over Sigma(<r), for calculating
DeltaSigma (no effect for Sigma, and no effect for DeltaSigma
if offsets=None). Default 3.
"""
if offsets is None:
self._sigoffset = np.zeros(self.number) * units.Mpc
else:
self._sigoffset = utils.check_units_and_type(offsets, units.Mpc,
num=self.number)
self.rbins = utils.check_units_and_type(rbins, units.Mpc)
rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc)
smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc,
offsets=self._sigoffset,
rbins=self.rbins,
numTh=numTh,
numRoff=numRoff,
numRinner=numRinner,
factorRouter=factorRouter)
self.sigma_nfw = smd.sigma_nfw()
self.deltasigma_nfw = smd.deltasigma_nfw()
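# Rough end-to-end sketch (hypothetical numbers) of the workflow described in
# the ClusterEnsemble docstring:
#   zs = [0.2, 0.3, 0.5]
#   ensemble = ClusterEnsemble(zs)
#   ensemble.n200 = [25, 40, 60]      # setting richness populates m200, r200, c200, ...
#   rbins = np.logspace(-1, 1, 10)    # radial bins in Mpc
#   ensemble.calc_nfw(rbins)          # populates sigma_nfw and deltasigma_nfw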
```
|
{
"source": "jesford/twitter_project",
"score": 4
}
|
#### File: jesford/twitter_project/tweet_analyzer.py
```python
query = 'cat'
#HOW MANY TWEETS?
num_tweets = 10
#PERSONAL TWITTER APPLICATION TOKENS/KEYS?
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
#==============================================
#from tweepy import Stream
#from tweepy import OAuthHandler
#from tweepy.streaming import StreamListener
#import csv
import tweepy
import urllib2
import json
import re
import unicodedata
#LINK TO ANALYZE SENTIMENT:
url_sentiment = 'http://text-processing.com/api/sentiment/'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)#, secure=True)
#==============================================
# can use .items or .pages to limit # tweets returned
for tw in tweepy.Cursor(api.search, q=query, lang='en', monitor_rate_limit=True, wait_on_rate_limit=True).items(num_tweets):
#tweet = re.sub('[@#$]', '', tw.text)
#tweet = tweet.replace("\\", '')
#tweet = ''.join([ c if (c.isalnum() or c=='?' or c=='!') else ' ' for c in tw.text])
try:
tweet = unicodedata.normalize('NFKD', tw.text).encode('ascii','ignore')
except:
tweet = tw.text
tweet = re.sub('((www\.[\s]+)|(https?://[^\s]+))','URL',tweet)
tweet = re.sub(r'[@#$]', '', tweet)
print '\n-----------\n'
#print tw.text, '\n'
print tweet, '\n'
req = urllib2.Request(url_sentiment, 'text='+tweet)
response = json.load(urllib2.urlopen(req))
sentiment = response['label']
neg = response['probability']['neg']
pos = response['probability']['pos']
neut = response['probability']['neutral']
print 'SENTIMENT: ', sentiment#, '\n',
print ' positive: ', pos
print ' negative: ', neg
print ' neutral: ', neut
user = tw._json['user']
if tw._json['coordinates'] != None:
print '\n\n', tw._json['coordinates']['coordinates'][0]
print tw._json['coordinates']['coordinates'][1], '\n\n'
else:
print 'coord: ', tw._json['coordinates']
print '\nLOCATION: ', user['location']
print 'TIMEZONE: ', user['time_zone']
try:
place = tw._json['place']['full_name']
except:
place = 'None Given'
print 'PLACE: ', place
#print 'HASHTAGS: ', tw._json['entities']['hashtags']
print '\n-----------\n'
#it._json is a dict, which in turn contains the dict it._json['user']
#prints all dict keys: it._json.keys()
################################################################
#-----------
#stuff I copied from a helpful blog post:
#http://sachithdhanushka.blogspot.ca/2014/02/mining-twitter-data-using-python.html
class TweetListener(tweepy.StreamListener):
def on_data(self, data):
#print type(json.loads(data)) #dict
print data #str
return True
def on_error(self, status):
print status
# def on_status(self, status):
# with open('file.txt', 'w') as f:
# f.write('Author,Date,Text')
# writer = csv.writer(f)
# writer.writerow([status.author.screen_name, status.created_at, status.text])
#2nd line prints constant tweets regarding 'cats' (control-C to quit)
#stream = tweepy.streaming.Stream(auth, TweetListener())
#stream.filter(track=['cats'])
#-----------
class TweetListen_byJes(tweepy.StreamListener):
def on_data(self, data):
j = json.loads(data) #dict
print j['text']
return True
def on_error(self, status):
print status
#stream = tweepy.streaming.Stream(auth, TweetListen_byJes())
#stream.filter(track=['cats'])
#-----------
#I think this is equivalent to Cursor method above
#mysearch = api.search(q='cats',count=10)
#,include_rts=False) #to not include native retweets...
#for c in mysearch:
# print '\n', c.text
#mysearch[0].text
#mysearch[0].retweets
#mysearch[0].entities
```
|
{
"source": "jesgadiaz/ckc",
"score": 3
}
|
#### File: jesgadiaz/ckc/exact.py
```python
from gurobipy import *
import math
import numpy as np
import heapq
def heap_sort(items):
heapq.heapify(items)
items[:] = [heapq.heappop(items) for i in range(len(items))]
return items
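# Small example: heap_sort sorts the list in place and also returns it,
# e.g. heap_sort([3, 1, 2]) -> [1, 2, 3]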
def createGraph(input_file, instance_format):
global n, m , k, matrix, ordered_sizes
if instance_format == 'orlib':
f = open(input_file, "r")
matrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float("inf"))
matrix.append(list)
m = sum(1 for line in open(input_file))
#with open(input_file, "r") as f:
for i in range(0, m):
string = f.readline()
string = string.split()
if string is not "EOF":
v1 = int(string[0]) - 1
v2 = int(string[1]) - 1
weight = int(string[2])
matrix[v1][v2] = weight
matrix[v2][v1] = weight
f.close()
for i in range(0, n):
matrix[i][i] = 0
for i in range(0, n):
#print(i)
for j in range(0, n):
for l in range(0, n):
if matrix[i][j] == float("inf") or matrix[i][l] == float("inf"):
cost = float("inf")
else:
cost = matrix[i][j] + matrix[i][l]
if cost < matrix[j][l]:
matrix[j][l] = cost
ordered_sizes = []
for i in range(0, n):
for j in range(i, n):
ordered_sizes.append(matrix[i][j])
ordered_sizes = heap_sort(ordered_sizes)
elif instance_format == 'tsplib':
f = open(input_file, "r")
m = n
matrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float("inf"))
matrix.append(list)
positions = []
for i in range(0, m):
string = f.readline()
string = string.split()
temp_position = []
temp_position.append(int(string[0])-1)
temp_position.append(float(string[1]))
temp_position.append(float(string[2]))
positions.append(temp_position)
for i in range(0, n):
for j in range(0, n):
dist_temp = math.sqrt(((positions[i][1] - positions[j][1]) * (positions[i][1] - positions[j][1])) + ((positions[i][2] - positions[j][2]) * (positions[i][2] - positions[j][2])))
matrix[i][j] = dist_temp
matrix[j][i] = dist_temp
f.close()
for i in range(0, n):
matrix[i][i] = 0
ordered_sizes = []
for i in range(0, n):
for j in range(i, n):
ordered_sizes.append(matrix[i][j])
ordered_sizes = heap_sort(ordered_sizes)
def run(r):
global total_runtime, k, runtime, num_centers, m, cap, input_file
prunedMatrix = []
for i in range(0,n):
list = []
for j in range(0,n):
list.append(float(0))
prunedMatrix.append(list)
for i in range(0,n):
for j in range(0,n):
if matrix[i][j] <= r:
prunedMatrix[i][j] = 1
try:
global m, num_centers, runtime, cap
m = Model("mip1")
#******************************************************************************************************
m.setParam("MIPGap", 0.0);
#******************************************************************************************************
y = []
for i in range(n):
y.append(0)
for i in range(n):
y[i] = m.addVar(vtype=GRB.BINARY, name="y%s" % str(i+1))
m.setObjective(sum(y), GRB.MINIMIZE)
temp_list = np.array(prunedMatrix).T.tolist()
for i in range(n):
m.addConstr(sum(np.multiply(temp_list[i], y).tolist()) >= 1)
x = []
for i in range(n):
temp = []
for j in range(n):
temp.append(0)
x.append(temp)
for i in range(n):
for j in range(n):
x[i][j] = m.addVar(vtype=GRB.BINARY, name="x%s%s" % (str(i+1), str(j+1)))
temp_list_2 = np.array(x).T.tolist()
for i in range(n):
m.addConstr(sum(temp_list_2[i]) * y[i] <= L)
for i in range(n):
for j in range(n):
#m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j])
#******************************************************************************************************
m.addConstr(x[i][j] <= y[j] * prunedMatrix[i][j] * (1-y[i]))
#******************************************************************************************************
for i in range(n):
#m.addConstr(sum(x[i]) == 1)
#******************************************************************************************************
m.addConstr(sum(x[i]) == 1 * (1-y[i]))
#******************************************************************************************************
m.optimize()
runtime = m.Runtime
print("The run time is %f" % runtime)
print("Obj:", m.objVal)
#******************************************************************************************************
dom_set_size = 0
solution = []
assignment = []
center = 0
vertex_j = 1
vertex_i = 1
for v in m.getVars():
varName = v.varName
if varName[0] == 'y':
if v.x == 1.0:
dom_set_size = dom_set_size + 1
solution.append(varName[1:])
else:
if vertex_j <= n:
if v.x == 1.0:
assignment.append([vertex_i, vertex_j])
else:
vertex_i = vertex_i + 1
vertex_j = 1
vertex_j = vertex_j + 1
print("Cap. dom. set cardinality: " + str(dom_set_size))
solution = [int(i) for i in solution]
#print("solution: " + str(solution))
#print("assignment: " + str(assignment))
print('{"instance": "%s",' % input_file)
print('"centers": [')
counter = 0
for center in solution:
counter = counter + 1
nodes = []
for node in assignment:
if node[1] == center:
nodes.append(node[0])
if counter == len(solution):
print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '}')
else:
print('{ "center": ' + str(center) + ', "nodes": ' + str(nodes) + '},')
print(']}')
#print('%s %g' % (v.varName, v.x))
#******************************************************************************************************
# {"instance": "/home/ckc/Escritorio/pr124.tsp",
# "outliers": [83,40,115,114],
# "centers": [ { "center": 59, "nodes": [28,32,33,34,35,54,57,58,59,60,61,64,65]},
# { "center": 102, "nodes": [101,102,103,104,105,106,107,108,109,110,111,112,113]},
# { "center": 8, "nodes": [8,9,10,11,12,13,14,15,16,46,47,48,49]},
# { "center": 79, "nodes": [77,78,79,91,92,93,94,95,96,97,98,99,123]},
# { "center": 6, "nodes": [0,1,2,3,4,5,6,7,26,27,29,30,31]},
# { "center": 36, "nodes": [19,20,21,22,23,24,25,36,37,38,39,55,56]},
# { "center": 16, "nodes": [17,18,40,41,42,43,44,45,50,51,52,53]},
# { "center": 96, "nodes": [72,73,74,75,76,80,116,117,118,119,120,121,122]},
# { "center": 89, "nodes": [84,85,86,87,88,89,90,100]},
# { "center": 64, "nodes": [62,63,66,67,68,69,70,71,81,82,83,114,115]}
# ]}
num_centers = dom_set_size
# num_centers = m.objVal
except GurobiError:
print("Error reported")
def binarySearch():
global total_runtime, k, runtime, num_centers, input_file
total_runtime = 0
not_done = True
upper = len(ordered_sizes) - 1
lower = 0
best_solution_size = float("inf")
while not_done:
#mid = math.ceil(lower + ((upper - lower)/2))
mid = math.ceil((upper + lower) /2)
mid_value = ordered_sizes[int(mid)]
if mid == upper:
not_done = False
run(mid_value)
total_runtime = total_runtime + runtime
else:
run(mid_value)
total_runtime = total_runtime + runtime
if num_centers <= k:
upper = mid
print("UPPER = MID")
if mid_value <= best_solution_size:
best_solution_size = mid_value
else:
lower = mid
print("LOWER = MID")
print("best solution size: " + str(best_solution_size))
print("total runtime: " + str(total_runtime))
if __name__ == "__main__":
global total_runtime, k, runtime, num_centers, L, n
if len(sys.argv) != 6:
print ("Wrong number of arguments")
print ("exact input_file_path n k L instance_format")
sys.exit()
input_file = sys.argv[1]
n = int(sys.argv[2])
k = int(sys.argv[3])
L = int(sys.argv[4])
instance_format = sys.argv[5]
createGraph(input_file, instance_format)
binarySearch()
```
|
{
"source": "jesgomez/tools-iuc",
"score": 3
}
|
#### File: tools/ena_upload/check_remote.py
```python
import json
import requests
URL = "https://www.ebi.ac.uk/ena/portal/api/search"
def check_remote_entry(entry_type, query_dict, out_format='json'):
'''
Checks if an entry with that alias exists in the ENA repos
entry_type = [study | sample | experiment | run]
'''
assert entry_type in ['study', 'sample', 'experiment', 'run']
params_dict = {}
query_str = ' AND '.join(['%s=%s' % (key, value) for (key, value) in query_dict.items()])
params_dict['query'] = query_str
params_dict['result'] = 'read_' + entry_type
params_dict['fields'] = entry_type + '_alias'
params_dict['format'] = out_format
response = requests.post(URL, data=params_dict)
if response.content != b'':
return json.loads(response.content)
return []
```
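A minimal usage sketch for `check_remote_entry` above; the alias value is hypothetical and the call goes out to the public ENA portal API, so treat it as illustrative rather than part of the original tool.

```python
# Hedged example: 'my_study_alias' is a made-up alias, not a real ENA entry.
if __name__ == '__main__':
    hits = check_remote_entry('study', {'study_alias': 'my_study_alias'})
    print(hits)  # an empty list means no study with that alias was found
```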
|
{
"source": "jesh-anand/PythonMasterClass",
"score": 3
}
|
#### File: webapp/models/post.py
```python
import datetime
import uuid
from blog_site.common.database import Database
__author__ = '<NAME>'
class Post(object):
COLLECTION_NAME = 'posts'
    def __init__(self, blog_id, title, content, author, created_date=None, _id=None):
        self.blog_id = blog_id
        self.title = title
        self.content = content
        self.author = author
        # Evaluate the timestamp per instance; a utcnow() call used as a default
        # argument would only be evaluated once, when the module is imported.
        self.created_date = created_date if created_date is not None else datetime.datetime.utcnow()
        self._id = uuid.uuid4().hex if _id is None else _id
def save_to_mongo(self):
Database.insert(collection=Post.COLLECTION_NAME, data=self.get_json())
def get_json(self):
return {
'id': self._id,
'blog_id': self.blog_id,
'author': self.author,
'content': self.content,
'title': self.title,
'created_date': self.created_date
}
@classmethod
def from_mongo_in_post_object(cls, id):
post_data = Database.find_one(collection=Post.COLLECTION_NAME, query={'id': id})
return cls(**post_data)
@staticmethod
def from_blog(id):
return [post for post in Database.find(collection=Post.COLLECTION_NAME, query={'blog_id': id})]
```
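A hedged usage sketch for the `Post` model above; it assumes the `Database` helper has already been initialised (its connection setup is not shown in this file), and the field values are placeholders.

```python
# Hedged example: assumes Database is already connected to a MongoDB instance.
post = Post(blog_id='abc123', title='Hello', content='First post', author='demo')
post.save_to_mongo()

# Round-trip the document back out of Mongo using the same id.
loaded = Post.from_mongo_in_post_object(post._id)
print(loaded.get_json())
```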
#### File: PythonMasterClass/Classes/class_implementation.py
```python
class Employee:
def __init__(self, first, last, sex):
self.first = first
self.last = last
self.sex = sex
self.email = first + '.' + last + '@company.com'
def fullname(self):
return "{} {}".format(self.first, self.last)
def main():
emp_1 = Employee('prajesh', 'ananthan', 'male')
print(emp_1.fullname())
if __name__ == '__main__':
main()
```
#### File: PythonMasterClass/Classes/classmethod_as_constructor.py
```python
class Employee:
raise_amount = 1.04
num_of_emps = 0
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@<EMAIL>'
Employee.num_of_emps += 1
def fullname(self):
return "{} {}".format(self.first, self.last)
@classmethod
def set_raise_amount(cls, amount):
cls.raise_amount = amount
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
def main():
emp_str_1 = 'prajesh-ananthan-10000'
emp1_new = Employee.from_string(emp_str_1)
print(emp1_new.email)
if __name__ == '__main__':
main()
```
#### File: PythonMasterClass/Classes/staticmethod_in_a_class.py
```python
import datetime
class Employee:
raise_amount = 1.04
num_of_emps = 0
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@<EMAIL>'
Employee.num_of_emps += 1
def fullname(self):
return "{} {}".format(self.first, self.last)
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
@staticmethod
def is_workday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
return True
def main():
my_date = datetime.date(2016, 7, 11)
print(Employee.is_workday(my_date))
if __name__ == '__main__':
main()
```
#### File: PythonMasterClass/section_7--functional_programming/lambda_script.py
```python
def square(x):
return x ** 2
print(square(4))
# The same computation as square() above, expressed as a lambda
result = (lambda x: x ** 2)(30)
print(result)
```
|
{
"source": "jesh-anand/Tools",
"score": 2
}
|
#### File: jesh-anand/Tools/string_seperator.py
```python
def main():
url = 'https://webmail.eprotea-finexus.com/upp/faces/ccpayment.xhtml?h001_MTI=0200&h002_VNO=03&h003_TDT=20170929&h004_TTM=15402900&f001_MID=BIG001&f003_ProcCode=003000&f004_PAN=4444333322221111&f005_ExpDate=2304&f006_TxnDtTm=20170929154029&f007_TxnAmt=000000057280&f008_POSCond=59&f010_CurrCode=458&f012_CVV2=&f014_3DXID=&f015_3DARC=&f016_3DCAVVLen=&f017_3DCAVV=&f019_ExpTxnAmt=2&f022_ECI=00&f247_OrgTxnAmt=&f248_OrgCurrCode=&f249_TxnCh=WEB&f260_ServID=BIG&f261_HostID=&f262_SessID=&f263_MRN=11223344556677&f264_Locale=en&f265_RURL_CCPS=https://webmail.eprotea-finexus.com/upp/faces/sim_ccresponse.jsp&f266_RURL_CCPU=https://webmail.eprotea-finexus.com/upp/faces/sim_ccresponse.jsp&f267_RURL_CCPC=https://webmail.eprotea-finexus.com/upp/faces/sim_ccresponse.jsp&f268_CHName=Ishak Ismail&f269_IssName=CITIBANK&f270_ORN=&f271_ODesc=&f278_EMailAddr=&f279_HP=&f285_IPAddr=&f287_ExpOrgTxnAmt=&f288_IssCntrCde=&t001_SHT=MD5&t002_SHV=FC8BAB4B09C393E874161FD530333360'
list = url.split('&')
for l in list:
(key, value) = l.split('=')
print(key + " => " + value)
if __name__ == '__main__':
main()
```
#### File: jesh-anand/Tools/youtube-downloader.py
```python
from pytube import YouTube
import logger
import os
import winsound
from progressbar import print_status
"""youtube-downloader.py: A Youtube video downloader that is able to download multiple videos from different
channels/playlists simultaneously
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Python"
__license__ = "GPL"
# TODO: Launch a simple GUI that able pick up URL entries and download videos accordingly
# TODO: To have flexible approach to download videos at all resolution
# TODO: To download multiple videos simultaneously
# TODO: Import pytube codebase
def main():
logger.printInfo("Starting Youtube downloader tool...")
_configfile = 'C:/Users/Prajesh/Swiss-Army-Scripts/Python/Tools/config/links.properties'
_path = 'videos/'
_format = 'mp4'
_quality = '360p'
openconfigfile(_configfile)
_links = getlinksfromconfig(_configfile)
createdirectory(_path)
downloadvideos(_links, _path, _quality, _format)
logger.printInfo("Done. Videos downloaded: {}".format(len(_links)))
def openconfigfile(configfile):
os.startfile(configfile)
def getlinksfromconfig(configfile):
list = []
with open(configfile) as f:
for line in f:
if line.startswith('#'):
continue
list.append(line.strip())
return list
def createdirectory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
logger.printDebug('{} created!'.format(directory))
def downloadvideos(videos, directory, quality, format):
video = None
for vid in videos:
yt = YouTube(vid)
logger.printDebug('Downloading => [ {} | {} ]'.format(yt.filename, quality))
video = yt.get(format, quality)
video.download(directory, on_progress=print_status)
winsound.Beep(440, 300) # frequency, duration
print()
if __name__ == '__main__':
main()
```
|
{
"source": "jeshan/botodocs",
"score": 2
}
|
#### File: jeshan/botodocs/resource_collections.py
```python
from boto3.resources.model import Collection, Action
from botocore.model import OperationModel
import pythonic
from clients import get_parameter_declaration_with
from util import create_new_file, get_accepts_redirect_link, get_botostubs_message, get_resource_path_for, write_lines
def create_collection_page(
path, collection_name, resource_name, class_name, parameter_str, client_name, service_path, op_name, resource_path
):
create_new_file(path)
def all():
return f'Creates an iterable of all {resource_name} resources in the collection', ''
def filter():
return f'{all()[0]} filtered by kwargs passed to the method', parameter_str
def limit():
return (
f'Creates an iterable up to a specified number of {resource_name} resources in the collection',
'count=123',
)
def page_size():
return (
f'Creates an iterable of all {resource_name} resources in the collection, but limits the number of items returned by each service call by the specified number',
'count=123',
)
new_resource_path = get_resource_path_for(resource_name, resource_path)
result = [
f'# {collection_name} collection',
f'A collection of [{resource_name}]({new_resource_path}) resources:\n',
'# Actions',
]
for fn in [all, filter, limit, page_size]:
result.append(f'## {fn.__name__}')
doc, param_str = fn()
result.append(doc)
item_name = pythonic.xform_name(resource_name)
result.append(
f"""```python
{item_name}: botostubs.{class_name}.{class_name}Resource.{resource_name}
for {item_name} in resource.{collection_name}.{fn.__name__}({param_str}):
pass # TODO: add your code here
```
"""
)
if fn == filter:
pythonic_op_name = pythonic.xform_name(op_name)
result.append(
f"""#### Accepts
{get_accepts_redirect_link(client_name, pythonic_op_name, service_path)}
"""
)
result.append(get_botostubs_message())
return result
def handle_collections(
collections, resource_list_items, resource_path, class_name, service_model, client_name, service_path
):
if collections:
resource_list_items.extend(['# Collections', 'These are the available collections:'])
collection: Collection
for collection in collections:
name = collection.name
collection_path = f'{resource_path}/collections/{name}'
docs_collection_path = f'docs/{collection_path}.md'
list_item = f'- **[{name}]({collection_path})**'
resource_list_items.append(list_item)
resource_name = collection.resource.model.name
op_name = collection.request.operation
param_str = get_param_str_from_operation(op_name, service_model)
collection_list_items = create_collection_page(
docs_collection_path,
name,
resource_name,
class_name,
param_str,
client_name,
service_path,
op_name,
resource_path,
)
handle_batch_actions(client_name, collection, collection_list_items, service_path)
write_lines(docs_collection_path, collection_list_items)
if collections:
resource_list_items.append('') # newline
def handle_batch_actions(client_name, collection, collection_list_items, service_path):
if collection.batch_actions:
collection_list_items.append('# Batch actions')
action: Action
for action in collection.batch_actions:
op_name = action.request.operation
collection_list_items.append(f'## {action.name}')
collection_list_items.append(
f'> {get_accepts_redirect_link(client_name, pythonic.xform_name(op_name), service_path)}'
)
def get_param_str_from_operation(op_name, service_model):
operation_model: OperationModel = service_model.operation_model(op_name)
input_shape = operation_model.input_shape
parameters = input_shape.members if input_shape else {}
param_str = get_parameter_declaration_with(parameters, parameters.keys())
return param_str
```
#### File: jeshan/botodocs/waiters.py
```python
from boto3.resources.model import Action, Waiter
from botocore.waiter import WaiterModel
import pythonic
from util import create_new_file, get_botostubs_message, get_link_to_client_function, write_lines, get_variable_name_for
def create_waiter_index(path, client_name, service_name, waiter_name):
create_new_file(path)
return [
f'# {service_name} waiters',
f"""You get a waiter by calling `get_waiter` on a certain client:
```python
import boto3
client = boto3.client('{client_name}')
waiter = client.get_waiter('{pythonic.xform_name(waiter_name)}') # type: botostubs.{service_name}.{waiter_name}Waiter
```
""",
get_botostubs_message(),
'The available client waiters are:',
]
def get_example_waiter_snippet(name, pythonic_name, client_name, service, fn_name, service_path):
return f"""```python
import boto3
client = boto3.client('{client_name}')
waiter = client.get_waiter('{pythonic_name}') # type: botostubs.{service}.{name}Waiter
waiter.wait(
WaiterConfig={{'Delay': 123, 'MaxAttempts': 123}}, OtherParams=...
)
```
{get_botostubs_message()}
### Accepts
_See {client_name}_client.[{fn_name}]({service_path}/client/operations/{fn_name}#Accepts) for other parameters that you can pass in._
### Returns
None
"""
def get_waiter_page(name, fn_name, client_name, class_name, waiter_path, service_path):
pythonic_name = pythonic.xform_name(name)
headline = f'# {pythonic_name} waiter'
signature = f"""
{get_example_waiter_snippet(name, pythonic_name, client_name, class_name, fn_name, service_path)}
"""
documentation = f'Polls {client_name}_client.{get_link_to_client_function(fn_name, service_path)} every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.'
list_item = f'- [{pythonic_name}]({waiter_path})'
return list_item, signature, documentation, headline
def handle_waiters(client, client_name, class_name, service_name, service_path, sidebar_lines):
waiter_config = client._get_waiter_config()
waiter_model = WaiterModel(waiter_config) if 'waiters' in waiter_config else None
if not waiter_model:
return
waiters_path = f'{service_path}/waiters'
sidebar_lines.append(f' - [Waiters]({waiters_path})')
docs_waiters_path = f'docs/{waiters_path}.md'
waiter_names = waiter_model.waiter_names
example_waiter_name = waiter_names[0]
waiter_list_items = create_waiter_index(docs_waiters_path, client_name, service_name, example_waiter_name)
for name in waiter_names:
handle_waiter(class_name, client_name, name, service_path, waiter_list_items, waiter_model, waiters_path)
write_lines(docs_waiters_path, waiter_list_items)
def handle_waiter(class_name, client_name, name, service_path, waiter_list_items, waiter_model, waiters_path):
waiter = waiter_model.get_waiter(name)
pythonic_name = pythonic.xform_name(waiter.operation)
waiter_path = f'{waiters_path}/{pythonic.xform_name(name)}'
docs_waiter_path = f'docs/{waiter_path}.md'
create_new_file(docs_waiter_path)
list_item, signature, documentation, headline = get_waiter_page(
name, pythonic_name, client_name, class_name, waiter_path, service_path
)
create_new_file(docs_waiter_path)
write_lines(docs_waiter_path, [headline, documentation, signature])
waiter_list_items.append(list_item)
def handle_sub_resource_waiters(resource: Action, resource_list_items, service_path):
waiters = resource.resource.model.waiters
if waiters:
resource_list_items.extend(['# Waiters', 'The following waiters are available:'])
waiters_path = f'{service_path}/waiters'
waiter: Waiter
for waiter in waiters:
name = pythonic.xform_name(waiter.waiter_name)
variable_name = get_variable_name_for(resource.name)
resource_list_items.append(f'## {waiter.name}')
resource_list_items.append(
f"""```python
{variable_name}.{waiter.name}(...)
```
"""
)
resource_list_items.append(
f'> Note that this waiter delegates to the client [{name}]({waiters_path}/{name}) waiter'
)
```
|
{
"source": "jeshan/botostubs",
"score": 2
}
|
#### File: jeshan/botostubs/main.py
```python
import keyword
import boto3
from boto3.dynamodb.table import register_table_methods
from boto3.ec2.deletetags import delete_tags
from boto3.s3.inject import inject_bucket_methods, inject_object_methods, inject_object_summary_methods, \
inject_s3_transfer_methods
import botocore
from botocore.waiter import WaiterModel
import inspect
import pythonic
primitive_map = {
'string': 'str',
'integer': 'int'
} # TODO: add more
def get_method_signature(service_model, operation_name, shapes, class_name):
pythonic_op_name = pythonic.xform_name(operation_name)
operation_model = service_model.operation_model(operation_name)
input_shape = operation_model.input_shape
output_shape = operation_model.output_shape
parameters = input_shape.members if input_shape else {}
if input_shape:
append_to_shapes(input_shape, class_name, shapes)
if output_shape:
append_to_shapes(output_shape, class_name, shapes)
param_list = get_param_list(input_shape, parameters, shapes, class_name)
param_str = ', '.join(param_list)
operation_doc = operation_model.documentation.replace('<p>', '').replace('</p>', '')
docstr = f'r"""{operation_doc}\n'
append_return_type = ' -> ' + output_shape.name if output_shape else ''
rest_params = f":param {get_doc_str(input_shape)}"
return f""" def {pythonic_op_name}({param_str}){append_return_type}:
{docstr}
:param self:
{rest_params}
:return: {get_doc_str(output_shape)} \"\"\"
pass"""
def get_doc_str(shape, prefix='', level=1):
docstr = ''
if not shape or not hasattr(shape, 'members') or not shape.members.items():
return docstr
if level > 3:
return
indent = " " * level
for param_key, param_value in shape.members.items():
doc = param_value.documentation.replace('"""', 'triple-quotes').replace('<p>', '').replace('</p>', '')
if hasattr(param_value, 'members'):
if level == 1:
doc += ':'
if level > 1:
doc += f"""{indent}<b>{param_key}</b>: {doc}"""
sub_result = get_doc_str(param_value, indent, level + 1)
if not sub_result:
docstr += doc
break
docstr += sub_result
if level == 1:
docstr = f"""{param_key}: {prefix} {doc}<br/>{docstr}"""
else:
docstr = f"""{prefix} <i>{param_key}</i> {doc}<br/>{docstr}"""
return docstr
def get_param_list(input_shape, parameters, shapes, class_name):
param_list = ['self']
for name, param in parameters.items():
item = get_param_name(input_shape, name, param, shapes, class_name)
if name in input_shape.required_members:
param_list.insert(1, item)
else:
param_list.append(item)
return param_list
def append_to_shapes(shape, class_name, shapes):
for item in shapes:
if str(item[0]) == str(shape) and item[1] == class_name:
return
shapes.append((shape, class_name))
def get_param_name(shape, name, param, shapes, class_name):
item = name
if keyword.iskeyword(name):
item += '_'
primitive_name = primitive_map.get(param.type_name)
if primitive_name:
item = item + ': ' + primitive_name
elif param.type_name == 'list':
item = item + ': List[' + param.member.name + ']'
append_to_shapes(param.member, class_name, shapes)
else:
item = item + ': ' + param.name
append_to_shapes(param, class_name, shapes)
if name not in shape.required_members:
item = item + '=None' # what if required/optional ones are not in order?
return item
def get_class_signature(client_name, name, documentation, methods, shapes_in_classes, waiter_model, paginator_model):
method_str = '\n\n'.join(methods)
shape_str = get_shape_str(name, shapes_in_classes)
resource_str = print_resource(client_name)
doc_str = f' r"""{documentation}"""'.replace('<p>', '').replace('</p>', '')
waiter_str = get_waiter_str(waiter_model)
paginator_str = get_paginator_str(paginator_model)
return f"""class {name}(BaseClient):
{doc_str}
{waiter_str}
{paginator_str}
{shape_str}
{method_str}
{resource_str}
"""
def get_shape_str(name, shapes_in_classes):
shape_str = []
for shape_class in shapes_in_classes:
if shape_class[1] != name:
continue
base_type = 'Mapping' if shape_class[0].type_name == 'structure' else 'object'
shape_str.append(f""" class {shape_class[0].name}({base_type}):
pass
""")
return '\n'.join(shape_str)
def get_waiter_str(waiter_model):
value = ''
if not waiter_model:
return value
for name in waiter_model.waiter_names:
waiter = waiter_model.get_waiter(name)
wait_docstr = f'r"""see function `{pythonic.xform_name(waiter.operation)}` for valid parameters"""'
value += f""" class {name}Waiter(Waiter):
def wait(self, **kwargs):
{wait_docstr}
pass
"""
value += '\n'
return value
def get_paginator_str(paginator_model):
value = ''
if not paginator_model:
return value
for name, paginator in paginator_model._paginator_config.items():
wait_docstr = f'r"""see function `{pythonic.xform_name(name)}` for valid parameters"""'
value += f""" class {name}Paginator(Paginator):
def wait(self, **kwargs):
{wait_docstr}
pass
"""
value += '\n'
return value
def print_resource(resource_name):
result = f' class {resource_name.title()}Resource:\n'
try:
resource = boto3.resource(resource_name)
except boto3.exceptions.ResourceNotExistsError:
return ''
for sub_resource in resource.meta.resource_model.subresources:
result += print_sub_resource(resource_name, resource, sub_resource)
result += print_actions(resource.meta.resource_model.actions)
result += print_collections(resource)
result += '\n\n'
return result
def print_sub_waiters(resource):
waiters = resource.resource.model.waiters
result = ''
for waiter in waiters:
result += f""" def {waiter.name}(self):
pass
"""
return result
def print_collections(resource):
result = ''
for collection in resource.meta.resource_model.collections:
item_type = collection.resource.type
if resource.meta.service_name == 'ec2' and item_type == 'KeyPairInfo':
item_type = 'KeyPair'
result += f""" class {collection.resource.type}ResourceCollection(List[{item_type}], ResourceCollection):
pass
"""
result += f""" {collection.name}: {collection.resource.type}ResourceCollection = None
"""
return result
def print_sub_resource(resource_name, resource, sub_resource):
def get_shape_str(name, shapes_in_classes):
shape_str = []
for shape_class in shapes_in_classes:
if shape_class[1] != name:
continue
base_type = 'Mapping' if shape_class[0].type_name == 'structure' else 'object'
shape_str.append(f""" class {shape_class[0].name}({base_type}):
pass
""")
return '\n'.join(set(shape_str))
service_model = resource.meta.client.meta.service_model # sub_resource.resource.meta.client.meta.service_model
attr = getattr(resource, sub_resource.name)
params = []
shape_classes = []
for identifier in sub_resource.resource.identifiers:
params.append(pythonic.xform_name(identifier.target))
model_shape = sub_resource.resource.model.shape
attributes_doc = '\n '
if model_shape:
shape = service_model.shape_for(model_shape)
attributes = resource.meta.resource_model.get_attributes(shape)
for key, value in attributes.items():
type_shape = value[1]
attributes_doc += get_param_name(type_shape, key, type_shape, shape_classes, resource_name) + f"""
"""
resource_doc = f'r"""{inspect.getdoc(attr)}"""'
params_str = ''
if len(params):
params_str = ', ' + ', '.join(params)
return f"""
class {sub_resource.name}:
{resource_doc}
def __init__(self{params_str}):
pass
{get_shape_str(resource_name, shape_classes)} {attributes_doc}
{print_sub_waiters(sub_resource)}{print_sub_actions(sub_resource.resource.model.actions)}{print_injected_resource_methods(resource_name, sub_resource)}
"""
def print_actions(actions):
result = ''
for action in actions:
result += f""" def {action.name}(self, **kwargs):
# type: (object) -> {action.resource.type if action.resource else 'dict'}
pass
"""
result += f""" def get_available_subresources(self) -> List[str]:
pass
"""
return result
def print_sub_actions(actions):
result = ''
for action in actions:
result += f""" def {action.name}(self, **kwargs):
# type: (object) -> {action.resource.type if action.resource else 'dict'}
return {action.resource.type + '()' if action.resource else '{}'}
"""
return result
def add_injected_client_methods(client_name, method_signatures):
resource_fns = {'s3': inject_s3_transfer_methods}
fn = resource_fns.get(client_name)
result = print_injected_functions(fn, {}, 4)
method_signatures.append(result)
def print_injected_resource_methods(resource_name, sub_resource):
s3_fns = {'Bucket': inject_bucket_methods, 'Object': inject_object_methods,
'ObjectSummary': inject_object_summary_methods}
if resource_name == 'dynamodb':
base_classes = []
register_table_methods(base_classes)
methods = {}
for clazz in base_classes:
new_methods = {method_name: getattr(clazz, method_name) for method_name in dir(clazz) if
not method_name.startswith('__')}
methods.update(new_methods)
return print_injected_functions(None, methods, 12)
if resource_name == 'ec2':
return print_injected_functions(None, {'delete_tags': delete_tags}, 8)
if resource_name == 's3':
fn = s3_fns.get(sub_resource.name)
return print_injected_functions(fn, {}, 12)
return ''
def print_injected_functions(fn, methods, spaces):
if fn:
fn(methods)
result = ''
indent = spaces * ' '
for name, method in methods.items():
got_doc = inspect.getdoc(method)
doc = ''
if got_doc:
doc = '\"\"\"{0}\"\"\"'.format(got_doc)
signature = inspect.signature(method)
parameters = signature.parameters
param_str = ''
for param_name, param in parameters.items():
param_str += str(param) + ', '
param_str = param_str[:-2]
result += f"""{indent}def {name}({param_str}):
{indent}{doc}
{indent}pass
"""
return result
def get_class_output(client_name):
method_signatures = []
shapes_in_classes = []
client = boto3.client(client_name)
class_name = type(client).__name__
service_model = client._service_model
waiter_config = client._get_waiter_config()
waiter_model = WaiterModel(waiter_config) if 'waiters' in waiter_config else None
try:
paginator_model = botocore.session.get_session().get_paginator_model(client_name)
except botocore.exceptions.UnknownServiceError:
paginator_model = None # meaning it probably doesn't have paginators
for name in service_model.operation_names:
method_signatures.append(get_method_signature(service_model, name, shapes_in_classes, class_name))
add_injected_client_methods(client_name, method_signatures)
return get_class_signature(client_name, class_name, service_model.documentation, method_signatures,
shapes_in_classes, waiter_model, paginator_model)
def print_header():
print('from collections.abc import Mapping')
print('from typing import List')
print('from boto3.resources.collection import ResourceCollection')
print('from botocore.waiter import Waiter')
print('from botocore.paginate import Paginator')
print('from botocore.client import BaseClient\n\n')
def print_clients():
clients = boto3.DEFAULT_SESSION.get_available_services()
for client_name in clients:
print(get_class_output(client_name))
def go():
print_header()
boto3.setup_default_session()
print_clients()
if __name__ == '__main__':
go()
```
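The module above writes the generated stubs to stdout; a hedged way to capture them into a file (the output path is arbitrary, and boto3 needs valid credentials/region configuration) could look like this:

```python
# Hedged example: capture the printed stub classes into a file of your choosing.
import contextlib

with open('botostubs.py', 'w') as out, contextlib.redirect_stdout(out):
    go()  # assumes this runs in the same module that defines go()
```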
|
{
"source": "jeshan/cfn-failures-to-telegram",
"score": 2
}
|
#### File: jeshan/cfn-failures-to-telegram/put-target-deployment-roles.py
```python
import sys
from glob import glob
from subprocess import check_output, CalledProcessError
fail_count = 0
def run(command):
print('Running', command)
try:
output = check_output(command.split(' ')).decode('utf-8')
return output
except CalledProcessError as exc:
print("Status : FAIL", exc.returncode, exc.output.decode('utf-8'))
global fail_count
fail_count += 1
def go():
for path in glob('config/app/*/config.yaml'):
env = path[:path.rindex('/')]
env = env[env.rindex('/') + 1:]
output = run(f'sceptre --no-colour launch -y app/{env}/base')
print(output)
if __name__ == '__main__':
go()
sys.exit(fail_count)
```
|
{
"source": "jeshan/hypothesis-test-python-versions",
"score": 3
}
|
#### File: jeshan/hypothesis-test-python-versions/demoapp.py
```python
from datetime import datetime
def go():
print(f'Hello world, the time is now {datetime.utcnow()}')
if __name__ == '__main__':
go()
```
#### File: jeshan/hypothesis-test-python-versions/test_app.py
```python
from subprocess import call
from tempfile import NamedTemporaryFile
from hypothesis import settings, note
from hypothesis.stateful import RuleBasedStateMachine, rule
from hypothesis.strategies import sampled_from
def versions():
""" generates only minor versions available on Docker Hub """
# TODO: treat as sem-ver version to allow accurate ordering (exercise left to the reader)
return sampled_from(['3.5', '3.6', '3.7', '3.8'])
class TestPythonVersions(RuleBasedStateMachine):
@rule(version=versions())
def try_build_image(self, version):
with NamedTemporaryFile() as tmp:
print(f"building in Python version {version} ({tmp.name})")
contents = f"""FROM python:{version}-alpine
COPY demoapp.py .
RUN python demoapp.py
"""
tmp.write(contents.encode())
tmp.flush()
note(f'Program does not run on Python {version}')
exit_code = call(f'docker build -f {tmp.name} .'.split(' '))
assert exit_code == 0
TestPythonVersions.TestCase.settings = settings(deadline=None)
test_python_versions = TestPythonVersions.TestCase
```
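The stateful machine above is exposed to pytest as `test_python_versions`; one hedged way to run it directly (assuming pytest, hypothesis and a running Docker daemon are available) is:

```python
# Hedged example: drives the stateful test through pytest programmatically.
import pytest

raise SystemExit(pytest.main(['-x', 'test_app.py']))
```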
|
{
"source": "jeshan/prologterms-py",
"score": 3
}
|
#### File: prologterms-py/tests/test_term.py
```python
from prologterms import TermGenerator, PrologRenderer, Program, Var, SExpressionRenderer
P = TermGenerator()
X = Var('X')
Y = Var('Y')
Z = Var('Z')
R = PrologRenderer()
S = SExpressionRenderer()
def test_term():
t = P.member(X, [1, 2, 3])
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "member(X, [1, 2, 3])"
assert S.render(t) == "(member ?X (list 1 2 3))"
def test_atom():
t = P.foo()
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "foo"
assert S.render(t) == "(foo )"
def test_unary_neg():
t = (-X)
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "-(X)"
assert S.render(t) == "(- ?X)"
def test_not():
t = (~ P.true())
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "\+(true)"
assert S.render(t) == "(\+ (true ))"
def test_eq():
t = (X == Y)
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "=(X, Y)"
assert S.render(t) == "(= ?X ?Y)"
def test_ne():
t = (X != Y)
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "\=(X, Y)"
assert S.render(t) == "(\= ?X ?Y)"
def test_quote():
t = P.member(X, ['a', 'B', '$c', '.d', '', ' ', "'x'", "foo\n\n'bar'"])
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "member(X, [a, 'B', '$c', '.d', '', ' ', '\\'x\\'', 'foo\\n\\n\\'bar\\''])"
assert S.render(t) == "(member ?X (list a 'B' '$c' '.d' '' ' ' '\\'x\\'' 'foo\\n\\n\\'bar\\''))"
def test_comments():
t = P.member(X, [1, 2, 3])
t.add_comment('foo')
print('Term with comments:')
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "% foo\nmember(X, [1, 2, 3])"
def test_comments_infix():
t = P.member(X, [1, 2, 3]) % 'foo'
print('Term with comments:')
print("TERM: {}\n".format(R.render(t)))
assert R.render(t) == "% foo\nmember(X, [1, 2, 3])"
def test_program():
p = Program(
P.ancestor(X,Y) <= (P.parent(X,Z), P.ancestor(Z,Y)),
P.ancestor(X,Y) <= P.parent(X,Z),
P.parent('a','b'),
P.parent('b','c'),
P.parent('c','d')
)
print('PROG:\n')
print(R.render(p))
def test_program_infix_comments():
p = Program(
(P.ancestor(X,Y) <= (P.parent(X,Z), P.ancestor(Z,Y))) % 'recursive',
(P.ancestor(X,Y) <= P.parent(X,Z)) % 'base case',
P.parent('a','b') % 'a isa b',
P.parent('b','c') % 'b isa c',
P.parent('c','d') % 'c isa d'
)
print('PROG:\n')
print(R.render(p))
```
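A small hedged sketch built only from the API exercised in the tests above, showing how a program could be rendered outside the test suite; the predicate names are arbitrary.

```python
# Hedged example: mirrors the TermGenerator/Program/PrologRenderer usage above.
from prologterms import TermGenerator, PrologRenderer, Program, Var

P = TermGenerator()
R = PrologRenderer()
X, Y, Z = Var('X'), Var('Y'), Var('Z')

prog = Program(
    (P.grandparent(X, Y) <= (P.parent(X, Z), P.parent(Z, Y))) % 'transitive step',
    P.parent('a', 'b') % 'a isa b',
    P.parent('b', 'c') % 'b isa c',
)
print(R.render(prog))
```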
|
{
"source": "jeshan/pytest-cleanup",
"score": 2
}
|
#### File: pytest-cleanup/pytest_cleanup/runtime.py
```python
import functools
import inspect
import os
from glob import glob
from random import shuffle
from types import GeneratorType
from typing import TextIO
from _pytest.python import Metafunc
from loguru import logger
from pytest_cleanup.common import (
get_class_that_defined_method,
mergeFunctionMetadata,
is_async_fn,
try_load_dill,
pytestcleanup_decorated_with_record_test_data,
get_name,
)
from pytest_cleanup.constants import test_data_directory
def deserialise(f):
return deserialise_json(f)
def deserialise_json(f: TextIO):
import jsonpickle
contents = f.read()
return jsonpickle.loads(contents)
def transform_function(f):
if getattr(f, pytestcleanup_decorated_with_record_test_data, False):
# raise Exception('Already decorated')
return f
clazz = get_class_that_defined_method(f)
arg_signature = inspect.getfullargspec(f).args
is_cls_function = clazz and arg_signature and arg_signature[0] == 'cls'
@functools.wraps(f)
def wrapper(*args, **kwargs):
if is_cls_function:
first_arg_is_cls = len(args) and not isinstance(list(args)[0], clazz) or not len(args)
if first_arg_is_cls:
args = remove_first_argument(args)
return_value = f(*args, **kwargs)
if isinstance(return_value, GeneratorType):
# generators aren't really comparable, so we compare lists instead
return list(return_value)
return return_value
def remove_first_argument(args):
return tuple(list(args)[1:])
wrapper.pytestcleanup_decorated_with_record_test_data = True
return wrapper
def deserialise_from_file(filename):
with open(filename, 'r') as f:
try:
return deserialise(f)
except Exception as e:
logger.error(f'Error loading data file {filename}')
logger.error(e)
def load_data_file(filename, is_async):
data = deserialise_from_file(filename)
if not data:
return
fn = data['function']
if (is_async and not is_async_fn(fn)) or (not is_async and is_async_fn(fn)):
return
if not fn:
logger.warning(f'Function was not properly loaded from {filename}')
return
module = data['module']
function_name = fn.__name__
clazz = data['class']
class_or_module = clazz or module
if not class_or_module:
# can happen if user loaded std lib modules
return
# raise Exception(f'no class or module found for {filename}')
fn = getattr(class_or_module, function_name)
new_item = mergeFunctionMetadata(fn, transform_function(fn))
return (
module,
clazz,
[
(new_item, try_load_dill(x['args']), try_load_dill(x['kwargs']), edit_return_value(x['return_value']))
for x in data['test_cases']
],
)
def edit_return_value(return_value):
from _collections_abc import list_iterator
return_value = try_load_dill(return_value)
if isinstance(return_value, list_iterator):
# because jsonpickle serialises things like generators as "list iterators"
return_value = list(return_value)
return return_value
def parametrize_stg_tests(metafunc: Metafunc):
if metafunc.definition.name == 'test_pytest_cleanup_async_test_cases':
_parametrize_stg_tests(metafunc, is_async=True)
if metafunc.definition.name == 'test_pytest_cleanup_sync_test_cases':
_parametrize_stg_tests(metafunc, is_async=False)
def _parametrize_stg_tests(metafunc: Metafunc, is_async):
sep = os.sep
path_list = list(sorted(glob(f'{test_data_directory}{sep}*{sep}**{sep}*.json', recursive=True)))
all_test_data = []
all_ids = []
for data_file_path in path_list:
split = data_file_path.split(sep)
function_name = split[-2]
try:
tuple_result = load_data_file(data_file_path, is_async)
if tuple_result:
module, clazz, test_cases = tuple_result
else:
continue
except Exception as e:
logger.error(f'Could not load data file {data_file_path}')
logger.error(e)
raise e
module_name = get_name(module)
class_name = get_name(clazz)
class_or_module_name = module_name if module_name != class_name else f'{module_name}.{class_name}'
ids = [f'{class_or_module_name}-{function_name}'] * len(test_cases)
all_test_data.extend(test_cases)
all_ids.extend(ids)
metafunc.parametrize(['fn', 'args', 'kwargs', 'expected'], all_test_data, ids=all_ids)
```
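A hypothetical `conftest.py` fragment showing how the parametrizer above could be wired into pytest's collection hook; the import path is an assumption based on this file's package name.

```python
# Hypothetical conftest.py sketch; pytest_generate_tests is the standard hook.
from pytest_cleanup.runtime import parametrize_stg_tests


def pytest_generate_tests(metafunc):
    parametrize_stg_tests(metafunc)
```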
#### File: pytest-cleanup/tests/__main__.py
```python
import select
import threading
from collections import deque
import asyncio
from pytest_cleanup import Recorder
from .functions import go_async
from .classes import Inner1, Inner2, Inner3, Inner4, Inner5, Hey
def main():
def hey():
print('hey')
loop = asyncio.get_event_loop()
with Recorder():
loop.run_until_complete(go_async())
# higher_order2()
Inner1().go()
Inner2().go()
Inner3().go()
Inner4().pass_inner(hey)
Inner4().pass_inner(Inner3().go)
Inner5().pass_inner(Inner3().Inner)
Inner5().pass_back(Inner3().Inner)
Hey(1).go_instance()
Hey.go_class()
Hey.go_static()
hey = Hey(2)
hey.call_all_method()
Hey.call_all_class_method()
Hey.call_all_static_method()
loop.close()
if __name__ == '__main__':
main()
```
|
{
"source": "jeshan/simple-python-profiler",
"score": 2
}
|
#### File: simple-python-profiler/simple_python_profiler/main.py
```python
import functools
import inspect
import sys
from time import perf_counter_ns
from typing import List, Dict
from loguru import logger
from recursive_decorator import recursive_decorator
def fn_description(f):
return f'{f.__module__}.{f.__qualname__}'
def sort_fn(invocation):
    # Invocations are stored as dicts (see Profiler.add_invocation), so use key access.
    return invocation['end'] - invocation['start']
def log_call(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
logger.debug(f'Entering {f}')
result = f(*args, **kwargs)
logger.debug(f'Exiting {f}')
return result
return wrapper
@log_call
def sort_invocations_by_individual_time(invocations):
return sorted(invocations, key=sort_fn, reverse=True)
def duration(invocation):
return invocation['end'] - invocation['start']
@log_call
def sort_invocations_by_function_time(group):
name_speed_tuple_list = []
for fn_name, invocations in group.items():
total_per_function = sum(map(lambda x: duration(x), invocations))
name_speed_tuple_list.append((fn_name, total_per_function, len(invocations)))
return sorted(name_speed_tuple_list, key=lambda x: x[1], reverse=True)
@log_call
def group_by_function(invocations: List) -> Dict[object, List]:
result = {}
for invocation in invocations:
f = invocation['f']
if f not in result:
result[f] = []
result[f].append(invocation)
return result
def is_site_package(module):
return 'site-packages' in (module.__dict__.get('__file__') or {})
def exclude_paths(module):
return module.__dict__.get('__file__')
def exclude_importers(module):
loader = module.__dict__.get('__loader__')
loader_type = type(loader)
if hasattr(loader_type, '__name__'):
name = loader_type.__name__
elif hasattr(loader, 'name'):
name = loader.name
if loader:
qualified_name = loader_type.__module__ + '.' + name
else:
qualified_name = ''
return qualified_name.endswith('._SixMetaPathImporter')
def is_system_package(module):
from importlib._bootstrap import BuiltinImporter
loader = module.__dict__.get('__loader__')
return (
loader in [BuiltinImporter]
or (
hasattr(module, '__file__')
and f"python{sys.version_info.major}.{sys.version_info.minor}/{(module.__package__ or '').replace('.', '/')}"
in module.__file__
)
or module.__name__.startswith('typing.')
)
def get_loaded_modules():
import sys
all_modules = []
for name, module in sys.modules.items():
all_modules.append((name, module))
return all_modules
def mergeFunctionMetadata(f, g):
# this function was copied from Twisted core, https://github.com/racker/python-twisted-core
# licence notice in file ../LICENCE-Twisted-core
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
To use this function safely you must use the return value. In Python 2.3,
L{mergeFunctionMetadata} will create a new function. In later versions of
Python, C{g} will be mutated and returned.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
try:
import types
merged = types.FunctionType(
g.func_code, g.func_globals, f.__name__, inspect.getargspec(g)[-1], g.func_closure
)
except TypeError:
pass
else:
merged = g
try:
merged.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
merged.__dict__.update(g.__dict__)
merged.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
merged.__module__ = f.__module__
return merged
def time_fn():
return perf_counter_ns()
def singleton(cls):
obj = cls()
# Always return the same object
cls.__new__ = staticmethod(lambda cls: obj)
# Disable __init__
try:
del cls.__init__
except AttributeError:
pass
return cls
@singleton
class Profiler:
def __init__(self):
logger.debug('creating instance of profiler')
self.invocations = []
def add_invocation(self, start, end, result, f):
i = {'start': start, 'end': end, 'result': result, 'f': f}
self.invocations.append(i)
def __enter__(self):
bootstrap()
logger.debug('Start recording invocations')
def __exit__(self, exc_type, exc_val, exc_tb):
logger.debug(f'stopped recording invocations, got {len(self.invocations)} of them.')
invocation_group = group_by_function(self.invocations)
by_time = sort_invocations_by_function_time(invocation_group)
by_time = limit_results(by_time)
print_results(by_time)
@recursive_decorator
def profile_recursive(f):
return profile(f)
def profile(f):
if f in [time_fn, profile]:
return f
# print('in profile', f)
@functools.wraps(f)
def wrapper(*args, **kwargs):
# print('wrapped', f)
start = time_fn()
result = f(*args, **kwargs)
end = time_fn()
Profiler().add_invocation(start, end, result, f)
return result
return wrapper
def edit_functions(items, module):
for fn_name, fn in items:
if fn == edit_functions:
continue
# print('editing', fn_name, fn)
new_item = mergeFunctionMetadata(fn, profile(fn))
setattr(module, fn.__name__, new_item)
def bootstrap():
for name, module in get_loaded_modules():
# print('loading', name)
try:
items = inspect.getmembers(module, inspect.isfunction)
except Exception as e:
# I saw this could happen when in debug mode
logger.warning(f'Failed getting members for module {module}, skipping')
logger.error(e)
continue
# if 'main' not in name:
exclude_site_package = True
exclude_system_package = True
if 'simple_python_profiler' in module.__name__:
logger.trace('Excluding the profiler itself')
continue
if exclude_site_package and is_site_package(module):
logger.trace(f'excluding site package {module}')
continue
if exclude_importers(module):
logger.trace(f'excluding importer {module}')
continue
if exclude_system_package and is_system_package(module):
logger.trace(f'excluding system module {module}')
continue
logger.debug(f'allowing module {module}')
edit_functions(items, module)
def limit_results(groups):
return groups[:100]
@log_call
def print_results(by_time):
for item in by_time:
logger.info(fn_description(item[0]) + f',invoked={item[2]} times, total={item[1] / 1_000_000}ms')
```
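Since `Profiler` is a singleton context manager, a hedged way to profile a script is shown below; only functions in modules that are already imported and not excluded by `bootstrap()` get instrumented.

```python
# Hedged example: wrap the interesting work in the Profiler context manager.
def slow_sum(n):
    return sum(i * i for i in range(n))


if __name__ == '__main__':
    with Profiler():
        slow_sum(1_000_000)
```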
|
{
"source": "JeshuaT/PsyNeuLink",
"score": 2
}
|
#### File: source/_ext/technical_note.py
```python
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx.util.nodes import nested_parse_with_titles
class TechnicalNote(Directive):
# optional_arguments = 1
final_argument_whitespace = True
option_spec = {'name': directives.unchanged}
has_content = True
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
try:
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
classes.append('technical-note')
except ValueError:
raise self.error(
'Invalid class attribute value for "%s" directive: "%s".'
% (self.name, self.arguments[0]))
node = nodes.container(text)
node['classes'].extend(classes)
nested_parse_with_titles(self.state, self.content, node)
return [node]
def setup(app):
app.add_directive("technical_note", TechnicalNote)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
```
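A hedged `conf.py` fragment showing how the extension above is typically registered with Sphinx; the `_ext` path mirrors the directory in this repo but is still an assumption.

```python
# Hypothetical conf.py sketch: make the _ext folder importable, then enable it.
import os
import sys

sys.path.insert(0, os.path.abspath('_ext'))

extensions = ['technical_note']
```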
#### File: components/functions/function.py
```python
import abc
import numbers
import types
import warnings
from enum import Enum, IntEnum
import numpy as np
import typecheck as tc
from psyneulink.core.components.component import ComponentError, DefaultsFlexibility
from psyneulink.core.components.shellclasses import Function, Mechanism
from psyneulink.core.globals.context import ContextFlags, handle_external_context
from psyneulink.core.globals.keywords import (
ARGUMENT_THERAPY_FUNCTION, AUTO_ASSIGN_MATRIX, EXAMPLE_FUNCTION_TYPE, FULL_CONNECTIVITY_MATRIX,
FUNCTION_COMPONENT_CATEGORY, FUNCTION_OUTPUT_TYPE, FUNCTION_OUTPUT_TYPE_CONVERSION, HOLLOW_MATRIX,
IDENTITY_MATRIX, INVERSE_HOLLOW_MATRIX, NAME, PREFERENCE_SET_NAME, RANDOM_CONNECTIVITY_MATRIX
)
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF, is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel
from psyneulink.core.globals.registry import register_category
from psyneulink.core.globals.utilities import (
convert_to_np_array, get_global_seed, object_has_single_value, parameter_spec, safe_len
)
__all__ = [
'ArgumentTherapy', 'EPSILON', 'Function_Base', 'function_keywords', 'FunctionError', 'FunctionOutputType',
'FunctionRegistry', 'get_param_value_for_function', 'get_param_value_for_keyword', 'is_Function',
'is_function_type', 'PERTINACITY', 'PROPENSITY'
]
EPSILON = np.finfo(float).eps
# numeric to allow modulation, invalid to identify unseeded state
DEFAULT_SEED = -1
FunctionRegistry = {}
function_keywords = {FUNCTION_OUTPUT_TYPE, FUNCTION_OUTPUT_TYPE_CONVERSION}
class FunctionError(ComponentError):
pass
class FunctionOutputType(IntEnum):
RAW_NUMBER = 0
NP_1D_ARRAY = 1
NP_2D_ARRAY = 2
DEFAULT = 3
# Typechecking *********************************************************************************************************
# TYPE_CHECK for Function Instance or Class
def is_Function(x):
if not x:
return False
elif isinstance(x, Function):
return True
elif issubclass(x, Function):
return True
else:
return False
def is_function_type(x):
if not x:
return False
elif isinstance(x, (Function, types.FunctionType, types.MethodType, types.BuiltinFunctionType, types.BuiltinMethodType)):
return True
elif isinstance(x, type) and issubclass(x, Function):
return True
else:
return False
# ******************************* get_param_value_for_keyword ********************************************************
def get_param_value_for_keyword(owner, keyword):
"""Return the value for a keyword used by a subclass of Function
Parameters
----------
owner : Component
keyword : str
Returns
-------
value
"""
try:
return owner.function.keyword(owner, keyword)
except FunctionError as e:
# assert(False)
# prefs is not always created when this is called, so check
try:
owner.prefs
has_prefs = True
except AttributeError:
has_prefs = False
if has_prefs and owner.prefs.verbosePref:
print("{} of {}".format(e, owner.name))
# return None
else:
raise FunctionError(e)
except AttributeError:
# prefs is not always created when this is called, so check
try:
owner.prefs
has_prefs = True
except AttributeError:
has_prefs = False
if has_prefs and owner.prefs.verbosePref:
print("Keyword ({}) not recognized for {}".format(keyword, owner.name))
return None
def get_param_value_for_function(owner, function):
try:
return owner.function.param_function(owner, function)
except FunctionError as e:
if owner.prefs.verbosePref:
print("{} of {}".format(e, owner.name))
return None
except AttributeError:
if owner.prefs.verbosePref:
print("Function ({}) can't be evaluated for {}".format(function, owner.name))
return None
# Parameter Mixins *****************************************************************************************************
# KDM 6/21/18: Below is left in for consideration; doesn't really gain much to justify relaxing the assumption
# that every Parameters class has a single parent
# class ScaleOffsetParamMixin:
# scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
# offset = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM])
# Function Definitions *************************************************************************************************
# KDM 8/9/18: below is added for future use when function methods are completely functional
# used as a decorator for Function methods
# def enable_output_conversion(func):
# @functools.wraps(func)
# def wrapper(*args, **kwargs):
# result = func(*args, **kwargs)
# return convert_output_type(result)
# return wrapper
# this should eventually be moved to a unified validation method
def _output_type_setter(value, owning_component):
# Can't convert from arrays of length > 1 to number
if (
owning_component.defaults.variable is not None
and safe_len(owning_component.defaults.variable) > 1
and owning_component.output_type is FunctionOutputType.RAW_NUMBER
):
raise FunctionError(
f"{owning_component.__class__.__name__} can't be set to return a "
"single number since its variable has more than one number."
)
# warn if user overrides the 2D setting for mechanism functions
# may be removed when
# https://github.com/PrincetonUniversity/PsyNeuLink/issues/895 is solved
# properly(meaning Mechanism values may be something other than 2D np array)
try:
if (
isinstance(owning_component.owner, Mechanism)
and (
value == FunctionOutputType.RAW_NUMBER
or value == FunctionOutputType.NP_1D_ARRAY
)
):
warnings.warn(
f'Functions that are owned by a Mechanism but do not return a '
'2D numpy array may cause unexpected behavior if llvm '
'compilation is enabled.'
)
except (AttributeError, ImportError):
pass
return value
def _seed_setter(value, owning_component, context):
if value in {None, DEFAULT_SEED}:
value = get_global_seed()
value = int(value)
owning_component.parameters.random_state._set(
np.random.RandomState([value]),
context
)
return value
def _random_state_getter(self, owning_component, context):
seed_param = owning_component.parameters.seed
try:
is_modulated = seed_param._port.is_modulated(context)
except AttributeError:
# no ParameterPort
pass
else:
if is_modulated:
# can manage reset_for_context only in getter because we
# don't want to store any copied values from other contexts
# (from _initialize_from_context)
try:
reset_for_context = self._reset_for_context[context.execution_id]
except AttributeError:
self._reset_for_context = {}
reset_for_context = False
except KeyError:
reset_for_context = False
if not reset_for_context:
self._reset_for_context[context.execution_id] = True
return np.random.RandomState([
int(
owning_component._get_current_parameter_value(
seed_param,
context
)
)
])
return self.values[context.execution_id]
class Function_Base(Function):
"""
Function_Base( \
default_variable, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
Implement abstract class for Function category of Component class
COMMENT:
Description:
Functions are used to "wrap" functions used used by other components;
They are defined here (on top of standard libraries) to provide a uniform interface for managing parameters
(including defaults)
NOTE: the Function category definition serves primarily as a shell, and as an interface to the Function
class, to maintain consistency of structure with the other function categories;
                it also ensures implementation of .function for all Function Components
(as distinct from other Function subclasses, which can use a FUNCTION param
to implement .function instead of doing so directly)
Function Components are the end of the recursive line; as such:
they don't implement functionParams
in general, don't bother implementing function, rather...
they rely on Function_Base.function which passes on the return value of .function
Variable and Parameters:
IMPLEMENTATION NOTE: ** DESCRIBE VARIABLE HERE AND HOW/WHY IT DIFFERS FROM PARAMETER
- Parameters can be assigned and/or changed individually or in sets, by:
- including them in the initialization call
- calling the _instantiate_defaults method (which changes their default values)
- including them in a call the function method (which changes their values for just for that call)
- Parameters must be specified in a params dictionary:
- the key for each entry should be the name of the parameter (used also to name associated Projections)
- the value for each entry is the value of the parameter
Return values:
The output_type can be used to specify type conversion for single-item return values:
- it can only be used for numbers or a single-number list; other values will generate an exception
- if self.output_type is set to:
FunctionOutputType.RAW_NUMBER, return value is "exposed" as a number
FunctionOutputType.NP_1D_ARRAY, return value is 1d np.array
FunctionOutputType.NP_2D_ARRAY, return value is 2d np.array
- it must be enabled for a subclass by setting params[FUNCTION_OUTPUT_TYPE_CONVERSION] = True
- it must be implemented in the execute method of the subclass
- see Linear for an example
MechanismRegistry:
All Function functions are registered in FunctionRegistry, which maintains a dict for each subclass,
a count for all instances of that type, and a dictionary of those instances
Naming:
Function functions are named by their componentName attribute (usually = componentType)
Class attributes:
+ componentCategory: FUNCTION_COMPONENT_CATEGORY
+ className (str): kwMechanismFunctionCategory
+ suffix (str): " <className>"
+ registry (dict): FunctionRegistry
+ classPreference (PreferenceSet): BasePreferenceSet, instantiated in __init__()
+ classPreferenceLevel (PreferenceLevel): PreferenceLevel.CATEGORY
Class methods:
none
Instance attributes:
+ componentType (str): assigned by subclasses
+ componentName (str): assigned by subclasses
+ variable (value) - used as input to function's execute method
+ value (value) - output of execute method
+ name (str) - if not specified as an arg, a default based on the class is assigned in register_category
+ prefs (PreferenceSet) - if not specified as an arg, default is created by copying BasePreferenceSet
Instance methods:
The following method MUST be overridden by an implementation in the subclass:
- execute(variable, params)
The following can be implemented, to customize validation of the function variable and/or params:
- [_validate_variable(variable)]
- [_validate_params(request_set, target_set, context)]
COMMENT
Arguments
---------
variable : value : default class_defaults.variable
specifies the format and a default value for the input to `function <Function>`.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable: value
format and default value can be specified by the :keyword:`variable` argument of the constructor; otherwise,
they are specified by the Function's :keyword:`class_defaults.variable`.
function : function
called by the Function's `owner <Function_Base.owner>` when it is executed.
COMMENT:
enable_output_type_conversion : Bool : False
specifies whether `function output type conversion <Function_Output_Type_Conversion>` is enabled.
output_type : FunctionOutputType : None
used to specify the return type for the `function <Function_Base.function>`; `functionOuputTypeConversion`
must be enabled and implemented for the class (see `FunctionOutputType <Function_Output_Type_Conversion>`
for details).
COMMENT
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
componentCategory = FUNCTION_COMPONENT_CATEGORY
className = componentCategory
suffix = " " + className
registry = FunctionRegistry
classPreferenceLevel = PreferenceLevel.CATEGORY
_model_spec_id_parameters = 'args'
_specified_variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
class Parameters(Function.Parameters):
"""
Attributes
----------
variable
see `variable <Function_Base.variable>`
:default value: numpy.array([0])
:type: ``numpy.ndarray``
:read only: True
enable_output_type_conversion
see `enable_output_type_conversion <Function_Base.enable_output_type_conversion>`
:default value: False
:type: ``bool``
output_type
see `output_type <Function_Base.output_type>`
:default value: FunctionOutputType.DEFAULT
:type: `FunctionOutputType`
"""
variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable')
output_type = Parameter(
FunctionOutputType.DEFAULT,
stateful=False,
loggable=False,
pnl_internal=True,
valid_types=FunctionOutputType
)
enable_output_type_conversion = Parameter(False, stateful=False, loggable=False, pnl_internal=True)
# Note: the following enforce encoding as 1D np.ndarrays (one array per variable)
variableEncodingDim = 1
@abc.abstractmethod
def __init__(
self,
default_variable,
params,
owner=None,
name=None,
prefs=None,
context=None,
**kwargs
):
"""Assign category-level preferences, register category, and call super.__init__
Initialization arguments:
- default_variable (anything): establishes type for the variable, used for validation
Note: if parameter_validation is off, validation is suppressed (for efficiency) (Function class default = on)
:param default_variable: (anything but a dict) - value to assign as self.defaults.variable
:param params: (dict) - params to be assigned as instance defaults
:param log: (ComponentLog enum) - log entry types set in self.componentLog
:param name: (string) - optional, overrides assignment of default (componentName of subclass)
:return:
"""
if self.initialization_status == ContextFlags.DEFERRED_INIT:
self._assign_deferred_init_name(name)
self._init_args[NAME] = name
return
register_category(entry=self,
base_class=Function_Base,
registry=FunctionRegistry,
name=name,
)
self.owner = owner
super().__init__(
default_variable=default_variable,
param_defaults=params,
name=name,
prefs=prefs,
**kwargs
)
def __call__(self, *args, **kwargs):
return self.function(*args, **kwargs)
def __deepcopy__(self, memo):
new = super().__deepcopy__(memo)
# ensure copy does not have identical name
register_category(new, Function_Base, new.name, FunctionRegistry)
try:
# HACK: Make sure any copies are re-seeded to avoid dependent RNG.
new.random_state.seed([get_global_seed()])
except:
pass
return new
@handle_external_context()
def function(self,
variable=None,
context=None,
params=None,
target_set=None,
**kwargs):
assert True
# Validate variable and assign to variable, and validate params
variable = self._check_args(variable=variable,
context=context,
params=params,
target_set=target_set,
)
try:
value = self._function(variable=variable,
context=context,
params=params,
**kwargs)
except ValueError as err:
err_msg = f"Problem with '{self}' in '{self.owner.name if self.owner else self.__class__.__name__}': {err}"
raise FunctionError(err_msg)
self.most_recent_context = context
self.parameters.value._set(value, context=context)
self._reset_runtime_parameters(context)
return value
@abc.abstractmethod
def _function(
self,
variable=None,
context=None,
params=None,
):
pass
def _parse_arg_generic(self, arg_val):
if isinstance(arg_val, list):
return np.asarray(arg_val)
else:
return arg_val
def _validate_parameter_spec(self, param, param_name, numeric_only=True):
"""Validates function param
Replaces the direct call to parameter_spec in tc (typecheck), which does not appear to be called by Function's __init__().
"""
if not parameter_spec(param, numeric_only):
owner_name = 'of ' + self.owner_name if self.owner else ""
raise FunctionError(f"{param} is not a valid specification for "
f"the {param_name} argument of {self.__class__.__name__}{owner_name}.")
def _get_current_parameter_value(self, param_name, context=None):
try:
param = getattr(self.parameters, param_name)
except TypeError:
param = param_name
except AttributeError:
# don't accept strings that don't correspond to Parameters
# on this function
raise
return super()._get_current_parameter_value(param, context)
def get_previous_value(self, context=None):
# temporary method until previous values are integrated for all parameters
value = self.parameters.previous_value._get(context)
return value
def convert_output_type(self, value, output_type=None):
if output_type is None:
if not self.enable_output_type_conversion or self.output_type is None:
return value
else:
output_type = self.output_type
value = convert_to_np_array(value)
# Type conversion (specified by output_type):
# MODIFIED 6/21/19 NEW: [JDC]
# Convert to same format as variable
if isinstance(output_type, (list, np.ndarray)):
shape = np.array(output_type).shape
return np.array(value).reshape(shape)
# MODIFIED 6/21/19 END
# Convert to 2D array, irrespective of value type:
if output_type is FunctionOutputType.NP_2D_ARRAY:
# KDM 8/10/18: mimicking the conversion that Mechanism does to its values, because
# this is what we actually wanted this method for. Can be changed to pure 2D np array in
# future if necessary
converted_to_2d = np.atleast_2d(value)
# If return_value is a list of heterogeneous elements, return as is
# (satisfies requirement that return_value be an array of possibly multidimensional values)
if converted_to_2d.dtype == object:
pass
# Otherwise, return value converted to 2d np.array
else:
value = converted_to_2d
# Convert to 1D array, irrespective of value type:
# Note: if a 2D array (or higher) has more than one item in its outer dimension, generate an exception
elif output_type is FunctionOutputType.NP_1D_ARRAY:
# If variable is 2D
if value.ndim >= 2:
# If there is only one item:
if len(value) == 1:
value = value[0]
else:
raise FunctionError(f"Can't convert value ({value}: 2D np.ndarray object "
f"with more than one array) to 1D array.")
elif value.ndim == 1:
value = value
elif value.ndim == 0:
value = np.atleast_1d(value)
else:
raise FunctionError(f"Can't convert value ({value} to 1D array.")
# Convert to raw number, irrespective of value type:
# Note: if the value holds more than a single number, generate an exception
elif output_type is FunctionOutputType.RAW_NUMBER:
if object_has_single_value(value):
value = float(value)
else:
raise FunctionError(f"Can't convert value ({value}) with more than a single number to a raw number.")
return value
@property
def owner_name(self):
try:
return self.owner.name
except AttributeError:
return '<no owner>'
def _is_identity(self, context=None):
# should return True in subclasses if the parameters for context are such that
# the Function's output will be the same as its input
# Used to bypass execute when unnecessary
return False
@property
def _model_spec_parameter_blacklist(self):
return super()._model_spec_parameter_blacklist.union({
'multiplicative_param', 'additive_param',
})
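# --- Illustrative note (not part of the original PsyNeuLink source) ---
# convert_output_type above reshapes a Function's return value according to FunctionOutputType:
#   NP_2D_ARRAY : np.atleast_2d(value), unless the result is a ragged (dtype=object) array
#   NP_1D_ARRAY : value[0] if value is 2D with a single item; an error if it has more than one
#   RAW_NUMBER  : float(value) if value holds a single number; an error otherwise
# If output_type is itself a list or array, value is instead reshaped to that array's shape.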
# ***************************************** EXAMPLE FUNCTION *******************************************************
PROPENSITY = "PROPENSITY"
PERTINACITY = "PERTINACITY"
class ArgumentTherapy(Function_Base):
"""
ArgumentTherapy( \
variable, \
propensity=Manner.CONTRARIAN, \
pertinacity=10.0, \
params=None, \
owner=None, \
name=None, \
prefs=None \
)
.. _ArgumentTherapist:
Return `True` or :keyword:`False` according to the manner of the therapist.
Arguments
---------
variable : boolean or statement that resolves to one : default class_defaults.variable
assertion for which a therapeutic response will be offered.
propensity : Manner value : default Manner.CONTRARIAN
specifies preferred therapeutic manner
pertinacity : float : default 10.0
specifies therapeutic consistency
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
owner : Component
`component <Component>` to which to assign the Function.
name : str : default see `name <Function.name>`
specifies the name of the Function.
prefs : PreferenceSet or specification dict : default Function.classPreferences
specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details).
Attributes
----------
variable : boolean
assertion to which a therapeutic response is made.
propensity : Manner value : default Manner.CONTRARIAN
determines therapeutic manner: tendency to agree or disagree.
pertinacity : float : default 10.0
determines consistency with which the manner complies with the propensity.
owner : Component
`component <Component>` to which the Function has been assigned.
name : str
the name of the Function; if it is not specified in the **name** argument of the constructor, a default is
assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names).
prefs : PreferenceSet or specification dict : Function.classPreferences
the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's
constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences`
for details).
"""
# Function componentName and type (defined at top of module)
componentName = ARGUMENT_THERAPY_FUNCTION
componentType = EXAMPLE_FUNCTION_TYPE
classPreferences = {
PREFERENCE_SET_NAME: 'ExampleClassPreferences',
REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
}
# Mode indicators
class Manner(Enum):
OBSEQUIOUS = 0
CONTRARIAN = 1
# Parameter class defaults
# These are used both to type-cast the params, and as defaults if none are assigned
# in the initialization call or later (using either _instantiate_defaults or during a function call)
def __init__(self,
default_variable=None,
propensity=Manner.CONTRARIAN,
pertinacity=10.0,
params=None,
owner=None,
prefs: tc.optional(is_pref_set) = None):
super().__init__(
default_variable=default_variable,
propensity=propensity,
pertinacity=pertinacity,
params=params,
owner=owner,
prefs=prefs,
)
def _validate_variable(self, variable, context=None):
"""Validates variable and returns validated value
This overrides the class method, to perform more detailed type checking
See explanation in class method.
Note: this method (or the class version) is called only if the parameter_validation attribute is `True`
:param variable: (anything but a dict) - variable to be validated:
:param context: (str)
:return variable: - validated
"""
if type(variable) == type(self.class_defaults.variable) or \
(isinstance(variable, numbers.Number) and isinstance(self.class_defaults.variable, numbers.Number)):
return variable
else:
raise FunctionError(f"Variable must be {type(self.class_defaults.variable)}.")
def _validate_params(self, request_set, target_set=None, context=None):
"""Validates variable and /or params and assigns to targets
This overrides the class method, to perform more detailed type checking
See explanation in class method.
Note: this method (or the class version) is called only if the parameter_validation attribute is `True`
:param request_set: (dict) - params to be validated
:param target_set: (dict) - destination of validated params
:return none:
"""
message = ""
# Check params
for param_name, param_value in request_set.items():
if param_name == PROPENSITY:
if isinstance(param_value, ArgumentTherapy.Manner):
# target_set[self.PROPENSITY] = param_value
pass # This leaves param in request_set, clear to be assigned to target_set in call to super below
else:
message = "Propensity must be of type Example.Mode"
continue
# Validate param
if param_name == PERTINACITY:
if isinstance(param_value, numbers.Number) and 0 <= param_value <= 10:
# target_set[PERTINACITY] = param_value
pass # This leaves param in request_set, clear to be assigned to target_set in call to super below
else:
message += "Pertinacity must be a number between 0 and 10"
continue
if message:
raise FunctionError(message)
super()._validate_params(request_set, target_set, context)
def _function(self,
variable=None,
context=None,
params=None,
):
"""
Returns a boolean that is (or tends to be) the same as or opposite the one passed in.
Arguments
---------
variable : boolean : default class_defaults.variable
an assertion to which a therapeutic response is made.
params : Dict[param keyword: param value] : default None
a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the
function. Values specified for parameters in the dictionary override any assigned to those parameters in
arguments of the constructor.
Returns
-------
therapeutic response : boolean
"""
# Compute the function
statement = variable
propensity = self._get_current_parameter_value(PROPENSITY, context)
pertinacity = self._get_current_parameter_value(PERTINACITY, context)
whim = np.random.randint(-10, 10)
if propensity == self.Manner.OBSEQUIOUS:
value = whim < pertinacity
elif propensity == self.Manner.CONTRARIAN:
value = whim > pertinacity
else:
raise FunctionError("This should not happen if parameter_validation == True; check its value")
return self.convert_output_type(value)
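# --- Illustrative usage sketch (not part of the original PsyNeuLink source) ---
# _function above draws a random "whim" in [-10, 10) and compares it with pertinacity:
# an OBSEQUIOUS therapist returns (whim < pertinacity), a CONTRARIAN one returns
# (whim > pertinacity). With pertinacity=10.0 an OBSEQUIOUS therapist effectively
# always answers True and a CONTRARIAN one always answers False, e.g.:
#   therapy = ArgumentTherapy(propensity=ArgumentTherapy.Manner.OBSEQUIOUS, pertinacity=10.0)
#   therapy.function(True)   # -> True (subject to convert_output_type)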
kwEVCAuxFunction = "EVC AUXILIARY FUNCTION"
kwEVCAuxFunctionType = "EVC AUXILIARY FUNCTION TYPE"
kwValueFunction = "EVC VALUE FUNCTION"
CONTROL_SIGNAL_GRID_SEARCH_FUNCTION = "EVC CONTROL SIGNAL GRID SEARCH FUNCTION"
CONTROLLER = 'controller'
class EVCAuxiliaryFunction(Function_Base):
"""Base class for EVC auxiliary functions
"""
componentType = kwEVCAuxFunctionType
class Parameters(Function_Base.Parameters):
"""
Attributes
----------
variable
see `variable <Function_Base.variable>`
:default value: numpy.array([0])
:type: numpy.ndarray
:read only: True
"""
variable = Parameter(None, pnl_internal=True, constructor_argument='default_variable')
classPreferences = {
PREFERENCE_SET_NAME: 'ValueFunctionCustomClassPreferences',
REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
}
@tc.typecheck
def __init__(self,
function,
variable=None,
params=None,
owner=None,
prefs:is_pref_set=None,
context=None):
self.aux_function = function
super().__init__(default_variable=variable,
params=params,
owner=owner,
prefs=prefs,
context=context,
function=function,
)
def get_matrix(specification, rows=1, cols=1, context=None):
"""Returns matrix conforming to specification with dimensions = rows x cols or None
Specification can be a matrix keyword, filler value or np.ndarray
Specification (validated in _validate_params):
+ single number (used to fill self.matrix)
+ matrix keyword:
+ AUTO_ASSIGN_MATRIX: IDENTITY_MATRIX if it is square, otherwise FULL_CONNECTIVITY_MATRIX
+ IDENTITY_MATRIX: 1's on diagonal, 0's elsewhere (must be square matrix), otherwise generates error
+ HOLLOW_MATRIX: 0's on diagonal, 1's elsewhere (must be square matrix), otherwise generates error
+ INVERSE_HOLLOW_MATRIX: 0's on diagonal, -1's elsewhere (must be square matrix), otherwise generates error
+ FULL_CONNECTIVITY_MATRIX: all 1's
+ RANDOM_CONNECTIVITY_MATRIX (random floats uniformly distributed between 0 and 1)
+ 2D list or np.ndarray of numbers
Returns 2D array with length=rows in dim 0 and length=cols in dim 1, or None if specification is not recognized
"""
# Matrix provided (and validated in _validate_params); convert to array
if isinstance(specification, (list, np.matrix)):
return convert_to_np_array(specification)
if isinstance(specification, np.ndarray):
if specification.ndim == 2:
return specification
# FIX: MAKE THIS AN np.array WITH THE SAME DIMENSIONS??
elif specification.ndim < 2:
return np.atleast_2d(specification)
else:
raise FunctionError("Specification of np.array for matrix ({}) is more than 2d".
format(specification))
if specification == AUTO_ASSIGN_MATRIX:
if rows == cols:
specification = IDENTITY_MATRIX
else:
specification = FULL_CONNECTIVITY_MATRIX
if specification == FULL_CONNECTIVITY_MATRIX:
return np.full((rows, cols), 1.0)
if specification == IDENTITY_MATRIX:
if rows != cols:
raise FunctionError("Sender length ({}) must equal receiver length ({}) to use {}".
format(rows, cols, specification))
return np.identity(rows)
if specification == HOLLOW_MATRIX:
if rows != cols:
raise FunctionError("Sender length ({}) must equal receiver length ({}) to use {}".
format(rows, cols, specification))
return 1 - np.identity(rows)
if specification == INVERSE_HOLLOW_MATRIX:
if rows != cols:
raise FunctionError("Sender length ({}) must equal receiver length ({}) to use {}".
format(rows, cols, specification))
return (1 - np.identity(rows)) * -1
if specification == RANDOM_CONNECTIVITY_MATRIX:
return np.random.rand(rows, cols)
# Function is specified, so assume it uses random.rand() and call with sender_len and receiver_len
if isinstance(specification, types.FunctionType):
return specification(rows, cols)
# (7/12/17 CW) this is a PATCH (like the one in MappingProjection) to allow users to
# specify 'matrix' as a string (e.g. r = RecurrentTransferMechanism(matrix='1 2; 3 4'))
if type(specification) == str:
try:
return np.array(np.matrix(specification))
except (ValueError, NameError, TypeError):
# np.matrix(specification) will give ValueError if specification is a bad value (e.g. 'abc', '1; 1 2')
# [JDC] actually gives NameError if specification is a string (e.g., 'abc')
pass
# Specification not recognized
return None
```
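The `get_matrix` helper above turns several keyword specifications into concrete 2D arrays. The sketch below only mirrors the NumPy constructions visible in the function body (it is not a re-implementation of the PsyNeuLink API, and the keyword constants themselves are defined elsewhere in the package); it shows what a 3x3 request is expected to produce for each keyword:

```python
import numpy as np

rows = cols = 3

identity = np.identity(rows)                   # IDENTITY_MATRIX: 1's on the diagonal (square only)
hollow = 1 - np.identity(rows)                 # HOLLOW_MATRIX: 0's on the diagonal, 1's elsewhere
inverse_hollow = (1 - np.identity(rows)) * -1  # INVERSE_HOLLOW_MATRIX: 0's on the diagonal, -1's elsewhere
full = np.full((rows, cols), 1.0)              # FULL_CONNECTIVITY_MATRIX: all 1's
random_conn = np.random.rand(rows, cols)       # RANDOM_CONNECTIVITY_MATRIX: uniform values in [0, 1)

print(hollow)
# [[0. 1. 1.]
#  [1. 0. 1.]
#  [1. 1. 0.]]
```

Note that, as in the branches above, AUTO_ASSIGN_MATRIX falls back to the identity matrix only when rows == cols; otherwise it uses the full-connectivity matrix.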
#### File: modulatory/learning/learningmechanism.py
```python
import numpy as np
import typecheck as tc
import warnings
from enum import Enum
from psyneulink.core.components.component import parameter_keywords
from psyneulink.core.components.functions.nonstateful.learningfunctions import BackPropagation
from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base
from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base
from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism
from psyneulink.core.components.shellclasses import Mechanism
from psyneulink.core.components.ports.modulatorysignals.learningsignal import LearningSignal
from psyneulink.core.components.ports.parameterport import ParameterPort
from psyneulink.core.globals.context import ContextFlags, handle_external_context
from psyneulink.core.globals.keywords import \
ADDITIVE, AFTER, ASSERT, ENABLED, INPUT_PORTS, \
LEARNED_PARAM, LEARNING, LEARNING_MECHANISM, LEARNING_PROJECTION, LEARNING_SIGNAL, LEARNING_SIGNALS, \
MATRIX, NAME, ONLINE, OUTPUT_PORT, OWNER_VALUE, PARAMS, PROJECTIONS, SAMPLE, PORT_TYPE, VARIABLE
from psyneulink.core.globals.parameters import FunctionParameter, Parameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array, is_numeric, parameter_spec, convert_to_list
__all__ = [
'ACTIVATION_INPUT', 'ACTIVATION_INPUT_INDEX', 'ACTIVATION_OUTPUT', 'ACTIVATION_OUTPUT_INDEX',
'DefaultTrainingMechanism', 'ERROR_SIGNAL', 'ERROR_SIGNAL_INDEX', 'ERROR_SOURCES',
'LearningMechanism', 'LearningMechanismError', 'input_port_names', 'output_port_names'
]
def _is_learning_spec(spec, include_matrix_spec=True):
"""Evaluate whether spec is a valid learning specification
Return `True` if spec is LEARNING or a valid projection_spec (see Projection_Base._is_projection_spec)
Otherwise, return `False`
"""
# MODIFIED 11/28/17 OLD:
from psyneulink.core.components.projections.projection import _is_projection_spec
try:
if spec in {LEARNING, ENABLED}:
return True
else:
return _is_projection_spec(spec=spec,
type=LEARNING_PROJECTION,
include_matrix_spec=include_matrix_spec)
except:
return False
class LearningType(Enum):
"""
Denotes whether LearningMechanism requires a target input.
Attributes
----------
UNSUPERVISED
does not implement an *ERROR_SIGNAL* InputPort.
SUPERVISED
implements (and requires a Projection to) an *ERROR_SIGNAL* InputPort.
"""
UNSUPERVISED = 0
SUPERVISED = 1
class LearningTiming(Enum):
"""
Denotes when the LearningMechanism and its associated `LearningProjection(s) <LearningProjection>` are executed.
Attributes
----------
EXECUTION_PHASE
LearningMechanism (and associated `LearningProjections(s) <LearningProjection>`) executed during the
`execution phase <System_Execution>` of the System to which they belong, usually immediately after execution of
the `Mechanism <Mechanism>` that receives the `primary_learned_projection`
LEARNING_PHASE
LearningMechanism (and associated `LearningProjections(s) <LearningProjection>`) executed during the
`learning phase <System_Execution>` of the System to which they belong.
"""
EXECUTION_PHASE = 0
LEARNING_PHASE = 1
# Parameters:
parameter_keywords.update({LEARNING_PROJECTION, LEARNING})
LEARNING_TYPE = 'learning_type'
LEARNING_TIMING = 'learning_timing'
# Used to index variable:
ACTIVATION_INPUT_INDEX = 0
ACTIVATION_OUTPUT_INDEX = 1
ERROR_SIGNAL_INDEX = 2
# Used to name input_ports and output_ports:
ACTIVATION_INPUT = 'activation_input' # InputPort
ACTIVATION_OUTPUT = 'activation_output' # InputPort
ERROR_SIGNAL = 'error_signal'
input_port_names = [ACTIVATION_INPUT, ACTIVATION_OUTPUT, ERROR_SIGNAL]
output_port_names = [LEARNING_SIGNAL, ERROR_SIGNAL]
ERROR_SOURCES = 'error_sources'
DefaultTrainingMechanism = ObjectiveMechanism
class LearningMechanismError(Exception):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
def _learning_signal_getter(owning_component=None, context=None):
try:
return owning_component.parameters.value._get(context)[0]
except (TypeError, IndexError):
return None
def _error_signal_getter(owning_component=None, context=None):
try:
return owning_component.parameters.value._get(context)[1]
except (TypeError, IndexError):
return None
class LearningMechanism(ModulatoryMechanism_Base):
"""
LearningMechanism( \
variable, \
error_sources, \
function=BackPropagation, \
learning_rate=None, \
learning_signals=LEARNING_SIGNAL, \
modulation=ADDITIVE, \
learning_enabled=True)
Subclass of ModulatoryMechanism that modifies the `matrix <MappingProjection.matrix>` parameter of a
`MappingProjection`. See `Mechanism <Mechanism_Class_Reference>` for additional arguments and attributes.
COMMENT:
Description:
LearningMechanism is a subtype of the ModulatoryMechanism Type of the Mechanism Category of Component
It implements a Mechanism that calculates changes to a Projection's parameters.
Its function takes the output of an ObjectiveMechanism and generates a
learning_signal (ndarray of parameter changes) to be used by the recipient of a LearningProjection
that projects from the LearningMechanism to a MappingProjection.
# DOCUMENT: ??NOT SURE WHETHER THIS IS STILL RELEVANT
# IF objective_mechanism IS None, IT IS LEFT UNSPECIFIED (FOR FURTHER IMPLEMENTATION BY COMPOSITION)
# THESE ARE HANDLED BY A MODULE METHOD _instantiate_objective_mechanism (AS PER OBJECTIVE MECHANISM):
# IF objective_mechanism IS SPECIFIED AS ObjectiveMechanism, AN OBJECTIVE MECHANISM IS CREATED FOR IT
# IF objective_mechanism IS SPECIFIED AS A MECHANISM OR OutputPort,
# a MappingProjection WITH AN IDENTITY MATRIX IS IMPLEMENTED FROM IT TO THE LearningMechanism
Learning function:
Generalized delta rule:
dE/dW = learning_rate * dE/dA * dA/dW * I
weight = weight + (learning_rate * error_derivative * activation_derivative * input)
for sumSquared error fct = (target - output)
for logistic activation fct = output * (1-output)
where:
output = activity of output (target) units (higher layer)
input = activity of sending units (lower layer)
Needs:
- activation_derivative: get from FUNCTION of sample_activation_mechanism/receiver_mech
assumes derivative of Logistic unless otherwise specified
- error_derivative: get from FUNCTION of error_sources/next_level_mech; but handled in ObjectiveMechanism
COMMENT
Arguments
---------
variable : List or 2d np.array
it must have three items that correspond to the three values required by the LearningMechanism's `function
<LearningMechanism.function>`; they must each be compatible (in number and type) with the `value
<InputPort.value>` of the corresponding `InputPort <LearningMechanism_InputPorts>` (see `variable
<LearningMechanism.variable>` for additional details).
error_sources : ComparatorMechanism, LearningMechanism, OutputPort or list of them
specifies the source(s) of the error signal(s) used by the LearningMechanism's `function
<LearningMechanism.function>`. Each must be a `ComparatorMechanism` for `single layer learning
<LearningMechanism_Single_Layer_Learning>`, or for the last `MappingProjection` in a learning pathway in
`multilayer learning <LearningMechanism_Multilayer_Learning>`; otherwise they must be a `LearningMechanism`
or the *ERROR_SIGNAL* OutputPort of one.
function : LearningFunction or function : default BackPropagation
specifies the function used to calculate the LearningMechanism's `learning_signal
<LearningMechanism.learning_signal>` and `error_signal <LearningMechanism.error_signal>` attributes. It's
`variable <Function_Base.variable>` must have three items, each of which must be a list or 1d array of
numeric values, corresponding to values provided by the LearningMechanism's *ACTIVATION_INPUT*,
*ACTIVATION_OUTPUT*, and *ERROR_SIGNAL* InputPorts, respectively (see `LearningMechanism_Function`
and `LearningMechanism_InputPorts` for additional details).
learning_rate : float : default None
specifies the learning rate for the LearningMechanism (see `learning_rate <LearningMechanism.learning_rate>`
for details).
learning_signals : List[parameter of Projection, ParameterPort, Projection, tuple[str, Projection] or dict] :
default *LEARNING_SIGNAL*
specifies the parameter(s) to be learned (see `learning_signals <LearningMechanism.learning_signals>` for
details).
modulation : str : default ADDITIVE
specifies the default form of modulation used by the LearningMechanism's LearningSignals,
unless they are `individually specified <LearningSignal_Specification>`.
learning_enabled : bool or Enum[ONLINE|AFTER] : True
specifies whether and when the LearningMechanism's `LearningProjections <LearningProjection>` are executed
(see `learning_enabled <LearningMechanism.learning_enabled>` for additional details).
Attributes
----------
COMMENT:
componentType : LEARNING_MECHANISM
COMMENT
variable : 2d np.array
has three items that serve as the template for the three inputs required by the LearningMechanism's `function
<LearningMechanism.function>` (corresponding to its three `InputPorts <LearningMechanism_InputPorts>`:
the input to the `primary_learned_projection` (from `input_source`), the output of the Mechanism to which
that projects (i.e., of `output_source`); and the error signal (from `LearningMechanism.error_sources`).
input_ports : ContentAddressableList[OutputPort]
list containing the LearningMechanism's three `InputPorts <LearningMechanism_InputPorts>`:
*ACTIVATION_INPUT*, *ACTIVATION_OUTPUT*, and *ERROR_SIGNAL*.
error_signal_input_ports : list[InputPorts]
list of InputPorts that receive error_signals from the LearningMechanism's `error_sources
<LearningMechanism.error_sources>`.
input_source : ProcessingMechanism
the Mechanism that sends the `primary_learned_projection`, and projects to the
LearningMechanism's *ACTIVATION_INPUT* `InputPort <LearningMechanism_Activation_Input>`.
output_source : ProcessingMechanism
the Mechanism that receives the `primary_learned_projection`, and projects to the
LearningMechanism's *ACTIVATION_OUTPUT* `InputPort <LearningMechanism_Activation_Output>`.
error_sources : list[ComparatorMechanism or LearningMechanism]
the Mechanism(s) that calculate the error signal(s) provided to the
LearningMechanism's *ERROR_SIGNAL(s)* `InputPort(s) <LearningMechanism_Input_Error_Signal>`.
error_matrices : list[ParameterPort]
the matrices of the Projections associated with the `error_sources <LearningMechanism.error_sources>`,
(i.e., for the next Projection(s) in the learning_sequence, or to the `ComparatorMechanism`);
note: these are *not* for the LearningMechanism's `learned_projections <LearningMechanism.learned_projections>`.
primary_learned_projection : MappingProjection
the Projection with the `matrix <MappingProjection.matrix>` parameter used to generate the
LearningMechanism's `error_signal <LearningMechanism.error_signal>` and `learning_signal
<LearningMechanism.learning_signal>` attributes. It is always the first Projection listed in the
LearningMechanism's `learned_projections <LearningMechanism.learned_projections>` attribute.
learned_projections : List[MappingProjection]
all of the MappingProjections modified by the LearningMechanism; the first item in the list is always the
`primary_learned_projection <LearningMechanism.primary_learned_projection>`.
function : LearningFunction or function : default BackPropagation
specifies the function used to calculate the `learning_signal <LearningMechanism.learning_signal>` (assigned
to the LearningMechanism's `LearningSignal(s) <LearningMechanism_LearningSignal>`), and the `error_signal
<LearningMechanism.error_signal>` (passed to the LearningMechanism for the preceding `MappingProjection` in a
`multilayer learning pathway <LearningMechanism_Multilayer_Learning>`). It takes the following
three arguments, each of which must be a list or 1d array: **input**, **output**, and **error** (see
`LearningMechanism_Function` for additional details).
learning_rate : float : None
determines the learning rate for the LearningMechanism. It is used to specify the :keyword:`learning_rate`
parameter for the LearningMechanism's `learning function <LearningMechanism.function>`
(see description of `learning_rate <LearningMechanism_Learning_Rate>` for additional details).
error_signal : 1d np.array
one of two values returned by the LearningMechanism's `function <LearningMechanism.function>`. For
`single layer learning <LearningMechanism_Single_Layer_Learning>`, this is the same as the value received in
the LearningMechanism's *ERROR_SIGNAL* `InputPort <LearningMechanism_Input_Error_Signal>`; for `multilayer
learning <LearningMechanism_Multilayer_Learning>`, it is a modified version of the value received, that takes
account of the contribution made by the learned_projection and its input to the error signal received. This
is assigned as the `value <OutputPort.value>` of the LearningMechanism's *ERROR_SIGNAL* `OutputPort
<LearningMechanism_Output_Error_Signal>`.
learning_signal : number, ndarray or matrix
one of two values returned by the LearningMechanism's `function <LearningMechanism.function>`, that specifies
the changes to the weights of the `matrix <MappingProjection.matrix>` parameter for the LearningMechanism's
`learned_projections <LearningMechanism.learned_projections>`; it is calculated to reduce the error signal
associated with the `primary_learned_projection <LearningMechanism.primary_learned_projection>` and received
from the LearningMechanism's `error_sources`. It is assigned as the value of the LearningMechanism's
`LearningSignal(s) <LearningMechanism_LearningSignal>` and, in turn, its LearningProjection(s).
learning_signals : ContentAddressableList[LearningSignal]
list of all of the `LearningSignals <LearningSignal>` for the LearningMechanism, each of which sends one or
more `LearningProjections <LearningProjection>` to the `ParameterPort(s) <ParameterPort>` for the `matrix
<MappingProjection.matrix>` parameter of the `MappingProjection(s) <MappingProjection>` trained by the
LearningMechanism. The `value <LearningSignal>` of each LearningSignal is the LearningMechanism's
`learning_signal <LearningMechanism.learning_signal>` attribute. Since LearningSignals are `OutputPorts
<OutputPort>`, they are also listed in the LearningMechanism's `output_ports
<LearningMechanism.output_ports>` attribute, after its *ERROR_SIGNAL* `OutputPort
<LearningMechanism_Output_Error_Signal>`.
learning_projections : List[LearningProjection]
list of all of the `LearningProjections <LearningProjection>` from the LearningMechanism, listed in the order of
the `LearningSignals <LearningSignal>` to which they belong (that is, in the order they are listed in
the `learning_signals <LearningMechanism>` attribute).
learning_enabled : bool or Enum[ONLINE|AFTER]
determines whether and when the `learning_projections <LearningMechanism.learning_projections>` are executed.
If set to False, they are never updated; however, the LearningMechanism is still executed in any `Composition`
to which it belongs, so that the error signals it calculates can be passed to any other LearningMechanism(s)
to which it projects (see `LearningMechanism_Multilayer_Learning`). If set to True or `ONLINE`,
`learning_projections <LearningMechanism.learning_projections>` are updated when the LearningMechanism
executes. If set to `AFTER`, `learning_projections <LearningMechanism.learning_projections>` are updated at the
end of each `TRIAL <TimeScale.TRIAL>` of execution of the Composition to which the LearningMechanism belongs.
.. note::
the `learning_enabled <LearningMechanism.learning_enabled>` attribute of a LearningMechanism determines the
default behavior of its `learning_projections <LearningMechanism.learning_projections>`. However, this
can be overridden for individual `LearningProjections <LearningProjection>` by assigning their
`learning_enabled <LearningProjection.learning_enabled>` attributes either at or after construction.
output_ports : ContentAddressableList[OutputPort]
list of the LearningMechanism's `OutputPorts <OutputPort>`, including its *ERROR_SIGNAL* `OutputPort
<LearningMechanism_Output_Error_Signal>`, followed by its `LearningSignal(s)
<LearningMechanism_LearningSignal>`, and then any additional (user-specified) `OutputPorts <OutputPort>`.
COMMENT:
# FIX: THIS MAY NEED TO BE A 3d array (TO ACCOMDATE 2d array (MATRICES) AS ENTRIES)\
COMMENT
output_values : 2d np.array
the first item is the `value <OutputPort.value>` of the LearningMechanism's *ERROR_SIGNAL* `OutputPort
<LearningMechanism_Output_Error_Signal>`, followed by the `value <LearningSignal.value>` \\(s) of its
`LearningSignal(s) <LearningMechanism_LearningSignal>`, and then those of any additional (user-specified)
`OutputPorts <OutputPort>`.
modulation : str
the default form of modulation used by the LearningMechanism's `LearningSignal(s)
<LearningMechanism_LearningSignal>`, unless they are `individually specified <LearningSignal_Specification>`.
"""
componentType = LEARNING_MECHANISM
className = componentType
suffix = " " + className
outputPortTypes = LearningSignal
portListAttr = Mechanism_Base.portListAttr.copy()
portListAttr.update({LearningSignal:LEARNING_SIGNALS})
classPreferenceLevel = PreferenceLevel.TYPE
class Parameters(ModulatoryMechanism_Base.Parameters):
"""
Attributes
----------
error_matrix
see `error_matrix <LearningMechanism.error_matrix>`
:default value: None
:type:
error_signal
see `error_signal <LearningMechanism_Error_Signal>`
:default value: None
:type:
:read only: True
function
see `function <LearningMechanism_Function>`
:default value: `BackPropagation`
:type: `Function`
input_ports
see `input_ports <LearningMechanism.input_ports>`
:default value: [`ACTIVATION_INPUT`, `ACTIVATION_OUTPUT`, `ERROR_SIGNAL`]
:type: ``list``
:read only: True
learning_enabled
see `learning_enabled <LearningMechanism.learning_enabled>`
:default value: True
:type: ``bool``
learning_rate
see `learning_rate <LearningMechanism_Learning_Rate>`
:default value: None
:type:
learning_signal
see `learning_signal <LearningMechanism_Learning_Signal>`
:default value: None
:type:
:read only: True
learning_signals
see `learning_signals <LearningMechanism_Learning_Signals>`
:default value: ["{name: LearningSignal, variable: (OWNER_VALUE, 0)}"]
:type: ``list``
:read only: True
modulation
see `modulation <LearningMechanism.modulation>`
:default value: `ADDITIVE_PARAM`
:type: ``str``
output_ports
see `output_ports <LearningMechanism.output_ports>`
:default value: ["{name: error_signal, port_type: OutputPort, variable: (OWNER_VALUE, 1)}"]
:type: ``list``
:read only: True
"""
function = Parameter(BackPropagation, stateful=False, loggable=False)
error_matrix = Parameter(None, modulable=True)
learning_signal = Parameter(None, read_only=True, getter=_learning_signal_getter)
error_signal = Parameter(None, read_only=True, getter=_error_signal_getter)
learning_rate = FunctionParameter(None)
learning_enabled = True
modulation = ADDITIVE
input_ports = Parameter(
[ACTIVATION_INPUT, ACTIVATION_OUTPUT, ERROR_SIGNAL],
stateful=False,
loggable=False,
read_only=True,
structural=True,
parse_spec=True,
)
output_ports = Parameter(
[
{
NAME: ERROR_SIGNAL,
PORT_TYPE: OUTPUT_PORT,
VARIABLE: (OWNER_VALUE, 1)
},
],
stateful=False,
loggable=False,
read_only=True,
structural=True,
)
learning_signals = Parameter(
[
{
NAME: LEARNING_SIGNAL,
VARIABLE: (OWNER_VALUE, 0)
}
],
stateful=False,
loggable=False,
read_only=True,
structural=True,
)
@tc.typecheck
def __init__(self,
# default_variable:tc.any(list, np.ndarray),
default_variable=None,
size=None,
error_sources:tc.optional(tc.any(Mechanism, list))=None,
function=None,
learning_signals:tc.optional(list) = None,
output_ports=None,
modulation:tc.optional(str)=None,
learning_rate:tc.optional(parameter_spec)=None,
learning_enabled:tc.optional(tc.any(bool, tc.enum(ONLINE, AFTER)))=None,
in_composition=False,
params=None,
name=None,
prefs:is_pref_set=None,
**kwargs
):
# IMPLEMENTATION NOTE:
# assign to private attribute as self.error_sources is used as a property
# private attribute is used for validation and in _instantiate_attribute_before_function;
# thereafter, self.error_sources contains actual error_sources
if error_sources:
error_sources = convert_to_list(error_sources)
self._error_sources = error_sources
self.in_composition = in_composition
# # USE FOR IMPLEMENTATION OF deferred_init()
# # Store args for deferred initialization
# self._init_args = locals().copy()
# self._init_args['context'] = self
# self._init_args['name'] = name
# delete self._init_args[ERROR_SOURCES]
# # Flag for deferred initialization
# self.initialization_status = ContextFlags.DEFERRED_INIT
super().__init__(
default_variable=default_variable,
size=size,
modulation=modulation,
function=function,
params=params,
name=name,
prefs=prefs,
learning_enabled=learning_enabled,
learning_signals=learning_signals,
learning_rate=learning_rate,
output_ports=output_ports,
**kwargs
)
def _check_type_and_timing(self):
try:
self.learning_type
except:
raise LearningMechanismError("{} subclass of {} must implement {} attribute".
format(self.__class__.__name__, LearningMechanism.__name__,
repr(LEARNING_TYPE)))
try:
self.learning_timing
except:
raise LearningMechanismError("{} subclass of {} must implement {} attribute".
format(self.__class__.__name__, LearningMechanism.__name__,
repr(LEARNING_TIMING)))
def _parse_function_variable(self, variable, context=None):
function_variable = np.zeros_like(
variable[np.array([ACTIVATION_INPUT_INDEX, ACTIVATION_OUTPUT_INDEX, ERROR_SIGNAL_INDEX])]
)
function_variable[ACTIVATION_INPUT_INDEX] = variable[ACTIVATION_INPUT_INDEX]
function_variable[ACTIVATION_OUTPUT_INDEX] = variable[ACTIVATION_OUTPUT_INDEX]
function_variable[ERROR_SIGNAL_INDEX] = variable[ERROR_SIGNAL_INDEX]
return function_variable
def _validate_variable(self, variable, context=None):
"""Validate that variable has exactly three items: activation_input, activation_output and error_signal
"""
variable = super()._validate_variable(variable, context)
if len(variable) < 3:
raise LearningMechanismError("Variable for {} ({}) must have at least three items ({}, {}, and {}{})".
format(self.name, variable,
ACTIVATION_INPUT,
ACTIVATION_OUTPUT,
ERROR_SIGNAL,"(s)"))
# Validate that activation_input, activation_output are numeric and lists or 1d np.ndarrays
# and that there is the correct number of error_signal_input_ports and error_matrices:
# (which should be the number of items for error_signals in variable)
assert ASSERT, "ADD TEST FOR LEN OF VARIABLE AGAINST NUMBER OF ERROR_SIGNALS AND ERROR_MATRICES"
for i in range(len(variable)):
item_num_string = "Item {} ".format(i)
try:
item_name = self.input_ports.names[i]
except:
try:
item_name = input_port_names[i]
except IndexError:
item_name = f'{ERROR_SIGNAL}-{i-2}'
if not np.array(variable[i]).ndim == 1:
raise LearningMechanismError(f"{item_num_string} of variable for {self.name} ({item_name}:{variable[i]}) "
f"is not a list or 1d np.array.")
if not (is_numeric(variable[i])):
raise LearningMechanismError("{} of variable for {} ({}:{}) is not numeric".
format(item_num_string, self.name, item_name, variable[i]))
return variable
def _validate_params(self, request_set, target_set=None, context=None):
"""Validate error_sources
`error_sources` argument must be an `ObjectiveMechanism`, another `LearningMechanism`, an *ERROR_SIGNAL*
OutputPort of a LearningMechanism, or a list of these, and there must be the same number as there are
ERROR_SIGNAL InputPorts.
"""
super()._validate_params(request_set=request_set, target_set=target_set,context=context)
from psyneulink.core.components.ports.port import _parse_port_spec
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.components.ports.modulatorysignals.learningsignal import LearningSignal
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.components.projections.projection import _validate_receiver
if self._error_sources:
error_sources = self._error_sources
if not isinstance(error_sources, list):
error_sources = [error_sources]
if not len(error_sources) == len(self.defaults.variable[ERROR_SIGNAL_INDEX:]):
raise LearningMechanismError(f"Number of items specified in {repr(ERROR_SOURCES)} arg "
f"for {self.name} ({len(error_sources)}) must equal the number "
f"of its {InputPort.__name__} {ERROR_SIGNAL.upper()}s "
f"({len(self.error_signal_input_ports)}).")
for error_source in error_sources:
if (not isinstance(error_source, (ObjectiveMechanism, LearningMechanism, OutputPort))
or (isinstance(error_source, OutputPort)
and error_source not in error_source.owner.output_ports[ERROR_SIGNAL])):
raise LearningMechanismError(f"{repr(ERROR_SOURCES)} arg for {self.name} ({error_source}) "
f"must be an {ObjectiveMechanism.__name__}, "
f"another {LearningMechanism.__name__}, an {repr(ERROR_SIGNAL)} "
f"{OutputPort.__name__} of one, or list of any of these.")
if LEARNING_SIGNALS in target_set and target_set[LEARNING_SIGNALS]:
if not isinstance(target_set[LEARNING_SIGNALS], list):
raise LearningMechanismError("{} arg of {} must be list".
format(LEARNING_SIGNALS, self.name))
for spec in target_set[LEARNING_SIGNALS]:
learning_signal = _parse_port_spec(port_type=LearningSignal, owner=self, port_spec=spec)
# Validate that the receiver of the LearningProjection (if specified)
# is a MappingProjection and in the same System as self (if specified)
if learning_signal[PARAMS] and PROJECTIONS in learning_signal[PARAMS]:
for learning_projection in learning_signal[PARAMS][PROJECTIONS]:
_validate_receiver(sender_mech=self,
projection=learning_projection,
expected_owner_type=MappingProjection,
spec_type=LEARNING_SIGNAL,
context=context)
else:
pass
def _instantiate_attributes_before_function(self, function=None, context=None):
"""Instantiates MappingProjection(s) from error_sources (if specified) to LearningMechanism
Also determines and assigns `error_matrices` from the `error_sources`, identified as the matrix for the
Projection with which each error_source is associated.
:param function:
"""
if self._error_sources:
self.parameters.input_ports._set(
self.input_ports[:2] + [ERROR_SIGNAL] * len(self._error_sources),
context
)
super()._instantiate_attributes_before_function(function=function, context=context)
self.error_matrices = None
if self._error_sources:
self.error_matrices = [None] * len(self._error_sources)
for i, error_source in enumerate(self._error_sources):
if not self.in_composition:
# IMPLEMENTATION NOTE:
# _create_terminal_backprop_sequence_components and _create_multilayer_backprop_components
# in Composition take care of creating projections from _error_sources to LearningMechanisms
warnings.warn("Instantiation of a LearningMechanism outside of a Composition is tricky!")
if isinstance(error_source, ObjectiveMechanism):
self.error_matrices[i] = np.identity(len(error_source.input_ports[SAMPLE].value))
else:
# IMPLEMENTATION NOTE:
# This assumes that error_source has only one LearningSignal or,
# if it has more, that they are all equivalent
self.error_matrices[i] = error_source.primary_learned_projection.parameter_ports[MATRIX]
def _instantiate_output_ports(self, context=None):
from psyneulink.core.globals.registry import register_category
from psyneulink.core.components.ports.modulatorysignals.learningsignal import LearningSignal
from psyneulink.core.components.ports.port import Port_Base, _instantiate_port
# Create registry for LearningSignals (to manage names)
register_category(entry=LearningSignal,
base_class=Port_Base,
registry=self._portRegistry,
)
# Instantiate LearningSignals if they are specified, and assign to self.output_ports
# Notes:
# - if any LearningSignals are specified they will replace the default LEARNING_SIGNAL OutputPort
# - the LearningSignals are appended to _output_ports, leaving ERROR_SIGNAL as the first entry.
# Instantiate LearningSignals and assign to self.output_ports
for learning_signal in self.learning_signals:
# Instantiate LearningSignal
params = {LEARNED_PARAM: MATRIX}
# Parses learning_signal specifications (in call to Port._parse_port_spec)
# and any embedded Projection specifications (in call to <Port>._instantiate_projections)
learning_signal = _instantiate_port(port_type=LearningSignal,
owner=self,
variable=(OWNER_VALUE,0),
params=params,
reference_value=self.parameters.learning_signal._get(context),
modulation=self.defaults.modulation,
# port_spec=self.learning_signal)
port_spec=learning_signal,
context=context)
# Add LearningSignal to output_ports list
self.output_ports.append(learning_signal)
# Assign LEARNING_SIGNAL as the name of the 1st LearningSignal; the names of any others can be user-defined
first_learning_signal = next(port for port in self.output_ports if isinstance(port, LearningSignal))
first_learning_signal.name = LEARNING_SIGNAL
super()._instantiate_output_ports(context=context)
# Reassign learning_signals to capture any user_defined LearningSignals instantiated in call to super
# and assign them to a ContentAddressableList
self.parameters.learning_signals._set(
ContentAddressableList(
component_type=LearningSignal,
list=[port for port in self.output_ports if isinstance(port, LearningSignal)]
),
context
)
# Initialize _error_signals; this is assigned for efficiency (rather than just using the property)
# since it is used by the execute method
self._error_signal_input_ports = self.error_signal_input_ports
@handle_external_context()
def add_ports(self, error_sources, context=None):
"""Add error_source and error_matrix for each InputPort added"""
ports = super().add_ports(ports=error_sources, update_variable=False, context=context)
instantiated_input_ports = []
for input_port in ports[INPUT_PORTS]:
error_source = input_port.path_afferents[0].sender.owner
self.error_matrices.append(error_source.primary_learned_projection.parameter_ports[MATRIX])
if ERROR_SIGNAL in input_port.name:
self._error_signal_input_ports.append(input_port)
instantiated_input_ports.append(input_port)
# TODO: enable this. fails because LearningMechanism does not have a
# consistent _parse_function_variable
# self._update_default_variable(np.asarray(self.input_values, dtype=int), context)
return instantiated_input_ports
# FIX 7/28/19 [JDC]: REMOVE THIS ONCE error_input_ports HAS SETTER OR IS OTHERWISE REFACTORED
def remove_ports(self, ports):
"""Keep error_signal_input_ports and error_matrices in sych with error_signals in input_ports"""
ports = convert_to_list(ports)
for i, port in enumerate([s for s in ports if s in self.error_signal_input_ports]):
del self.error_matrices[i]
super().remove_ports(ports=ports)
self._error_signal_input_ports = [s for s in self.input_ports if ERROR_SIGNAL in s.name]
def _execute(
self,
variable=None,
context=None,
runtime_params=None,
):
"""Execute LearningMechanism function and return learning_signal
Identify error_signals received from LearningMechanisms currently being executed
Assign them, and the corresponding error_matrices to a pair of arrays
Execute function for each error_signal, error_matrix pair
Sum the learning_signal and error_signal values received from each execution
Returns
-------
List[ndarray, ndarray] : summed learning_signal, summed error_signal
"""
# Get error_signals (from ERROR_SIGNAL InputPorts) and error_matrices relevant for the current execution:
error_signal_indices = self.error_signal_indices
error_signal_inputs = variable[error_signal_indices]
# FIX 7/22/19 [JDC]: MOVE THIS TO ITS OWN METHOD CALLED ON INITIALIZATION AND UPDATED AS NECESSARY
if self.error_matrices is None:
# KAM 6/28/19 Hack to get the correct shape and contents for initial error matrix in backprop
if self.function is BackPropagation or isinstance(self.function, BackPropagation):
mat = []
for i in range(len(error_signal_inputs[0])):
row = []
for j in range(len(error_signal_inputs[0])):
if i == j:
row.append(1.)
else:
row.append(0.)
mat.append(row)
self.error_matrices = mat
error_matrices = mat
else:
self.error_matrices = [[0.]]
error_matrices = \
np.array(self.error_matrices)[np.array([c - ERROR_SIGNAL_INDEX for c in error_signal_indices])]
else:
error_matrices = \
np.array(self.error_matrices)[np.array([c - ERROR_SIGNAL_INDEX for c in error_signal_indices])]
for i, matrix in enumerate(error_matrices):
if isinstance(error_matrices[i], ParameterPort):
error_matrices[i] = error_matrices[i].parameters.value._get(context)
summed_learning_signal = 0
summed_error_signal = 0
# Compute learning_signal for each error_signal (and corresponding error-Matrix):
for error_signal_input, error_matrix in zip(error_signal_inputs, error_matrices):
function_variable = convert_to_np_array(
[
variable[ACTIVATION_INPUT_INDEX],
variable[ACTIVATION_OUTPUT_INDEX],
error_signal_input
]
)
learning_signal, error_signal = super()._execute(variable=function_variable,
# MODIFIED CROSS_PATHWAYS 7/22/19 END
context=context,
error_matrix=error_matrix,
runtime_params=runtime_params,
)
# Sum learning_signals and error_signals
summed_learning_signal += learning_signal
summed_error_signal += error_signal
from psyneulink.core.compositions.report import ReportOutput
if (self.reportOutputPref is not ReportOutput.OFF
and self.initialization_status != ContextFlags.INITIALIZING):
print("\n{} weight change matrix: \n{}\n".format(self.name, summed_learning_signal))
# During initialization return zeros so that the first "real" trial for Backprop does not start
# with the error computed during initialization
if (self.in_composition and
isinstance(self.function, BackPropagation) and
self.initialization_status == ContextFlags.INITIALIZING):
return [0 * summed_learning_signal, 0 * summed_error_signal]
return [summed_learning_signal, summed_error_signal]
# @property
# def learning_enabled(self):
# try:
# return self._learning_enabled
# except AttributeError:
# self._learning_enabled = True
# return self._learning_enabled
#
# @learning_enabled.setter
# def learning_enabled(self, assignment:tc.any(bool, tc.enum(ONLINE, AFTER))):
# self._learning_enabled = assignment
@property
def input_source(self):
try:
return self.input_ports[ACTIVATION_INPUT].path_afferents[0].sender.owner
except IndexError:
return None
@property
def output_source(self):
try:
return self.input_ports[ACTIVATION_OUTPUT].path_afferents[0].sender.owner
except IndexError:
return None
# FIX 7/28/19 [JDC]: PROPERLY MANAGE BACKING FIELD
# (?WITH SETTER, AND LINKED TO INPUT_PORTS PROPERTY?/LIST?)
@property
def error_signal_input_ports(self):
try:
# This is maintained for efficiency (since it is called by execute method)
return self._error_signal_input_ports
except AttributeError:
try:
return [s for s in self.input_ports if ERROR_SIGNAL in s.name]
except:
return [s for s in self.input_ports if ERROR_SIGNAL in s]
@property
def error_signal_indices(self):
current_error_signal_inputs = self.error_signal_input_ports
return [self.input_ports.index(s) for s in current_error_signal_inputs]
@property
def error_sources(self):
error_sources = []
for error_signal_input_port in self.error_signal_input_ports:
for error_signal_projection in error_signal_input_port.path_afferents:
error_sources.append(error_signal_projection.sender.owner)
return error_sources
@property
def primary_learned_projection(self):
return self.learned_projections[0]
@property
def learned_projections(self):
return [lp.receiver.owner for ls in self.learning_signals for lp in ls.efferents]
@property
def dependent_learning_mechanisms(self):
return [p.parameter_ports[MATRIX].mod_afferents[0].sender.owner for p in self.input_source.path_afferents
if p.has_learning_projection]
```
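The COMMENT block in the LearningMechanism docstring above summarizes its learning rule as a generalized delta rule: the weight change is the product of the learning rate, the error derivative, the activation derivative, and the input activity. The following is a minimal NumPy sketch of that arithmetic for a single logistic unit with a sum-squared error, as stated in that docstring; it only illustrates the formula and is not the interface of the BackPropagation Function used by the class:

```python
import numpy as np

def delta_rule_update(inputs, output, target, learning_rate=0.05):
    """Weight changes for the connections into one logistic output unit.

    inputs  : 1d array of sender (lower-layer) activities
    output  : activity of the receiving (logistic) unit
    target  : target value for the receiving unit
    """
    error_derivative = target - output              # for sum-squared error: (target - output)
    activation_derivative = output * (1 - output)   # for a logistic activation function
    # weight change = learning_rate * error_derivative * activation_derivative * input
    return learning_rate * error_derivative * activation_derivative * np.asarray(inputs)

# One weight delta per sender unit:
deltas = delta_rule_update(inputs=[0.2, 0.9], output=0.7, target=1.0)
```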
#### File: components/ports/parameterport.py
```python
import collections
import inspect
import operator
import types
import warnings
from copy import deepcopy
import numpy as np
import typecheck as tc
from psyneulink.core.components.component import Component, parameter_keywords
from psyneulink.core.components.functions.function import get_param_value_for_keyword
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal
from psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port, port_type_keywords
from psyneulink.core.components.shellclasses import Mechanism, Projection, Function
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.globals.keywords import \
CONTEXT, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, FUNCTION, FUNCTION_PARAMS, \
LEARNING_SIGNAL, LEARNING_SIGNALS, MECHANISM, NAME, PARAMETER_PORT, PARAMETER_PORT_PARAMS, PATHWAY_PROJECTION, \
PROJECTION, PROJECTIONS, PROJECTION_TYPE, REFERENCE_VALUE, SENDER, VALUE
from psyneulink.core.globals.parameters import ParameterBase, ParameterAlias, SharedParameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities \
import ContentAddressableList, ReadOnlyOrderedDict, is_iterable, is_numeric, is_value_spec, iscompatible, \
is_instance_or_subclass, UtilitiesError, gen_friendly_comma_str
__all__ = [
'ParameterPort', 'ParameterPortError', 'port_type_keywords',
]
port_type_keywords = port_type_keywords | {PARAMETER_PORT}
class ParameterPortList(ContentAddressableList):
separator = '-'
legal_key_type_strings = ContentAddressableList.legal_key_type_strings + ['Parameter']
_owner_port_suffix = 'self'
def __init__(
self,
component_type,
key=None,
list=None,
name=None,
owner=None,
**kwargs
):
# cache, Parameter keys added when creating Ports, others upon lookup
self.parameter_mapping = {}
self.owner = owner
super().__init__(component_type, key, list, name, **kwargs)
def __contains__(self, item):
try:
return super().__contains__(item)
except ParameterPortError:
return False
def __getitem__(self, key):
try:
return self.parameter_mapping[key]
except KeyError:
pass
try:
return super().__getitem__(key)
except TypeError as e:
# ContentAddressableList throws TypeError when key/index lookup fails
names = self._get_possible_port_names(key)
possible_ports = set()
for name in names:
try:
r = super().__getitem__(name)
possible_ports.add(r)
except TypeError:
pass
if len(possible_ports) == 0:
raise e from None
elif len(possible_ports) == 1:
res = next(iter(possible_ports))
else:
raise ParameterPortError(
f'Multiple ParameterPorts for {key} exist. Did you want'
f' {gen_friendly_comma_str(sorted([p.name for p in possible_ports]))}?'
) from None
except UtilitiesError as e:
# ContentAddressableList throws UtilitiesError if key is not an int
# or string. handle only Parameter key here
if not isinstance(key, ParameterBase):
raise e from None
try:
final_source = key.final_source
except AttributeError:
final_source = key
try:
res = self.parameter_mapping[final_source]
except KeyError:
try:
raise ParameterPortError(
f'No ParameterPort corresponds to {key._owner._owner}'
f'.parameters.{key.name}'
) from None
except AttributeError:
raise e from None
if res is not None:
self.parameter_mapping[key] = res
return res
def __delitem__(self, key):
main_port = self[key]
rem_mapping_keys = set()
for m, port in self.parameter_mapping.items():
if port is main_port:
rem_mapping_keys.add(m)
for m in rem_mapping_keys:
del self.parameter_mapping[m]
del self.data[self.data.index(main_port)]
def _get_possible_port_names(self, param_name):
"""
Returns:
a list of possible parameter port names to check if
*param_name* is actually an alias or alias-with-suffix
(e.g. "leak" is an alias of "integration_rate", and
"leak__integrator_function" should refer to
"integration_rate__integrator_function")
"""
unsuffixed_name = ParameterPortList._get_base_name(param_name)
if unsuffixed_name == param_name:
# all possible function-suffixed names
names = sorted(
[
p.name for p in self.owner.parameters
if is_instance_or_subclass(p.default_value, Function)
]
+ [self._owner_port_suffix]
)
# put 'function' at beginning
try:
function_index = names.index(FUNCTION)
names = (
[names[function_index]]
+ names[0:function_index]
+ names[function_index + 1:]
)
except ValueError:
pass
names = [self._get_explicit_name(param_name, name) for name in names]
else:
names = []
# try to get a Parameter that corresponds to param_name, which
# can have a "shared parameter suffix" that disambiguates which
# desired port it refers to if there are multiple
try:
param = getattr(self.owner.parameters, param_name)
except AttributeError:
try:
param = getattr(self.owner.parameters, unsuffixed_name)
except AttributeError:
return names
# if it's a shared parameter with identical name, there are no
# other aliases we need to add
try:
source_name = param.source.name
except AttributeError:
return names
if source_name != param.name:
if unsuffixed_name == param_name:
# basic alias, e.g. "leak" -> "integration_rate"
names.append(source_name)
else:
# alias with suffix, e.g. "leak__function"
# -> "integration_rate__function"
suffix = ParameterPortList._get_suffix(param_name)
names.append(
ParameterPortList._get_explicit_name(source_name, suffix)
)
if isinstance(param, ParameterAlias):
# alias to another alias or a shared parameter
# e.g. leak -> integration_rate -> rate
names.extend(self._get_possible_port_names(source_name))
else:
# e.g. integration_rate__integrator_function
# -> rate__integrator_function
names.append(
ParameterPortList._get_explicit_name(
source_name,
param.attribute_name
)
)
return names
@classmethod
def _get_explicit_name(cls, port_name, parameter_name=None):
return f'{port_name}{cls.separator}{parameter_name}'
@classmethod
def _get_base_name(cls, explicit_name):
try:
return explicit_name.split(cls.separator)[0]
except IndexError:
return explicit_name
@classmethod
def _get_suffix(cls, explicit_name):
try:
return explicit_name.split(cls.separator)[1]
except IndexError:
return ''
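# --- Illustrative note (not part of the original PsyNeuLink source) ---
# The lookup logic above lets aliases and function-suffixed names resolve to the same port.
# For a hypothetical owner whose 'leak' parameter is an alias of 'integration_rate':
#   ports['leak']                       -> the port for 'integration_rate'
#   ports['leak__integrator_function']  -> the port for 'integration_rate__integrator_function'
# If a bare name matches ports on more than one function, __getitem__ raises a
# ParameterPortError listing the disambiguated (suffixed) candidates.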
class ParameterPortError(Exception):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
class ParameterPort(Port_Base):
"""
ParameterPort( \
owner, \
reference_value=None, \
function=LinearCombination(operation=PRODUCT))
Subclass of `Port <Port>` that represents and possibly modifies the parameter of a `Mechanism <Mechanism>`,
`Projection <Projection>`, or its `Function`. See `Port_Class_Reference` for additional arguments and attributes.
COMMENT:
PortRegistry
-------------
All ParameterPorts are registered in PortRegistry, which maintains an entry for the subclass,
a count for all instances of it, and a dictionary of those instances
COMMENT
Arguments
---------
owner : Mechanism or MappingProjection
the `Mechanism <Mechanism>` or `MappingProjection` to which the ParameterPort belongs; it must be
specified or determinable from the context in which the ParameterPort is created (the initialization of a
ParameterPort cannot be `deferred <Port_Deferred_Initialization>`). The owner of a ParameterPort
for the parameter of a `function <Component.function>` should be specified as the Mechanism or Projection to
which the function belongs.
reference_value : number, list or np.ndarray
specifies the default value of the parameter for which the ParameterPort is responsible.
variable : number, list or np.ndarray
specifies the parameter's initial value and attribute value — that is, the value of the attribute of the
ParameterPort's owner or its `function <Component.function>` assigned to the parameter.
function : Function or method : default LinearCombination(operation=SUM)
specifies the function used to convert the parameter's attribute value (same as the ParameterPort's
`variable <ParameterPort.variable>`) to the ParameterPort's `value <ParameterPort.value>`.
Attributes
----------
mod_afferents : Optional[List[Projection]]
        a list of the `ModulatoryProjections <ModulatoryProjection>` that project to the ParameterPort (i.e.,
for which it is a `receiver <Projection_Base.receiver>`); these can be `ControlProjection(s)
<ControlProjection>` and/or `LearningProjection(s) <LearningProjection>`, but not `GatingProjection
<GatingProjection>`. The `value <ModulatoryProjection_Base.value>` of each must match the format
(number and types of elements) of the ParameterPort's `variable <ParameterPort.variable>`.
variable : number, list or np.ndarray
the parameter's attribute value — that is, the value of the attribute of the
ParameterPort's owner or its `function <Component.function>` assigned to the parameter.
function : Function : default Linear
converts the parameter's attribute value (same as the ParameterPort's `variable <ParameterPort.variable>`)
        to the ParameterPort's `value <ParameterPort.value>`, under the influence of any
        `ModulatoryProjections <ModulatoryProjection>` received by the ParameterPort (and listed in its
        `mod_afferents <ParameterPort.mod_afferents>` attribute). The result is assigned as the ParameterPort's
        `value <ParameterPort.value>`.
value : number, List[number] or np.ndarray
the result returned by the ParameterPort's `function <ParameterPort.function>`, and used by the
ParameterPort's owner or its `function <Component.function>` as the value of the parameter for which the
        ParameterPort is responsible. Note that this is not necessarily the same as the parameter's attribute value
(that is, the value of the owner's attribute for the parameter), since the ParameterPort's
`function <ParameterPort.function>` may modify the latter under the influence of its
`mod_afferents <ParameterPort.mod_afferents>`.
"""
#region CLASS ATTRIBUTES
componentType = PARAMETER_PORT
paramsType = PARAMETER_PORT_PARAMS
portAttributes = Port_Base.portAttributes
connectsWith = [CONTROL_SIGNAL, LEARNING_SIGNAL]
connectsWithAttribute = [CONTROL_SIGNALS, LEARNING_SIGNALS]
projectionSocket = SENDER
modulators = [CONTROL_SIGNAL, LEARNING_SIGNAL]
canReceive = modulators
projection_type = CONTROL_PROJECTION
classPreferenceLevel = PreferenceLevel.TYPE
# Any preferences specified below will override those specified in TYPE_DEFAULT_PREFERENCES
# Note: only need to specify setting; level will be assigned to TYPE automatically
# classPreferences = {
# PREFERENCE_SET_NAME: 'ParameterPortCustomClassPreferences',
# PREFERENCE_KEYWORD<pref>: <setting>...}
#endregion
    @tc.typecheck
def __init__(self,
owner,
reference_value=None,
variable=None,
size=None,
function=None,
projections=None,
params=None,
name=None,
parameter_name=None,
prefs:is_pref_set=None,
**kwargs):
        # Raise exception if the constructor is called directly (i.e., without a context)
context = kwargs.pop(CONTEXT, None)
if context is None:
raise ParameterPortError(f"Contructor for {self.__class__.__name__} cannot be called directly"
f"(context: {context}")
# FIX: UPDATED TO INCLUDE LEARNING [CHANGE THIS TO INTEGRATOR FUNCTION??]
# # Reassign default for MATRIX param of MappingProjection
# if isinstance(owner, MappingProjection) and name is MATRIX:
# function = LinearCombination(operation=SUM)
self.reference_value = reference_value
# Validate sender (as variable) and params
# Note: pass name of Mechanism (to override assignment of componentName in super.__init__)
super(ParameterPort, self).__init__(owner,
variable=variable,
size=size,
projections=projections,
function=function,
params=params,
name=name,
prefs=prefs,
context=context)
def _validate_against_reference_value(self, reference_value):
"""Validate that value of the Port is compatible with the reference_value
reference_value is the value of the parameter to which the ParameterPort is assigned
"""
        if reference_value is not None and not iscompatible(np.squeeze(reference_value), np.squeeze(self.defaults.value)):
            name = self.name or ""
raise ParameterPortError("Value specified for {} {} of {} ({}) is not compatible "
"with its expected format ({})".
format(name, self.componentName, self.owner.name, self.defaults.value, reference_value))
def _instantiate_projections(self, projections, context=None):
"""Instantiate Projections specified in PROJECTIONS entry of params arg of Port's constructor
Disallow any PathwayProjections
Call _instantiate_projections_to_port to assign ModulatoryProjections to .mod_afferents
"""
# MODIFIED 7/8/17
# FIX: THIS SHOULD ALSO LOOK FOR OTHER FORMS OF SPECIFICATION
# FIX: OF A PathwayProjection (E.G., TARGET PORT OR MECHANISM)
from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base
pathway_projections = [proj for proj in projections if isinstance(proj, PathwayProjection_Base)]
if pathway_projections:
pathway_proj_names = []
for proj in pathway_projections:
pathway_proj_names.append(proj.name + ' ')
raise PortError("{} not allowed for {}: {}".
                            format(PathwayProjection_Base.__name__,
self.__class__.__name__,
pathway_proj_names))
self._instantiate_projections_to_port(projections=projections, context=context)
def _check_for_duplicate_projections(self, projection):
"""Check if projection is redundant with one in mod_afferents of ParameterPort
Check for any instantiated projection in mod_afferents with the same sender as projection
or one in deferred_init status with sender specification that is the same type as projection.
Returns redundant Projection if found, otherwise False.
"""
duplicate = next(iter([proj for proj in self.mod_afferents
if ((proj.sender == projection.sender and proj != projection)
or (proj.initialization_status == ContextFlags.DEFERRED_INIT
and proj._init_args[SENDER] == type(projection.sender)))]), None)
        if duplicate and (self.verbosePref or self.owner.verbosePref):
from psyneulink.core.components.projections.projection import Projection
warnings.warn(f'{Projection.__name__} from {projection.sender.name} {projection.sender.__class__.__name__}'
f' of {projection.sender.owner.name} to {self.name} {self.__class__.__name__} of '
f'{self.owner.name} already exists; will ignore additional one specified ({projection.name}).')
return duplicate
@tc.typecheck
def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec):
"""Get connections specified in a ParameterPort specification tuple
Tuple specification can be:
(port_spec, projections)
Assumes that port_spec has already been extracted and used by _parse_port_spec
Returns params dict with PROJECTIONS entries if any of these was specified.
"""
from psyneulink.core.components.projections.projection import _parse_connection_specs, _is_projection_spec
params_dict = {}
port_spec = port_specific_spec
if isinstance(port_specific_spec, dict):
return None, port_specific_spec
elif isinstance(port_specific_spec, tuple):
tuple_spec = port_specific_spec
# GET PORT_SPEC (PARAM VALUE) AND ASSIGN PROJECTIONS_SPEC **********************************************
# 2-item tuple specification
if len(tuple_spec) == 2:
# 1st item is a value, so treat as Port spec (and return to _parse_port_spec to be parsed)
# and treat 2nd item as Projection specification
if is_numeric(tuple_spec[0]):
port_spec = tuple_spec[0]
reference_value = port_dict[REFERENCE_VALUE]
# Assign value so sender_dim is skipped below
# (actual assignment is made in _parse_port_spec)
if reference_value is None:
port_dict[REFERENCE_VALUE]=port_spec
elif not iscompatible(port_spec, reference_value):
raise PortError("Value in first item of 2-item tuple specification for {} of {} ({}) "
"is not compatible with its {} ({})".
format(ParameterPort.__name__, owner.name, port_spec,
REFERENCE_VALUE, reference_value))
projections_spec = tuple_spec[1]
elif _is_projection_spec(tuple_spec[0], include_matrix_spec=True):
port_spec, projections_spec = tuple_spec
# Tuple is Projection specification that is used to specify the Port,
else:
# return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec
port_spec = None
if tuple_spec[0] != self:
# If 1st item is not the current port (self), treat as part of the projection specification
projections_spec = tuple_spec
else:
# Otherwise, just use 2nd item as projection spec
port_spec = None
projections_spec = tuple_spec[1]
# 3- or 4-item tuple specification
elif len(tuple_spec) in {3,4}:
# Tuple is projection specification that is used to specify the Port,
# so return None in port_spec to suppress further, recursive parsing of it in _parse_port_spec
port_spec = None
# Reduce to 2-item tuple Projection specification
projection_item = tuple_spec[3] if len(tuple_spec)==4 else None
projections_spec = (tuple_spec[0],projection_item)
# GET PROJECTIONS IF SPECIFIED *************************************************************************
try:
projections_spec
except UnboundLocalError:
pass
else:
try:
params_dict[PROJECTIONS] = _parse_connection_specs(self,
owner=owner,
connections=projections_spec)
# Parse the value of all of the Projections to get/validate parameter value
from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection
from psyneulink.core.components.projections.modulatory.learningprojection import LearningProjection
for projection_spec in params_dict[PROJECTIONS]:
if port_dict[REFERENCE_VALUE] is None:
# FIX: - PUTTING THIS HERE IS A HACK...
# FIX: MOVE TO _parse_port_spec UNDER PROCESSING OF ProjectionTuple SPEC
# FIX: USING _get_port_for_socket
# from psyneulink.core.components.projections.projection import _parse_projection_spec
# defaults.value?
mod_signal_value = projection_spec.port.value \
if isinstance(projection_spec.port, Port_Base) else None
mod_projection = projection_spec.projection
if isinstance(mod_projection, dict):
if mod_projection[PROJECTION_TYPE] not in {ControlProjection, LearningProjection}:
raise ParameterPortError("PROGRAM ERROR: {} other than {} or {} ({}) found "
"in specification tuple for {} param of {}".
format(Projection.__name__,
ControlProjection.__name__,
LearningProjection.__name__,
mod_projection, port_dict[NAME], owner.name))
elif VALUE in mod_projection:
mod_proj_value = mod_projection[VALUE]
else:
mod_proj_value = None
elif isinstance(mod_projection, Projection):
if not isinstance(mod_projection, (ControlProjection, LearningProjection)):
raise ParameterPortError("PROGRAM ERROR: {} other than {} or {} ({}) found "
"in specification tuple for {} param of {}".
format(Projection.__name__,
ControlProjection.__name__,
LearningProjection.__name__,
mod_projection, port_dict[NAME], owner.name))
elif mod_projection.initialization_status == ContextFlags.DEFERRED_INIT:
continue
mod_proj_value = mod_projection.defaults.value
else:
raise ParameterPortError("Unrecognized Projection specification for {} of {} ({})".
format(self.name, owner.name, projection_spec))
# FIX: 11/25/17 THIS IS A MESS: CHECK WHAT IT'S ACTUALLY DOING
# If ModulatoryProjection's value is not specified, try to assign one
if mod_proj_value is None:
# If not specified for Port, assign that
if VALUE not in port_dict or port_dict[VALUE] is None:
port_dict[VALUE] = mod_signal_value
# If value has been assigned, make sure value is the same for ModulatorySignal
elif port_dict[VALUE] != mod_signal_value:
# If the values differ, assign None so that Port's default is used
port_dict[VALUE] = None
# No need to check any more ModulatoryProjections
break
#
else:
port_dict[VALUE] = mod_proj_value
except ParameterPortError:
raise ParameterPortError("Tuple specification in {} specification dictionary "
"for {} ({}) is not a recognized specification for one or more "
"{}s, {}s, or {}s that project to it".
format(ParameterPort.__name__,
owner.name,
projections_spec,
Mechanism.__name__,
ModulatorySignal.__name__,
Projection.__name__))
elif port_specific_spec is not None:
raise ParameterPortError("PROGRAM ERROR: Expected tuple or dict for {}-specific params but, got: {}".
format(self.__class__.__name__, port_specific_spec))
return port_spec, params_dict
@staticmethod
def _get_port_function_value(owner, function, variable):
"""Return parameter variable (since ParameterPort's function never changes the form of its variable"""
return variable
def _get_variable_from_projections(self, context=None):
"""
        Get the backing field ("base") value of the parameter of the function of the Mechanism to which the ParameterPort belongs.
"""
# FIX 3/6/19: source does not yet seem to have been assigned to owner.function
return self.source._get(context)
@property
def pathway_projections(self):
raise ParameterPortError("PROGRAM ERROR: Attempt to access {} for {}; {}s do not have {}s".
format(PATHWAY_PROJECTION, self.name, PARAMETER_PORT, PATHWAY_PROJECTION))
@pathway_projections.setter
def pathway_projections(self, value):
raise ParameterPortError("PROGRAM ERROR: Attempt to assign {} to {}; {}s cannot accept {}s".
format(PATHWAY_PROJECTION, self.name, PARAMETER_PORT, PATHWAY_PROJECTION))
def _instantiate_parameter_ports(owner, function=None, context=None):
"""Call _instantiate_parameter_port for all modulable parameters to instantiate ParameterPorts for them
    If owner.parameter_ports is None or False:
- no ParameterPorts will be instantiated.
Otherwise, instantiate ParameterPort for each modulable parameter
:param function:
"""
# TBI / IMPLEMENT: use specs to implement ParameterPorts below
owner._parameter_ports = ParameterPortList(
component_type=ParameterPort,
name=owner.name + '.parameter_ports',
owner=owner,
)
# Check that all ParameterPorts for owner have not been explicitly suppressed
try:
if owner.parameter_ports is NotImplemented:
return
except KeyError:
# PARAMETER_PORTS not specified at all, so OK to continue and construct them
pass
# Instantiate ParameterPort for each modulable Parameter on
# function and owner. function is first because in some
# cases a Parameter will be specified on both, and the function's
# values/defaults should take precedence
def skip_parameter_port(parameter):
return (
isinstance(parameter, (ParameterAlias, SharedParameter))
or parameter.name in owner.exclude_from_parameter_ports
or not parameter.modulable
)
port_parameters = collections.defaultdict(set)
port_aliases = set()
owner_ports = set()
# function may be a custom function not yet parsed to a UDF
# function may also be a Function class, in which case parameter
# ports are still created for the modulable Parameters
for p in owner.parameters:
func = p.default_value
if (
not p.reference
and is_instance_or_subclass(func, Function)
and not isinstance(p, (ParameterAlias, SharedParameter))
):
for func_param in func.parameters:
if not skip_parameter_port(func_param):
port_parameters[func_param.name].add(p.name)
if isinstance(p, ParameterAlias):
port_aliases.add(p.name)
if not skip_parameter_port(p):
owner_ports.add(p.name)
for parameter_port_name in port_parameters:
if (
len(port_parameters[parameter_port_name]) > 1
or parameter_port_name in port_aliases
or parameter_port_name in owner_ports
):
add_suffix = True
else:
add_suffix = False
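        # e.g., if both the owner's 'function' and its 'integrator_function' have a
        # modulable 'rate' Parameter, the resulting ports are disambiguated as
        # 'rate__function' and 'rate__integrator_function' (names here are illustrative)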
for corresponding_parameter_component_name in port_parameters[parameter_port_name]:
corresponding_parameter_component = getattr(
owner.parameters,
corresponding_parameter_component_name
)._get(context)
p = getattr(
corresponding_parameter_component.parameters,
parameter_port_name
)
# .function is not finalized yet, because this happens before
# _instantiate_function
if corresponding_parameter_component_name is FUNCTION:
source = operator.attrgetter(f'{FUNCTION}.parameters.{p.name}')
else:
source = p
# use Shared/FunctionParameter value as fallback
try:
value = owner.initial_shared_parameters[corresponding_parameter_component_name][p.name]
except (KeyError, TypeError):
value = None
# if parameter value on actual Parameter was specified or there is
# no Shared/FunctionParameter value, use the actual Parameter default
if p._user_specified or value is None:
if p.spec is not None:
value = p.spec
else:
value = p.default_value
if add_suffix:
explicit_name = ParameterPortList._get_explicit_name(
p.name,
corresponding_parameter_component_name
)
else:
explicit_name = p.name
_instantiate_parameter_port(
owner,
p.name,
value,
context=context,
function=corresponding_parameter_component,
source=source,
explicit_name=explicit_name
)
for p in owner.parameters:
if not skip_parameter_port(p):
if (
p.name in port_parameters
or p.name in port_aliases
):
explicit_name = ParameterPortList._get_explicit_name(
p.name,
ParameterPortList._owner_port_suffix
)
else:
explicit_name = p.name
if p.spec is not None:
value = p.spec
else:
value = p.default_value
_instantiate_parameter_port(
owner,
p.name,
value,
context=context,
function=function,
source=p,
explicit_name=explicit_name,
)
owner.parameter_ports.sort(key=lambda port: port.name)
def _instantiate_parameter_port(
owner,
param_name,
param_value,
context,
function=None,
source=None,
explicit_name=None
):
"""Call _instantiate_port for allowable params, to instantiate a ParameterPort for it
Include ones in function.parameters
Exclude if it is a:
ParameterPort that already exists
non-numeric value (including NotImplemented, False or True)
unless it is:
a tuple (could be one specifying Modulatory Component)
a dict with the name FUNCTION_PARAMS (otherwise exclude)
function or method
IMPLEMENTATION NOTE: FUNCTION_RUNTIME_PARAM_NOT_SUPPORTED
        (this is because self.defaults.function could be a class rather than a bound method;
i.e., not yet instantiated; could be rectified by assignment in _instantiate_function)
# FIX: UPDATE WITH MODULATION_MODS
# FIX: CHANGE TO IntegratorFunction FUnction ONCE LearningProjection MODULATES ParameterPort Function:
If param_name is FUNCTION_PARAMS and param is a matrix (presumably for a MappingProjection)
        modify ParameterPort's function to be LinearCombination (rather than Linear, which is the default)
"""
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import _is_modulatory_spec
from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base
def _get_tuple_for_single_item_modulatory_spec(obj, name, value):
"""Return (<default param value>, <modulatory spec>) for modulatory spec
"""
try:
param_default_value = getattr(obj.defaults, name)
# Only assign default value if it is not None
if param_default_value is not None:
return (param_default_value, value)
else:
return value
except AttributeError:
raise ParameterPortError("Unrecognized specification for {} paramater of {} ({})".
format(param_name, owner.name, param_value))
if explicit_name is None:
explicit_name = param_name
# EXCLUSIONS:
# # Skip if ParameterPort already exists
# if param_name in owner.ParameterPorts:
# return
if param_value is NotImplemented:
return
# Allow numerics but omit booleans (which are treated by is_numeric as numerical)
if is_numeric(param_value) and not isinstance(param_value, bool):
pass
# Only allow a FUNCTION_PARAMS dict
elif isinstance(param_value, (ReadOnlyOrderedDict, dict)) and param_name == FUNCTION_PARAMS:
pass
# Allow ModulatoryProjection
elif isinstance(param_value, Projection):
if isinstance(param_value, ModulatoryProjection_Base):
pass
else:
return
# Allow Projection class
elif inspect.isclass(param_value) and issubclass(param_value, Projection):
if issubclass(param_value, (ModulatoryProjection_Base)):
pass
else:
return
elif _is_modulatory_spec(param_value, include_matrix_spec=False) and not isinstance(param_value, tuple):
# If parameter is a single Modulatory specification (e.g., ControlSignal, or CONTROL, etc.)
# try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item
# (note: exclude matrix since it is allowed as a value specification but not a projection reference)
try:
param_value = _get_tuple_for_single_item_modulatory_spec(function, param_name, param_value)
except ParameterPortError:
param_value = _get_tuple_for_single_item_modulatory_spec(owner, param_name, param_value)
# Allow tuples (could be spec that includes a Projection or Modulation)
elif isinstance(param_value, tuple):
# # FIX: EXTRACT VALUE HERE (AS IN Component.__init__?? [4/18/17]
# param_value = owner._get_param_value_from_tuple(param_value)
pass
# Allow if it is a keyword for a parameter
elif isinstance(param_value, str) and param_value in parameter_keywords:
pass
# Exclude function (see docstring above)
elif param_name == FUNCTION:
return
# (7/19/17 CW) added this if statement below while adding `hetero` and `auto` and AutoAssociativeProjections: this
# allows `hetero` to be specified as a matrix, while still generating a ParameterPort
elif isinstance(param_value, np.ndarray) or isinstance(param_value, np.matrix):
pass
# allow function parameters
elif param_name in function.parameters.names():
pass
# Exclude all others
else:
return
    # Assign ParameterPorts to Component for parameters of its function (function_params), except for ones that:
    #    - are another Component
    #    - are a function or method
    #    - have a value of None (see IMPLEMENTATION NOTE below)
    #    - have the same name as another parameter of the Component (raise exception for this)
# IMPLEMENTATION NOTE:
# The following is necessary since, if ANY parameters of a function are specified, entries are made
# in the FUNCTION_PARAMS dict of its owner for ALL of the function's params; however, their values
# will be set to None (and there may not be a way to determine a
# default; e.g., the length of the array for the weights or exponents params for LinearCombination).
# Therefore, None will be passed as the reference_value, which will cause validation of the
# ParameterPort's function (in _instantiate_function()) to fail.
# Current solution is to simply not instantiate a ParameterPort for any function_param that has
# not been explicitly specified
if param_value is None:
return
if not _is_legal_param_value(owner, param_value):
return
elif (_is_modulatory_spec(param_value, include_matrix_spec=False)
and not isinstance(param_value, tuple)):
# If parameter is a single Modulatory specification (e.g., ControlSignal, or CONTROL, etc.)
# try to place it in a tuple (for interpretation by _parse_port_spec) using default value as 1st item
# (note: exclude matrix since it is allowed as a value specification vs. a projection reference)
try:
param_value = _get_tuple_for_single_item_modulatory_spec(
function,
param_name,
param_value
)
except ParameterPortError:
param_value = _get_tuple_for_single_item_modulatory_spec(
owner,
param_name,
param_value
)
# # FIX: 10/3/17 - ??MOVE THIS TO _parse_port_specific_specs ----------------
# # Use param_value as constraint
# # IMPLEMENTATION NOTE: need to copy, since _instantiate_port() calls _parse_port_value()
# # for constraints before port_spec, which moves items to subdictionaries,
# # which would make them inaccessible to the subsequent parse of port_spec
from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import ModulatorySignal
from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base
if (
is_iterable(param_value)
and any(isinstance(item, (ModulatorySignal, ModulatoryProjection_Base, ModulatoryMechanism_Base)) for item in param_value)
):
reference_value = param_value
else:
reference_value = deepcopy(param_value)
# Assign parameterPort for function_param to the component
port = _instantiate_port(
owner=owner,
port_type=ParameterPort,
name=explicit_name,
port_spec=param_value,
reference_value=reference_value,
reference_value_name=param_name,
params=None,
context=context
)
if port:
owner._parameter_ports[explicit_name] = port
# will be parsed on assignment of function
# FIX: if the function is manually changed after assignment,
# FIX: the source will remain pointing to the original Function
port.source = source
# if the source parameter is not added here, we can't reference
# a ParameterPort by Parameter
owner.parameter_ports.parameter_mapping[source] = port
return port
def _is_legal_param_value(owner, value):
from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec
from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import _is_gating_spec
# LEGAL PARAMETER VALUES:
# # lists, arrays or numeric values
if is_value_spec(value):
return True
# tuple, first item of which is a legal parameter value
# note: this excludes (param_name, Mechanism) tuples used to specify a ParameterPort
# (e.g., if specified for the control_signals param of ControlMechanism)
if isinstance(value, tuple):
if _is_legal_param_value(owner, value[0]):
return True
if isinstance(value, dict) and VALUE in value:
return True
if _is_control_spec(value) or _is_gating_spec(value):
return True
# keyword that resolves to one of the above
if get_param_value_for_keyword(owner, value) is not None:
return True
# Assignment of ParameterPort for Component objects, function or method are not currently supported
if isinstance(value, (types.FunctionType, types.MethodType, Component)):
return False
def _get_parameter_port(sender_owner, sender_type, param_name, component):
"""Return ParameterPort for named parameter of a Mechanism requested by owner
"""
# Validate that component is a Mechanism or Projection
if not isinstance(component, (Mechanism, Projection)):
raise ParameterPortError("Request for {} of a component ({}) that is not a {} or {}".
format(PARAMETER_PORT, component, MECHANISM, PROJECTION))
try:
return component._parameter_ports[param_name]
except KeyError:
# Check that param (named by str) is an attribute of the Mechanism
if not (hasattr(component, param_name) or hasattr(component.function, param_name)):
raise ParameterPortError("{} (in specification of {} {}) is not an attribute "
"of {} or its function"
.format(param_name, sender_type, sender_owner.name, component))
# Check that the Mechanism has a ParameterPort for the param
if param_name not in component._parameter_ports.names:
raise ParameterPortError("There is no ParameterPort for the parameter ({}) of {} "
"specified in {} for {}".
format(param_name, component.name, sender_type, sender_owner.name))
```
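A minimal usage sketch of the ParameterPort machinery above, assuming a standard PsyNeuLink installation; the parameter names shown ('slope', 'intercept') are those of the `Linear` function:
```python
import psyneulink as pnl

# A ParameterPort is created automatically for each modulable parameter of a
# Mechanism and of its function, and can be looked up by name on its owner.
mech = pnl.TransferMechanism(function=pnl.Linear(slope=2.0, intercept=1.0))

print(mech.parameter_ports.names)            # includes 'slope' and 'intercept'
slope_port = mech.parameter_ports['slope']
print(slope_port.value)                      # the (possibly modulated) value used by the function
```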
#### File: core/rpc/graph_pb2_grpc.py
```python
import grpc
from psyneulink.core.rpc import graph_pb2 as graph__pb2
class ServeGraphStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.LoadCustomPnl = channel.unary_unary(
'/graph.ServeGraph/LoadCustomPnl',
request_serializer=graph__pb2.PNLPath.SerializeToString,
response_deserializer=graph__pb2.NullArgument.FromString,
)
self.LoadScript = channel.unary_unary(
'/graph.ServeGraph/LoadScript',
request_serializer=graph__pb2.ScriptPath.SerializeToString,
response_deserializer=graph__pb2.ScriptCompositions.FromString,
)
self.LoadGraphics = channel.unary_unary(
'/graph.ServeGraph/LoadGraphics',
request_serializer=graph__pb2.ScriptPath.SerializeToString,
response_deserializer=graph__pb2.StyleJSON.FromString,
)
self.GetLoggableParameters = channel.unary_unary(
'/graph.ServeGraph/GetLoggableParameters',
request_serializer=graph__pb2.ComponentName.SerializeToString,
response_deserializer=graph__pb2.ParameterList.FromString,
)
self.GetCompositions = channel.unary_unary(
'/graph.ServeGraph/GetCompositions',
request_serializer=graph__pb2.NullArgument.SerializeToString,
response_deserializer=graph__pb2.ScriptCompositions.FromString,
)
self.GetComponents = channel.unary_unary(
'/graph.ServeGraph/GetComponents',
request_serializer=graph__pb2.GraphName.SerializeToString,
response_deserializer=graph__pb2.ScriptComponents.FromString,
)
self.GetJSON = channel.unary_unary(
'/graph.ServeGraph/GetJSON',
request_serializer=graph__pb2.GraphName.SerializeToString,
response_deserializer=graph__pb2.GraphJSON.FromString,
)
self.HealthCheck = channel.unary_unary(
'/graph.ServeGraph/HealthCheck',
request_serializer=graph__pb2.NullArgument.SerializeToString,
response_deserializer=graph__pb2.HealthStatus.FromString,
)
self.UpdateStylesheet = channel.stream_unary(
'/graph.ServeGraph/UpdateStylesheet',
request_serializer=graph__pb2.StyleJSON.SerializeToString,
response_deserializer=graph__pb2.NullArgument.FromString,
)
self.RunComposition = channel.unary_stream(
'/graph.ServeGraph/RunComposition',
request_serializer=graph__pb2.RunTimeParams.SerializeToString,
response_deserializer=graph__pb2.Entry.FromString,
)
class ServeGraphServicer(object):
"""Missing associated documentation comment in .proto file."""
def LoadCustomPnl(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LoadScript(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LoadGraphics(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLoggableParameters(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetCompositions(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetComponents(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetJSON(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def HealthCheck(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateStylesheet(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunComposition(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ServeGraphServicer_to_server(servicer, server):
rpc_method_handlers = {
'LoadCustomPnl': grpc.unary_unary_rpc_method_handler(
servicer.LoadCustomPnl,
request_deserializer=graph__pb2.PNLPath.FromString,
response_serializer=graph__pb2.NullArgument.SerializeToString,
),
'LoadScript': grpc.unary_unary_rpc_method_handler(
servicer.LoadScript,
request_deserializer=graph__pb2.ScriptPath.FromString,
response_serializer=graph__pb2.ScriptCompositions.SerializeToString,
),
'LoadGraphics': grpc.unary_unary_rpc_method_handler(
servicer.LoadGraphics,
request_deserializer=graph__pb2.ScriptPath.FromString,
response_serializer=graph__pb2.StyleJSON.SerializeToString,
),
'GetLoggableParameters': grpc.unary_unary_rpc_method_handler(
servicer.GetLoggableParameters,
request_deserializer=graph__pb2.ComponentName.FromString,
response_serializer=graph__pb2.ParameterList.SerializeToString,
),
'GetCompositions': grpc.unary_unary_rpc_method_handler(
servicer.GetCompositions,
request_deserializer=graph__pb2.NullArgument.FromString,
response_serializer=graph__pb2.ScriptCompositions.SerializeToString,
),
'GetComponents': grpc.unary_unary_rpc_method_handler(
servicer.GetComponents,
request_deserializer=graph__pb2.GraphName.FromString,
response_serializer=graph__pb2.ScriptComponents.SerializeToString,
),
'GetJSON': grpc.unary_unary_rpc_method_handler(
servicer.GetJSON,
request_deserializer=graph__pb2.GraphName.FromString,
response_serializer=graph__pb2.GraphJSON.SerializeToString,
),
'HealthCheck': grpc.unary_unary_rpc_method_handler(
servicer.HealthCheck,
request_deserializer=graph__pb2.NullArgument.FromString,
response_serializer=graph__pb2.HealthStatus.SerializeToString,
),
'UpdateStylesheet': grpc.stream_unary_rpc_method_handler(
servicer.UpdateStylesheet,
request_deserializer=graph__pb2.StyleJSON.FromString,
response_serializer=graph__pb2.NullArgument.SerializeToString,
),
'RunComposition': grpc.unary_stream_rpc_method_handler(
servicer.RunComposition,
request_deserializer=graph__pb2.RunTimeParams.FromString,
response_serializer=graph__pb2.Entry.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'graph.ServeGraph', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ServeGraph(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def LoadCustomPnl(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/LoadCustomPnl',
graph__pb2.PNLPath.SerializeToString,
graph__pb2.NullArgument.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LoadScript(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/LoadScript',
graph__pb2.ScriptPath.SerializeToString,
graph__pb2.ScriptCompositions.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LoadGraphics(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/LoadGraphics',
graph__pb2.ScriptPath.SerializeToString,
graph__pb2.StyleJSON.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetLoggableParameters(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/GetLoggableParameters',
graph__pb2.ComponentName.SerializeToString,
graph__pb2.ParameterList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetCompositions(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/GetCompositions',
graph__pb2.NullArgument.SerializeToString,
graph__pb2.ScriptCompositions.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetComponents(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/GetComponents',
graph__pb2.GraphName.SerializeToString,
graph__pb2.ScriptComponents.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetJSON(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/GetJSON',
graph__pb2.GraphName.SerializeToString,
graph__pb2.GraphJSON.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def HealthCheck(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/graph.ServeGraph/HealthCheck',
graph__pb2.NullArgument.SerializeToString,
graph__pb2.HealthStatus.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateStylesheet(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/graph.ServeGraph/UpdateStylesheet',
graph__pb2.StyleJSON.SerializeToString,
graph__pb2.NullArgument.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RunComposition(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/graph.ServeGraph/RunComposition',
graph__pb2.RunTimeParams.SerializeToString,
graph__pb2.Entry.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
```
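The generated stub above is used with the standard gRPC client pattern; a minimal sketch, assuming a PsyNeuLink graph server is listening at the (illustrative) address below:
```python
import grpc

from psyneulink.core.rpc import graph_pb2, graph_pb2_grpc

with grpc.insecure_channel('localhost:50051') as channel:  # address is illustrative
    stub = graph_pb2_grpc.ServeGraphStub(channel)
    health = stub.HealthCheck(graph_pb2.NullArgument())
    compositions = stub.GetCompositions(graph_pb2.NullArgument())
    print(health, compositions)
```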
#### File: core/scheduling/scheduler.py
```python
import typing
import graph_scheduler
import pint
from psyneulink import _unit_registry
from psyneulink.core.globals.context import Context, handle_external_context
from psyneulink.core.globals.json import JSONDumpable
from psyneulink.core.scheduling.condition import _create_as_pnl_condition
__all__ = [
'Scheduler', 'SchedulingMode'
]
SchedulingMode = graph_scheduler.scheduler.SchedulingMode
class Scheduler(graph_scheduler.Scheduler, JSONDumpable):
def __init__(
self,
composition=None,
graph=None,
conditions=None,
termination_conds=None,
default_execution_id=None,
mode: SchedulingMode = SchedulingMode.STANDARD,
default_absolute_time_unit: typing.Union[str, pint.Quantity] = 1 * _unit_registry.ms,
**kwargs
):
"""
:param composition: (Composition) - the Composition this scheduler is scheduling for
"""
if composition is not None:
# dependency dict
graph = composition.graph_processing.prune_feedback_edges()[0]
if default_execution_id is None:
default_execution_id = composition.default_execution_id
super().__init__(
graph=graph,
conditions=conditions,
termination_conds=termination_conds,
default_execution_id=default_execution_id,
mode=mode,
default_absolute_time_unit=default_absolute_time_unit,
**kwargs,
)
def replace_term_conds(term_conds):
return {
ts: _create_as_pnl_condition(cond) for ts, cond in term_conds.items()
}
self.default_termination_conds = replace_term_conds(self.default_termination_conds)
self.termination_conds = replace_term_conds(self.termination_conds)
def add_condition(self, owner, condition):
super().add_condition(owner, _create_as_pnl_condition(condition))
def add_condition_set(self, conditions):
try:
conditions = conditions.conditions
except AttributeError:
pass
super().add_condition_set({
node: _create_as_pnl_condition(cond)
for node, cond in conditions.items()
})
@handle_external_context(fallback_default=True)
def run(
self,
termination_conds=None,
context=None,
base_context=Context(execution_id=None),
skip_trial_time_increment=False,
):
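        # PsyNeuLink's TRIAL corresponds to graph_scheduler's "environment state
        # update"; the keyword argument is translated accordingly in the call below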
yield from super().run(
termination_conds=termination_conds,
context=context,
execution_id=context.execution_id,
base_execution_id=base_context.execution_id,
skip_environment_state_update_time_increment=skip_trial_time_increment,
)
@property
def _dict_summary(self):
return {
'conditions': {
'termination': {
str.lower(k.name): v._dict_summary for k, v in self.termination_conds.items()
},
'node_specific': {
n.name: self.conditions[n]._dict_summary for n in self.nodes if n in self.conditions
}
}
}
@handle_external_context()
def get_clock(self, context):
return super().get_clock(context.execution_id)
_doc_subs = {
None: [
(
'(When creating a Scheduler explicitly, the set of nodes)',
'A Scheduler can be created explicitly using its constructor. However, more commonly it is created automatically for a `Composition <Composition>` when it is created.\\1',
),
(
r'(\n\* a \*graph specification dictionary\* in the \*\*graph\*\* argument)',
"\n* a `Composition` in the **composition** argument - if a Composition is specified, the Scheduler is created using the nodes and edges in the Composition's `graph <Composition.graph_processing>`, with any `feedback Projections <Composition_Feedback>` pruned as needed to produce an acyclic graph. If there is a cycle comprised of all non-feedback projections, this cycle is reduced to a single `consideration set <consideration_set>`\n\\1",
),
('origin nodes', '`ORIGIN` nodes',),
(
r'Examples\n--------.*\n\.\. _Scheduler_Class_Reference',
"""
Examples
--------
Please see `Condition` for a list of all supported Conditions and their behavior.
* Basic phasing in a linear process::
>>> import psyneulink as pnl
>>> A = pnl.TransferMechanism(name='A')
>>> B = pnl.TransferMechanism(name='B')
>>> C = pnl.TransferMechanism(name='C')
>>> comp = pnl.Composition()
>>> pway = comp.add_linear_processing_pathway([A, B, C])
>>> pway.pathway
[(TransferMechanism A), (MappingProjection MappingProjection from A[RESULT] to B[InputPort-0]), (TransferMechanism B), (MappingProjection MappingProjection from B[RESULT] to C[InputPort-0]), (TransferMechanism C)]
>>> # implicit condition of Always for A
>>> comp.scheduler.add_condition(B, pnl.EveryNCalls(A, 2))
>>> comp.scheduler.add_condition(C, pnl.EveryNCalls(B, 3))
>>> # implicit AllHaveRun Termination condition
>>> execution_sequence = list(comp.scheduler.run())
>>> execution_sequence
[{(TransferMechanism A)}, {(TransferMechanism A)}, {(TransferMechanism B)}, {(TransferMechanism A)}, {(TransferMechanism A)}, {(TransferMechanism B)}, {(TransferMechanism A)}, {(TransferMechanism A)}, {(TransferMechanism B)}, {(TransferMechanism C)}]
* Alternate basic phasing in a linear process::
>>> comp = pnl.Composition()
>>> pway = comp.add_linear_processing_pathway([A, B])
>>> pway.pathway
[(TransferMechanism A), (MappingProjection MappingProjection from A[RESULT] to B[InputPort-0]), (TransferMechanism B)]
>>> comp.scheduler.add_condition(
... A,
... pnl.Any(
... pnl.AtPass(0),
... pnl.EveryNCalls(B, 2)
... )
... )
>>> comp.scheduler.add_condition(
... B,
... pnl.Any(
... pnl.EveryNCalls(A, 1),
... pnl.EveryNCalls(B, 1)
... )
... )
>>> termination_conds = {
... pnl.TimeScale.TRIAL: pnl.AfterNCalls(B, 4, time_scale=pnl.TimeScale.TRIAL)
... }
>>> execution_sequence = list(comp.scheduler.run(termination_conds=termination_conds))
>>> execution_sequence # doctest: +SKIP
[{(TransferMechanism A)}, {(TransferMechanism B)}, {(TransferMechanism B)}, {(TransferMechanism A)}, {(TransferMechanism B)}, {(TransferMechanism B)}]
* Basic phasing in two processes::
>>> comp = pnl.Composition()
>>> pway = comp.add_linear_processing_pathway([A, C])
>>> pway.pathway
[(TransferMechanism A), (MappingProjection MappingProjection from A[RESULT] to C[InputPort-0]), (TransferMechanism C)]
>>> pway = comp.add_linear_processing_pathway([B, C])
>>> pway.pathway
[(TransferMechanism B), (MappingProjection MappingProjection from B[RESULT] to C[InputPort-0]), (TransferMechanism C)]
>>> comp.scheduler.add_condition(A, pnl.EveryNPasses(1))
>>> comp.scheduler.add_condition(B, pnl.EveryNCalls(A, 2))
>>> comp.scheduler.add_condition(
... C,
... pnl.Any(
... pnl.AfterNCalls(A, 3),
... pnl.AfterNCalls(B, 3)
... )
... )
>>> termination_conds = {
... pnl.TimeScale.TRIAL: pnl.AfterNCalls(C, 4, time_scale=pnl.TimeScale.TRIAL)
... }
>>> execution_sequence = list(comp.scheduler.run(termination_conds=termination_conds))
>>> execution_sequence # doctest: +SKIP
[{(TransferMechanism A)}, {(TransferMechanism A), (TransferMechanism B)}, {(TransferMechanism A)}, {(TransferMechanism C)}, {(TransferMechanism A), (TransferMechanism B)}, {(TransferMechanism C)}, {(TransferMechanism A)}, {(TransferMechanism C)}, {(TransferMechanism A), (TransferMechanism B)}, {(TransferMechanism C)}]
.. _Scheduler_Class_Reference
"""
),
(
r'linear graph with two nodes.*In standard mode',
"""linear composition with two nodes::
>>> import psyneulink as pnl
>>> A = pnl.TransferMechanism()
>>> B = pnl.TransferMechanism()
>>> comp = pnl.Composition()
>>> pway = comp.add_linear_processing_pathway([A, B])
>>> comp.scheduler.add_condition(A, pnl.TimeInterval(start=10))
>>> comp.scheduler.add_condition(B, pnl.TimeInterval(start=10))
In standard mode"""
),
(
r'(earlier or later \(e\.g\., when the Condition\nfor a particular node or set of nodes is met\)\.)',
'\\1 When the Scheduler terminates a `TRIAL <TimeScale.TRIAL>`, the `Composition <Composition>` begins processing the next input specified in the call to its `run <Composition.run>` method. Thus, a `TRIAL <TimeScale.TRIAL>` is defined as the scope of processing associated with a given input to the Composition.'
),
(
'(is when all of its constituent environment state updates have terminated.)',
"""\\1 These defaults may be overriden when running a Composition, by passing a dictionary mapping `TimeScales <TimeScale>` to `Conditions <Condition>` in the **termination_processing** argument of a call to `Composition.run` (to terminate the execution of processing)::
Composition.run(
...,
termination_processing={TimeScale.TRIAL: WhenFinished(ddm)}
)
"""
),
(
r'(however, using the `indices in the original consideration queue<.*?\.Scheduler\.consideration_queue_indices>`\.)',
'\\1 `Composition`\\ s will execute nodes in this order, however independent usages of the scheduler may not. The user should be aware of this and set up defaults and inputs to nodes accordingly.'
),
],
'Scheduler': [
(
r'(Arguments\n ---------\n)',
"""\\1
composition : Composition
specifies the `Components <Component>` to be ordered for execution, and any dependencies among them,
based on the `Composition <Composition>`\\'s `graph <Composition.graph_processing>`.
"""
)
]
}
```
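A minimal sketch of the Scheduler wrapper in use, assuming a standard PsyNeuLink installation; it parallels the doctest examples embedded in `_doc_subs` above:
```python
import psyneulink as pnl

A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
comp = pnl.Composition(pathways=[[A, B]])

# The Composition builds its Scheduler automatically; Conditions added here are
# wrapped into PsyNeuLink Conditions by add_condition above.
comp.scheduler.add_condition(B, pnl.EveryNCalls(A, 2))
print(list(comp.scheduler.run()))  # e.g. [{A}, {A}, {B}] under the default AllHaveRun termination
```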
#### File: library/models/MontagueDayanSejnowski96.py
```python
import argparse
import numpy as np
import psyneulink as pnl
all_figures = ['5a', '5b', '5c']
parser = argparse.ArgumentParser()
parser.add_argument('--no-plot', action='store_false', help='Disable plotting', dest='enable_plot')
parser.add_argument('--figure', nargs='+', help='Figure(s) to plot (default=all)', choices=all_figures, default=all_figures)
args = parser.parse_args()
if args.enable_plot:
import matplotlib.pyplot as plt
def build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials):
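    # Each of the 120 trials spans 60 timesteps: the stimulus comes on at
    # timestep 41 and stays on, and the reward (target = 1) is delivered at
    # timestep 54 except on the designated no-reward trials.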
stimulus_onset = 41
reward_delivery = 54
samples = []
targets = []
for trial in range(120):
target = [0.] * 60
target[reward_delivery] = 1.
if trial in no_reward_trials:
target[reward_delivery] = 0.
targets.append(target)
sample = [0.] * 60
for i in range(stimulus_onset, 60):
sample[i] = 1.
samples.append(sample)
return {sample_mechanism: samples,
target_mechanism: targets}
def build_stimulus_dictionary_figure_5c(sample_mechanism, target_mechanism):
stimulus_onset = 42
reward_delivery = 54
# build input dictionary
samples = []
targets = []
for trial in range(150):
target = [0.] * 60
target[reward_delivery] = 1.
if trial > 70:
target[reward_delivery] = 0.
targets.append(target)
sample = [0.] * 60
for i in range(stimulus_onset, 60):
sample[i] = 1.
samples.append(sample)
return {sample_mechanism: samples,
target_mechanism: targets}
def figure_5a():
"""
This creates the plot for figure 5A in the Montague paper. Figure 5A is
a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=0.01),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5A'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
no_reward_trials = {14, 29, 44, 59, 74, 89}
inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
        # Plot Delta Values from trials 1, 30, and 50
with plt.style.context('seaborn'):
plt.plot(delta_vals[0][0], "-o", label="Trial 1")
plt.plot(delta_vals[29][0], "-s", label="Trial 30")
plt.plot(delta_vals[49][0], "-o", label="Trial 50")
plt.title("Montague et. al. (1996) -- Figure 5A")
plt.xlabel("Timestep")
plt.ylabel("∂")
plt.legend()
plt.xlim(xmin=35)
plt.xticks()
plt.show(block=not pnl._called_from_pytest)
return comp
def figure_5b():
"""
This creates the plot for figure 5B in the Montague paper. Figure 5B shows
the 'entire time course of model responses (trials 1-150).' The setup is
the same as in Figure 5A, except that training begins at trial 10.
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=1.0),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5B'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
no_reward_trials = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 29, 44, 59, 74,
89, 104, 119}
inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
with plt.style.context('seaborn'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_vals, y_vals = np.meshgrid(np.arange(120), np.arange(40, 60, step=1))
d_vals = np.array([d[0][40:60] for d in delta_vals]).transpose()
ax.plot_surface(x_vals, y_vals, d_vals)
ax.set_xlabel("Trial")
ax.set_ylabel("Timestep")
ax.set_zlabel("∂")
ax.set_ylim(y_vals.max(), y_vals.min())
ax.set_xlim(0, 120)
ax.set_zlim(-1, 1)
ax.set_title("Montague et. al. (1996) -- Figure 5B")
plt.show(block=not pnl._called_from_pytest)
return comp
def figure_5c():
"""
This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
'extinction of response to the sensory cue.' The setup is the same as
Figure 5A, except that reward delivery stops at trial 70
"""
# Create Processing Components
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=1.0),
name='Action Selection')
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5C'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
# Unpack Relevant Learning Components
prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]
# Create Log
prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)
# Create Stimulus Dictionary
inputs = build_stimulus_dictionary_figure_5c(sample_mechanism, target_mechanism)
# Run Composition
comp.learn(inputs=inputs)
if args.enable_plot:
# Get Delta Values from Log
delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]
with plt.style.context('seaborn'):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_vals, y_vals = np.meshgrid(np.arange(150), np.arange(40, 60, step=1))
d_vals = np.array([d[0][40:60] for d in delta_vals]).transpose()
ax.plot_surface(x_vals, y_vals, d_vals)
ax.set_ylim(y_vals.max(), y_vals.min())
ax.set_xlim(0, 140)
ax.set_zlim(-1, 1)
ax.set_xlabel("Trial")
ax.set_ylabel("Timestep")
ax.set_zlabel("∂")
ax.set_title("Montague et. al. (1996) -- Figure 5C")
plt.show(block=not pnl._called_from_pytest)
return comp
if '5a' in args.figure:
comp_5a = figure_5a()
if '5b' in args.figure:
comp_5b = figure_5b()
if '5c' in args.figure:
comp_5c = figure_5c()
```
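As a quick sanity check on the stimulus construction above, the dictionary returned by `build_stimulus_dictionary` can be inspected directly; string stand-ins are used for the mechanism arguments, which the function only uses as dictionary keys:
```python
# Assumes build_stimulus_dictionary from the script above is in scope.
inputs = build_stimulus_dictionary('sample', 'target', no_reward_trials={14, 29})
samples, targets = inputs['sample'], inputs['target']

assert len(samples) == len(targets) == 120             # one entry per trial
assert all(len(t) == 60 for t in targets)              # 60 timesteps per trial
assert targets[0][54] == 1. and targets[14][54] == 0.  # reward omitted on no-reward trials
```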
#### File: Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py
```python
import time
import numpy as np
import psyneulink as pnl
from Scripts.Debug.Jason_Stroop_Stimuli import get_stimulus_set, TASKS, COLORS, CONDITIONS
# import matplotlib.pyplot as plt
# from matplotlib.lines import Line2D
# import seaborn as sns
# sns.set(style='white', context='talk', palette="colorblind")
np.random.seed(0)
# %matplotlib inline
# %autosave 5
#%%
# constants
experiment_info = f"""
stroop experiment info
- all colors:\t {COLORS}
- all words:\t {COLORS}
- all tasks:\t {TASKS}
- all conditions:{CONDITIONS}
"""
print(experiment_info)
# calculate experiment metadata
n_conditions = len(CONDITIONS)
n_tasks = len(TASKS)
n_colors = len(COLORS)
# OTHER CONSTANTS
N_UNITS = 2
#%% md
## Set up the model
#%%
'''define the stroop model'''
def object_function(x):
return (x[0] * x[2] - x[1] * x[3])/(x[4])
def power_func(input=1,power=2):
return input ** power
def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1):
# model params
integration_rate = 1
hidden_func = pnl.Logistic(gain=1.0, x_0=4.0)
# input layer, color and word
reward = pnl.TransferMechanism(name='reward')
punish = pnl.TransferMechanism(name='punish')
inp_clr = pnl.TransferMechanism(
size=N_UNITS, function=pnl.Linear, name='COLOR INPUT'
)
inp_wrd = pnl.TransferMechanism(
size=N_UNITS, function=pnl.Linear, name='WORD INPUT'
)
# task layer, represent the task instruction; color naming / word reading
inp_task = pnl.TransferMechanism(
size=N_UNITS, function=pnl.Linear, name='TASK'
)
# hidden layer for color and word
hid_clr = pnl.TransferMechanism(
size=N_UNITS,
function=hidden_func,
integrator_mode=True,
integration_rate=integration_rate,
# noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
noise=pnl.NormalDist(standard_deviation=unit_noise_std),
name='COLORS HIDDEN'
)
hid_wrd = pnl.TransferMechanism(
size=N_UNITS,
function=hidden_func,
integrator_mode=True,
integration_rate=integration_rate,
# noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
noise=pnl.NormalDist(standard_deviation=unit_noise_std),
name='WORDS HIDDEN'
)
# output layer
output = pnl.TransferMechanism(
size=N_UNITS,
function=pnl.Logistic,
integrator_mode=True,
integration_rate=integration_rate,
# noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
noise=pnl.NormalDist(standard_deviation=unit_noise_std),
name='OUTPUT'
)
# decision layer, some accumulator
signalSearchRange = pnl.SampleSpec(start=0.05, stop=5, step=0.05)
decision = pnl.DDM(name='Decision',
input_format=pnl.ARRAY,
function=pnl.DriftDiffusionAnalytical(drift_rate=1,
threshold =1,
noise=1,
starting_point=0,
t0=0.35),
output_ports=[pnl.RESPONSE_TIME,
pnl.PROBABILITY_UPPER_THRESHOLD,
pnl.PROBABILITY_LOWER_THRESHOLD]
)
driftrate_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, inp_clr)],
variable=1.0,
intensity_cost_function=pnl.Exponential(rate=1),#pnl.Exponential(rate=0.8),#pnl.Exponential(rate=1),
allocation_samples=signalSearchRange)
threshold_control_signal = pnl.ControlSignal(projections=[(pnl.THRESHOLD, decision)],
variable=1.0,
intensity_cost_function=pnl.Linear(slope=0),
allocation_samples=signalSearchRange)
reward_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
exponents=[[1],[1],[-1]]),
monitor=[reward,
decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
decision.output_ports[pnl.RESPONSE_TIME]])
punish_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
exponents=[[1],[1],[-1]]),
monitor=[punish,
decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
decision.output_ports[pnl.RESPONSE_TIME]])
objective_mech = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.SUM,
weights=[[1],[-1]]),
monitor=[reward_rate, punish_rate])
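# Reading of the three ObjectiveMechanisms above: reward_rate computes
# reward * P(upper threshold) * RT**-1, punish_rate computes
# punish * P(lower threshold) * RT**-1, and objective_mech takes their
# difference, so the quantity passed to the controller is
# (reward*P_up - punish*P_low) / RT.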
# objective_mech = pnl.ObjectiveMechanism(function=object_function,
# monitor=[reward,
# punish,
# decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
# decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
# (decision.output_ports[pnl.RESPONSE_TIME])])
# PROJECTIONS, weights copied from cohen et al (1990)
wts_clr_ih = pnl.MappingProjection(
matrix=[[2.2, -2.2], [-2.2, 2.2]], name='COLOR INPUT TO HIDDEN')
wts_wrd_ih = pnl.MappingProjection(
matrix=[[2.6, -2.6], [-2.6, 2.6]], name='WORD INPUT TO HIDDEN')
wts_clr_ho = pnl.MappingProjection(
matrix=[[1.3, -1.3], [-1.3, 1.3]], name='COLOR HIDDEN TO OUTPUT')
wts_wrd_ho = pnl.MappingProjection(
matrix=[[2.5, -2.5], [-2.5, 2.5]], name='WORD HIDDEN TO OUTPUT')
wts_tc = pnl.MappingProjection(
matrix=[[4.0, 4.0], [0, 0]], name='COLOR NAMING')
wts_tw = pnl.MappingProjection(
matrix=[[0, 0], [4.0, 4.0]], name='WORD READING')
# build the model
model = pnl.Composition(name='STROOP model')
model.add_node(decision, required_roles=pnl.NodeRole.OUTPUT)
model.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)
model.add_node(punish, required_roles=pnl.NodeRole.OUTPUT)
model.add_linear_processing_pathway([inp_clr, wts_clr_ih, hid_clr])
model.add_linear_processing_pathway([inp_wrd, wts_wrd_ih, hid_wrd])
model.add_linear_processing_pathway([hid_clr, wts_clr_ho, output])
model.add_linear_processing_pathway([hid_wrd, wts_wrd_ho, output])
model.add_linear_processing_pathway([inp_task, wts_tc, hid_clr])
model.add_linear_processing_pathway([inp_task, wts_tw, hid_wrd])
model.add_linear_processing_pathway([output, pnl.IDENTITY_MATRIX, decision]) # 3/15/20
# model.add_linear_processing_pathway([output, [[1,-1]], (decision, pnl.NodeRole.OUTPUT)]) # 3/15/20
# model.add_linear_processing_pathway([output, [[1],[-1]], decision]) # 3/15/20
model.add_nodes([reward_rate, punish_rate])
controller = pnl.OptimizationControlMechanism(agent_rep=model,
state_features=[inp_clr.input_port,
inp_wrd.input_port,
inp_task.input_port,
reward.input_port,
punish.input_port],
state_feature_function=pnl.AdaptiveIntegrator(rate=0.1),
objective_mechanism=objective_mech,
function=pnl.GridSearch(),
control_signals=[driftrate_control_signal,
threshold_control_signal])
model.add_controller(controller=controller)
# collect the node handles
nodes = [inp_clr, inp_wrd, inp_task, hid_clr, hid_wrd, output, decision, reward, punish,controller]
metadata = [integration_rate, dec_noise_std, unit_noise_std]
return model, nodes, metadata
#%%
"""
get the stroop model
"""
# turn off noise
unit_noise_std = 0
dec_noise_std = 0
# define the model
model, nodes, model_params = get_stroop_model(unit_noise_std, dec_noise_std)
# fetch the params
[integration_rate, dec_noise_std, unit_noise_std] = model_params
[inp_color, inp_word, inp_task, hid_color, hid_word, output, decision, reward, punish, controller] = nodes
#%%
model.show_graph(show_controller=True)
#%% md
## Input for the model
#%%
model.enable_controller = True
input_reward_list = [10,20,30]
input_punish_list = [10,20,30]
stim_dict_list = []
for input_reward in input_reward_list:
for input_punish in input_punish_list:
input_set = get_stimulus_set(inp_color, inp_word, inp_task, 1)
stim_dict_list.append({inp_color: input_set['color naming']['conflict'][inp_color],
inp_word: input_set['color naming']['conflict'][inp_word],
inp_task: input_set['color naming']['conflict'][inp_task],
reward: input_reward,
punish: input_punish})
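# The nested loops above build 9 input dictionaries (3 reward levels x 3 punish
# levels), each using the same color-naming 'conflict' stimulus set.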
# #%%
#
# stim_dict_list
#
# #%% md
## Run the model
#%%
controller.set_log_conditions('control_allocation')
# run the model
execution_id = 0
for task in stim_dict_list:
print(f'Running Reward: {task[reward]} and Punish: {task[punish]} ... ')
model.run(
inputs=task,
num_trials=1,
context=execution_id,
bin_execute='LLVMRun'
)
execution_id += 1
#%%
```
#### File: Scripts/Debug/markus_test_umemoto.py
```python
import numpy as np
import psyneulink as pnl
# here we implement a test demo as in the EVC paper example:
#in v2 we add control signals and a EVC mechanism to the model
# EVC params for Umemoto et al
import psyneulink.core.components.functions.nonstateful.transferfunctions
w_t = 0.065
w_d = 0.065
f_t = 1
f_d = 1
# EVC params for Umemoto et al
t0 = 0.2
c = 0.19
thresh = 0.21
x_0 = 0 # starting point
#wTarget = 0.065 # I think this has to do with learning and is constant over trials in Umemoto
costParam1 = 0.35
reconfCostParam1 = 5
rewardTaskA = 50
rewardTaskBToA = 0.7
# Control Parameters
signalSearchRange = np.arange(0.0, 4.1, 0.2) #like in MATLAB Umemoto[0.0:0.2:4.0]# needs to be adjusted
print(signalSearchRange)
# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear)
Target_Stim.set_log_conditions('value') # Log Target_Stim
Distractor_Stim = pnl.TransferMechanism(name='Distractor Stimulus', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear)
Distractor_Stim.set_log_conditions('value') # Log Distractor_Stim
# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation',
function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear(
slope=(1.0)))#, pnl.ControlProjection(
# control_signal_params={
# pnl.ALLOCATION_SAMPLES: signalSearchRange}))))
Target_Rep.set_log_conditions('value') # Log Target_Rep
Target_Rep.loggable_items
Distractor_Rep = pnl.TransferMechanism(name='Distractor Representation',
function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear(
slope=(1.0)))#, pnl.ControlProjection(
# control_signal_params={
# pnl.ALLOCATION_SAMPLES: signalSearchRange}))))
Distractor_Rep.set_log_conditions('value') # Log Flanker_Rep
Distractor_Rep.loggable_items
# Processing Mechanism (Automatic)
Automatic_Component_Target = pnl.TransferMechanism(name='Automatic Component Target', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear)
Automatic_Component_Target.loggable_items
Automatic_Component_Target.set_log_conditions('value')
# Markus october 25 2018: I think we need 2 automatic components
Automatic_Component_Flanker = pnl.TransferMechanism(name='Automatic Component Flanker', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear)
Automatic_Component_Flanker.loggable_items
Automatic_Component_Flanker.set_log_conditions('value')
#
# Decision Mechanisms
Decision = pnl.DDM(function=psyneulink.core.components.functions.nonstateful.distributionfunctions.DriftDiffusionAnalytical(
# drift_rate=(0.3),
threshold=(thresh),
noise=(c),
starting_point=(x_0),
t0=t0
),name='Decision',
output_ports=[
pnl.DECISION_VARIABLE,
pnl.RESPONSE_TIME,
pnl.PROBABILITY_UPPER_THRESHOLD,
{
pnl.NAME: 'OFFSET RT',
pnl.VARIABLE: (pnl.OWNER_VALUE, 2),
pnl.FUNCTION: psyneulink.core.components.functions.nonstateful.transferfunctions.Linear(0, slope=1.0, intercept=1)
}
],) #drift_rate=(1.0),threshold=(0.2645),noise=(0.5),starting_point=(0), t0=0.15
print(Decision.execute([1]))
# Decision.set_log_conditions('DECISION_VARIABLE')
# Decision.set_log_conditions('value')
# Decision.set_log_conditions('PROBABILITY_UPPER_THRESHOLD')
Decision.set_log_conditions('InputPort-0')
# Decision.set_log_conditions('RESPONSE_TIME')
# Decision.loggable_items
# Outcome Mechanisms:
Reward = pnl.TransferMechanism(size = 1,
name='Reward')
# Processes:
TargetControlProcess = pnl.Process(
default_variable=[0],
pathway=[Target_Stim, Target_Rep, Decision],
name='Target Control Process'
)
FlankerControlProcess = pnl.Process(
default_variable=[0],
pathway=[Distractor_Stim, Distractor_Rep, Decision],
name='Flanker Control Process'
)
TargetAutomaticProcess = pnl.Process(
default_variable=[0],
pathway=[Target_Stim, Automatic_Component_Target, Decision],
name='Target Automatic Process'
)
FlankerAutomaticProcess = pnl.Process(
default_variable=[0],
pathway=[Distractor_Stim, Automatic_Component_Flanker, Decision], #
name='Flanker1 Automatic Process'
)
RewardProcess = pnl.Process(
pathway=[Reward],
name='RewardProcess'
)
# System:
mySystem = pnl.System(processes=[TargetControlProcess,
FlankerControlProcess,
TargetAutomaticProcess,
FlankerAutomaticProcess,
RewardProcess],
controller=pnl.EVCControlMechanism(
control_signals=pnl.ControlSignal(modulates=[(pnl.SLOPE, Target_Rep),
(pnl.SLOPE, Distractor_Rep)
],
function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic,
cost_options=[pnl.CostFunctions.INTENSITY,
pnl.CostFunctions.ADJUSTMENT],
allocation_samples=signalSearchRange
)),
enable_controller=True,
monitor_for_control=[
# (None, None, np.ones((2,1))), # what the **** is this for? Markus October 25 2018
Reward,
Decision.PROBABILITY_UPPER_THRESHOLD,
('OFFSET RT', 1, -1),
],
name='EVC Markus System')
# log controller
mySystem.loggable_items
# Show characteristics of system:
mySystem.show()
# mySystem.controller.show()
# Show graph of system
mySystem.show_graph(show_control=True, show_dimensions=True)
#Markus: incongruent trial weights:
# f = np.array([1,1])
# W_inc = np.array([[1.0, 0.0],[0.0, 1.5]])
# W_con = np.array([[1.0, 0.0],[1.5, 0.0]])
# generate stimulus environment
nTrials = 3
targetFeatures = [w_t]
flankerFeatures_inc = [w_d]
reward = [100]
targetInputList = targetFeatures
flankerInputList = flankerFeatures_inc
rewardList = reward
stim_list_dict = {
Target_Stim: targetInputList,
Distractor_Stim: flankerInputList,
Reward: rewardList
}
def x():
#print(mySystem.conroller.)
# print(mySystem.controller.control_signals.values)
print("============== ")
print("decision input vale:", Decision.input_values)
print("============== ")
# print(Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD].value)
# print(Decision.output_ports[pnl.DECISION_VARIABLE].value)
# print(Decision.output_ports[pnl.RESPONSE_TIME].value)
# print(Target_Rep.input_values)
# print("target rep variable:", Target_Rep.input_ports[0].variable)
# print("target rep input ports:", Target_Rep.input_ports)
# print("output target stim", Target_Stim.output_values)
#
# print(Target_Rep.path_afferents)
# print("control proj sender value:", Target_Rep.mod_afferents[0].sender.value)
#
# # print(Target_Rep.path_afferents)
#
#
# print("distractor rep input: ", Distractor_Rep.input_values)
# print("my system controller: ", mySystem.controller.control_signals.values)
# print("my system controller SLOPE: ", mySystem.controller.control_signals.values)
#
# print("InputPort bla bla:", Target_Rep.input_ports[0].function.exponents)
# print("============== ")
# print("my system stuff: ", mySystem.controller.control_signals.values)
#
# print(Target_Rep.output_values)
# print(Automatic_Component_Target.output_values)
#
# print(Distractor_Rep.output_values)
# print(Automatic_Component_Flanker.output_values)
mySystem.run(num_trials=nTrials,
inputs=stim_list_dict,
call_after_trial=x)
# Flanker_Rep.log.print_entries()
# Target_Rep.log.print_entries()
from pprint import pprint
a = Decision.log.nparray_dictionary()
pprint(a)
# Target_Stim.log.print_entries()
# Distractor_Stim.log.print_entries()
# Target_Rep.log.print_entries()
# Distractor_Rep.log.print_entries()
#
Decision.log.print_entries()
# mySystem.controller.control_signals.values
```
#### File: Scripts/Models (Under Development)/build_stimuli_VZ.py
```python
def to1HOT(subfeatureNum, colValue):
i = 1
thisarr = []
while i < colValue:
thisarr.append(0)
i += 1
thisarr.append(1)
while (subfeatureNum - i) > 0:
thisarr.append(0)
i += 1
return thisarr
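# Illustrative: to1HOT(8, 1) -> [1, 0, 0, 0, 0, 0, 0, 0] and
# to1HOT(8, 8) -> [0, 0, 0, 0, 0, 0, 0, 1], i.e. a one-hot vector of length
# subfeatureNum with the 1 in position colValue.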
# initialize fields
color_stim = []
word_stim = []
color_task = []
word_task = []
reward = []
xor_dict = []
# number of feature stimuli
total_features = 8
subj_id = 1
trial_num = 0
feature_num = 1
while (trial_num < 100):
# while (trial_num <= 65):
# if trial_num == 65:
# trial_num = 0
# if feature_num == 8:
# feature_num = 1
# xor_dict.append([color_stim, word_stim, color_task, word_task, reward])
# if subj_id == 30:
# break
# else:
# subj_id += 1
# color_stim = []
# word_stim = []
# color_task = []
# word_task = []
# reward = []
# freq = []
# trial_type = []
# else:
# feature_num += 1
# continue
color_stim.append(to1HOT(total_features, 1)) # congruent stimuli, x65
word_stim.append(to1HOT(total_features, 8))
color_task.append([1.32]) # given 1.32
word_task.append([-3.22]) # given 3.22
# reward.append([1,0])
# if feature_num <= 2:
# reward.append([1,0]) # CN reward
# else:
# reward.append([0,5]) # WR reward
reward.append([5,0]) # CN reward
trial_num += 1
xor_dict.append([color_stim, word_stim, color_task, word_task, reward])
# print("new color len: ", color_stim)
# print("new word len: ", len(word_stim))
# print("new color task len: ", len(color_task))
# print("new word task len: ", len(word_task))
# print("new reward len: ", len(reward))
# print("reward: ", reward)
# print(xor_dict)
```
#### File: Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py
```python
import numpy as np
from psyneulink import *
from gym_forager.envs.forager_env import ForagerEnv
# Runtime Switches:
RENDER = True
PERCEPT_DISTORT = False
PNL_COMPILE = False
# *********************************************************************************************************************
# *********************************************** CONSTANTS ***********************************************************
# *********************************************************************************************************************
# These should probably be replaced by reference to ForagerEnv constants:
obs_len = 2
action_len = 2
player_coord_idx = slice(0, 2)
predator_coord_idx = slice(3, 5)
prey_coord_idx = slice(6, 8)
player_value_idx = 2
predator_value_idx = 5
prey_value_idx = 8
player_len = prey_len = predator_len = obs_len
# *********************************************************************************************************************
# ************************************** MECHANISMS AND COMPOSITION *************************************************
# *********************************************************************************************************************
if PERCEPT_DISTORT:
player = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS")
prey = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PREY OBS")
else:
player = TransferMechanism(size=prey_len, name="PLAYER OBS")
prey = TransferMechanism(size=prey_len, name="PREY OBS")
# For future use:
values = TransferMechanism(size=3, name="AGENT VALUES")
reward = TransferMechanism(name="REWARD")
# Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey:
# note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI)
greedy_action_mech = ComparatorMechanism(name='MOTOR OUTPUT', sample=player, target=prey)
agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
agent_comp.add_node(player)
agent_comp.add_node(prey)
agent_comp.add_node(greedy_action_mech)
agent_comp.env = ForagerEnv() # NEW: ForagerEnv must be stored in an attribute on the Composition
def main():
# NEW: get_next_input interactively returns a new input from the ForagerEnv
# (rather than specifying a pre-determined list of input values)
def get_next_input(env, result):
action = np.where(result[0] == 0, 0, result[0] / np.abs(result[0]))
env_step = env.step(action)
observation = env_step[0]
done = env_step[2]
if not done:
# NEW: This function MUST return a dictionary of input values for a single trial for each INPUT node
return {player: [observation[player_coord_idx]],
prey: [observation[prey_coord_idx]]}
return done
if RENDER:
agent_comp.env.render()
BIN_EXECUTE = 'Python'
if PNL_COMPILE:
BIN_EXECUTE = 'LLVM'
max_steps = 100 # maximum number of steps before agent quits
num_games = 3
for i in range(num_games):
agent_comp.run(inputs=get_next_input, # specify the NAME of a fn in lieu of an inputs dict
num_trials=max_steps, # maximum number of steps before agent quits
bin_execute=BIN_EXECUTE)
if __name__ == "__main__":
main()
```
#### File: Scripts/Models (Under Development)/GymForagerRandomAgent.py
```python
from psyneulink import *
from gym_forager.envs.forager_env import ForagerEnv
from gym_forager.envs.forager.randomexp import RandomAgent
num_trials = 4
env = ForagerEnv()
reward = 0
done = False
# Function used by PsyNeuLink Mechanism
r = RandomAgent(env.action_space)
def random_action(variable):
return r.act(variable, None, None)
# Create PsyNeuLink Composition as agent
agent_mech = ProcessingMechanism(function=random_action)
agent_comp = Composition()
agent_comp.add_node(agent_mech)
def main():
for _ in range(num_trials):
observation = env.reset()
while True:
run_results = agent_comp.run(inputs={agent_mech:observation})
action=run_results[0]
observation, reward, done, _ = env.step(action)
if done:
break
if __name__ == "__main__":
main()
```
#### File: Scripts/Models (Under Development)/N-back.py
```python
from psyneulink import *
# TODO:
# Nback::
# - separate out stim/context external inputs from those from EM into FFN
# - figure out how to specify feedback from DDM to EM:
# - figure out how to execute EM twice:
# > first, at beginning of trial, to retrieve item based on current stimulus & context
# (with prob retrieval = 1, prob storage = 0)
# > second time, at end of trial (under influence of ControlMechanism) to encode current stimulus & context
# (with prob storage = 1; prob of retrieval = 0)
# scheduler.add_condition(A, pnl.AfterNCalls(CM, 1))
# scheduler.add_condition(CM, pnl.Always())
# composition.run(...termination_conds={pnl.TimeScale.TRIAL: pnl.And(pnl.AfterNCalls(CM, 2), pnl.JustRan(CM))})
# - implement circular drift as function for an input mechanism
# - ADD PNL FEATURE: should be able to use InputPort as spec for a pathway (if there is nothing after it);
# same for OutputPort (if there is nothing before it)
#region N-BACK MODEL
def n_back_model():
# Input Mechs
stim = TransferMechanism(name='STIM', size=5)
context = TransferMechanism(name='CONTEXT', size=5)
# Feedforward Network:
stim_input_layer = TransferMechanism(name='STIM INPUT LAYER', size=5)
context_input_layer = TransferMechanism(name='CONTEXT INPUT LAYER', size=5)
match_output_layer = TransferMechanism(name='MATCH LAYER', size=1)
# ffn = AutodiffComposition(name='FFN', pathways=[[stim_input,match_output], [context_input, match_output]])
ffn = Composition(name='FFN', pathways=[[stim_input_layer, match_output_layer],
[context_input_layer, match_output_layer]])
# Episodic Memory, Decision and Control
# em = EpisodicMemoryMechanism(name='EM', content_size=5, assoc_size=5)
em = EpisodicMemoryMechanism(name='EM', size=5,
# function=DictionaryMemory(initializer=[[[0,0,0,0,0],[0,0,0,0,0]]])
)
ctl = ControlMechanism(control=(STORAGE_PROB, em))
decision = DDM(name='DECISION')
resp_decision = Pathway([match_output_layer, (decision, NodeRole.OUTPUT)])
# FIX: ENHANCE add_linear_processing_pathway TO SUPPORT InputPort at end, or OutputPort at beginning:
# stimulus_encoding = [stim, em.input_ports[KEY_INPUT]]
# context_encoding = [context, em.input_ports[VALUE_INPUT]]
# MappingProjection(sender=stim, receiver=stim_input_layer)
# MappingProjection(sender=stim, receiver=em.input_ports[KEY_INPUT])
# MappingProjection(sender=context, receiver=context_input_layer)
# MappingProjection(sender=context, receiver=em.input_ports[VALUE_INPUT])
# MappingProjection(sender=em.output_ports[KEY_OUTPUT], receiver=stim_input_layer)
# MappingProjection(sender=em.output_ports[VALUE_OUTPUT], receiver=context_input_layer)
# stim_processing = Pathway([stim, ffn])
# context_processing = Pathway([context, ffn])
# stim_encoding = Pathway([stim, em])
# context_encoding = Pathway([context, em])
# stim_retrieval = Pathway([em, stim_input_layer])
# context_retrieval = Pathway([em, context_input_layer])
# storage = Pathway([(decision, NodeRole.OUTPUT), (ctl, NodeRole.FEEDBACK_SENDER), em])
# # FIX: show_graph NOT RECOGNIZING STIM->STIM_INPUT_LAYER AND CONTEXT->CONTEXT_INPUT_LAYER
# comp = Composition(pathways=[stim_processing,
# context_processing,
# ffn,
# context_encoding,
# stim_encoding,
# resp_decision,
# stim_retrieval,
# context_retrieval,
# storage])
# FIX: show_graph NOT RECOGNIZING STIM->STIM_INPUT_LAYER AND CONTEXT->CONTEXT_INPUT_LAYER
# comp = Composition(pathways=[[stim, ffn],
# [stim,em],
# [context,ffn],
# [context,em],
# [em,ffn],
# [ffn, em],
# [ffn, decision, ctl, em]])
# comp = Composition(pathways=[ffn,
# [stim, stim_input_layer],
# [stim, MappingProjection(stim, em.input_ports[KEY_INPUT]), em],
# [context, context_input_layer],
# [context, MappingProjection(context, em.input_ports[VALUE_INPUT]), em],
# [em,stim_input_layer],
# [em,context_input_layer],
# [ffn, decision, ctl, em]])
comp = Composition()
comp.add_nodes([stim, context, ffn, em, (decision, NodeRole.OUTPUT), ctl])
comp.add_projection(MappingProjection(), stim, stim_input_layer)
comp.add_projection(MappingProjection(), context, context_input_layer)
comp.add_projection(MappingProjection(), stim, em.input_ports[KEY_INPUT])
comp.add_projection(MappingProjection(), context, em.input_ports[VALUE_INPUT])
comp.add_projection(MappingProjection(), em.output_ports[KEY_OUTPUT], stim_input_layer)
comp.add_projection(MappingProjection(), em.output_ports[VALUE_OUTPUT], context_input_layer)
comp.add_projection(MappingProjection(), match_output_layer, decision)
comp.add_projection(MappingProjection(), decision, ctl)
# comp.add_projection(MappingProjection(), decision, stim_input_layer)
# comp._analyze_graph()
comp.show_graph()
# comp.show_graph(show_cim=True,
# show_node_structure=ALL,
# show_projection_labels=True,
# show_dimensions=True)
# comp.show_graph(show_cim=True,
# show_node_structure=ALL,
# show_projection_labels=True,
# show_dimensions=True)
# comp.run(inputs={stim:[1,2,3,4,5],
# context:[6,7,8,9,10]},
# report_output=ReportOutput.ON)
# comp.run(inputs={a:2.5}, report_output=ReportOutput.FULL)
#endregion
n_back_model()
```
#### File: tests/functions/test_fhn_integrator.py
```python
import numpy as np
import psyneulink.core.llvm as pnlvm
import psyneulink.core.components.functions.stateful.integratorfunctions
import pytest
np.random.seed(0)
params = {'a_v' : np.random.rand(),
'b_v' : np.random.rand(),
'c_v' : np.random.rand(),
'd_v' : np.random.rand(),
'e_v' : np.random.rand(),
'f_v' : np.random.rand(),
'a_w' : np.random.rand(),
'b_w' : np.random.rand(),
'c_w' : np.random.rand(),
'time_constant_v' : np.random.rand(),
'time_constant_w' : np.random.rand(),
'threshold' : np.random.rand(),
'uncorrelated_activity' : np.random.rand(),
'mode' : np.random.rand(),
}
SIZE=8
test_var = np.random.rand(SIZE)
test_scalar = np.random.rand()
test_data = [
(psyneulink.core.components.functions.stateful.integratorfunctions.FitzHughNagumoIntegrator, test_var, "RK4", params, ([0.23619944, 0.24032298, 0.22321782, 0.43865125, 0.42363054,
0.44901757, 0.47938108, 0.42941189], [0.21378097, 0.21388886, 0.21344061, 0.21894107, 0.21856817,
0.21919746, 0.21994384, 0.2187119 ], 0.15000000000000002)),
(psyneulink.core.components.functions.stateful.integratorfunctions.FitzHughNagumoIntegrator, test_scalar, "RK4", params,
([0.33803257], [0.21641212], 0.15000000000000002)),
(psyneulink.core.components.functions.stateful.integratorfunctions.FitzHughNagumoIntegrator, test_var, "EULER", params, ([0.23686576, 0.24093183, 0.22404678, 0.43291206, 0.41863405,
0.44273909,
0.47139546, 0.42413492], [0.20757016, 0.2076755 , 0.20723764, 0.21257185, 0.21221299,
0.21281834, 0.21353476, 0.21235135], 0.15000000000000002)),
(psyneulink.core.components.functions.stateful.integratorfunctions.FitzHughNagumoIntegrator, test_scalar, "EULER", params, ([0.33642314], [0.21013003], 0.15000000000000002)),
]
# use list, naming function produces ugly names
names = [
"FitzHughNagumoIntegrator RK4 VECTOR",
"FitzHughNagumoIntegrator RK4 SCALAR",
"FitzHughNagumoIntegrator EULER VECTOR",
"FitzHughNagumoIntegrator EULER SCALAR",
]
@pytest.mark.function
@pytest.mark.integrator_function
@pytest.mark.fitzHughNagumo_integrator_function
@pytest.mark.benchmark(group="FitzHughNagumoIntegrator")
@pytest.mark.parametrize("func, variable, integration_method, params, expected", test_data, ids=names)
def test_basic(func, variable, integration_method, params, expected, benchmark, func_mode):
f = func(default_variable=variable, integration_method=integration_method, params=params)
EX = pytest.helpers.get_func_execution(f, func_mode)
res = EX(variable)
res = EX(variable)
res = EX(variable)
assert np.allclose(res[0], expected[0])
assert np.allclose(res[1], expected[1])
assert np.allclose(res[2], expected[2])
if benchmark.enabled:
benchmark(EX, variable)
```
#### File: tests/json/test_json.py
```python
import numpy as np
import os
import psyneulink as pnl
import pytest
# stroop stimuli
red = [1, 0]
green = [0, 1]
word = [0, 1]
color = [1, 0]
num_trials = 5
stroop_stimuli = {
'color_input': [red] * num_trials,
'word_input': [green] * num_trials,
'task_input': [color] * num_trials,
}
json_results_parametrization = [
('model_basic.py', 'comp', '{A: 1}'),
('model_nested_comp_with_scheduler.py', 'comp', '{A: 1}'),
(
'model_with_control.py',
'comp',
'{Input: [0.5, 0.123], reward: [20, 20]}'
),
(
'stroop_conflict_monitoring.py',
'Stroop_model',
str(stroop_stimuli).replace("'", '')
),
('model_backprop.py', 'comp', '{a: [1, 2, 3]}'),
]
@pytest.mark.parametrize(
'filename, composition_name, input_dict_str',
json_results_parametrization
)
def test_json_results_equivalence(
filename,
composition_name,
input_dict_str,
):
# Get python script from file and execute
filename = f'{os.path.dirname(__file__)}/{filename}'
with open(filename, 'r') as orig_file:
exec(orig_file.read())
exec(f'{composition_name}.run(inputs={input_dict_str})')
orig_results = eval(f'{composition_name}.results')
# reset random seed
pnl.core.globals.utilities.set_global_seed(0)
# Generate python script from JSON summary of composition and execute
json_summary = eval(f'{composition_name}.json_summary')
exec(pnl.generate_script_from_json(json_summary))
exec(f'{composition_name}.run(inputs={input_dict_str})')
new_results = eval(f'{composition_name}.results')
assert orig_results == new_results
@pytest.mark.parametrize(
'filename, composition_name, input_dict_str',
json_results_parametrization
)
def test_write_json_file(
filename,
composition_name,
input_dict_str,
):
# Get python script from file and execute
filename = f'{os.path.dirname(__file__)}/{filename}'
with open(filename, 'r') as orig_file:
exec(orig_file.read())
exec(f'{composition_name}.run(inputs={input_dict_str})')
orig_results = eval(f'{composition_name}.results')
# reset random seed
pnl.core.globals.utilities.set_global_seed(0)
# Save json_summary of Composition to file and read back in.
json_filename = filename.replace('.py','.json')
exec(f'pnl.write_json_file({composition_name}, json_filename)')
exec(pnl.generate_script_from_json(json_filename))
# exec(f'{composition_name}.run(inputs={input_dict_str})')
exec(f'pnl.get_compositions()[0].run(inputs={input_dict_str})')
final_results = eval(f'{composition_name}.results')
assert orig_results == final_results
@pytest.mark.parametrize(
'filename, input_dict_strs',
[
pytest.param(
'model_with_two_conjoint_comps.py',
{'comp': '{A: 1}', 'comp2': '{A: 1}'},
marks=pytest.mark.xfail
),
('model_with_two_disjoint_comps.py', {'comp': '{A: 1}', 'comp2': '{C: 1}'}),
]
)
def test_write_json_file_multiple_comps(
filename,
input_dict_strs,
):
orig_results = {}
# Get python script from file and execute
filename = f'{os.path.dirname(__file__)}/{filename}'
with open(filename, 'r') as orig_file:
exec(orig_file.read())
for composition_name in input_dict_strs:
exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})')
orig_results[composition_name] = eval(f'{composition_name}.results')
# reset random seed
pnl.core.globals.utilities.set_global_seed(0)
# Save json_summary of Composition to file and read back in.
json_filename = filename.replace('.py', '.json')
exec(f'pnl.write_json_file([{",".join(input_dict_strs)}], json_filename)')
exec(pnl.generate_script_from_json(json_filename))
for composition_name in input_dict_strs:
exec(f'{composition_name}.run(inputs={input_dict_strs[composition_name]})')
final_results = eval(f'{composition_name}.results')
assert orig_results[composition_name] == final_results, f'{composition_name}:'
```
#### File: tests/llvm/test_llvm_lite.py
```python
from llvmlite import binding,ir
import ctypes
import pytest
try:
import pycuda
from pycuda import autoinit as pycuda_default
# Import this after pycuda since only cuda test needs numpy
import numpy as np
except:
pycuda = None
@pytest.mark.llvm
def test_llvm_lite():
# Create some useful types
double = ir.DoubleType()
fnty = ir.FunctionType(double, (double, double))
# Create an empty module...
module = ir.Module(name=__file__)
# and declare a function named "fpadd" inside it
func = ir.Function(module, fnty, name="fpadd")
# Now implement basic addition
# basic blocks are sequences of instructions that have exactly one
# entry point and one exit point (no control flow)
# We only need one in this case
# See available operations at:
# http://llvmlite.readthedocs.io/en/latest/ir/builder.html#instruction-building
block = func.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
a, b = func.args
result = builder.fadd(a, b, name="res")
builder.ret(result)
# Uncomment to print the module IR. This prints LLVM IR assembly.
# print("LLVM IR:")
# print(module)
binding.initialize()
# native == currently running CPU
binding.initialize_native_target()
# TODO: This prevents 'LLVM ERROR: Target does not support MC emission!',
# but why?
binding.initialize_native_asmprinter()
# Create compilation target, use default triple
target = binding.Target.from_default_triple()
target_machine = target.create_target_machine()
# And an execution engine with an empty backing module
# TODO: why is empty backing mod necessary?
backing_mod = binding.parse_assembly("")
# There are other engines beside MCJIT
# MCJIT makes it easier to run the compiled function right away.
engine = binding.create_mcjit_compiler(backing_mod, target_machine)
# IR module is not the same as binding module.
# "assembly" in this case is LLVM IR assembly
# TODO is there a better way to convert this?
mod = binding.parse_assembly(str(module))
mod.verify()
# Now add the module and make sure it is ready for execution
engine.add_module(mod)
engine.finalize_object()
# Uncomment to print generated x86 assembly
#print("x86 assembly:")
#print(target_machine.emit_assembly(mod))
# Look up the function pointer (a Python int)
# func_ptr is now an address to a compiled function
func_ptr = engine.get_function_address("fpadd")
# Run the function via ctypes
a = 10.0
b = 3.5
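# ctypes.CFUNCTYPE(restype, *argtypes) builds a function prototype; calling it on
# func_ptr wraps the JIT-compiled code as a Python callable double(double, double).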
cfunc = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double, ctypes.c_double)(func_ptr)
res = cfunc(10.0, 3.5)
assert res == (a + b)
if res != (a + b):
print("TEST FAILED! {} instead of {}".format(res, a + b))
else:
print("TEST PASSED! {} == {}".format(res, a + b))
engine.remove_module(mod)
# TODO: shutdown cleanly
# we need to do something extra before shutdown
#binding.shutdown()
@pytest.mark.llvm
@pytest.mark.cuda
@pytest.mark.skipif(pycuda is None, reason="pyCUDA module is not available")
def test_llvm_lite_ptx_pycuda():
# Create some useful types
double = ir.DoubleType()
fnty = ir.FunctionType(ir.VoidType(), (double, double, double.as_pointer()))
# Create an empty module...
module = ir.Module(name=__file__)
# and declare a function named "fpadd" inside it
func = ir.Function(module, fnty, name="fpadd")
# Now implement basic addition
# basic blocks are sequences of instructions that have exactly one
# entry point and one exit point (no control flow)
# We only need one in this case
# See available operations at:
# http://llvmlite.readthedocs.io/en/latest/ir/builder.html#instruction-building
block = func.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
a, b, res = func.args
result = builder.fadd(a, b, name="res")
builder.store(result, res)
builder.ret_void()
# Add kernel mark metadata
module.add_named_metadata("nvvm.annotations",[func, "kernel", ir.IntType(32)(1)])
# Uncomment to print the module IR. This prints LLVM IR assembly.
# print("LLVM IR:\n", module)
binding.initialize()
binding.initialize_all_targets()
binding.initialize_all_asmprinters()
capability = pycuda_default.device.compute_capability()
# Create compilation target using the NVPTX triple
target = binding.Target.from_triple("nvptx64-nvidia-cuda")
target_machine = target.create_target_machine(cpu="sm_{}{}".format(capability[0], capability[1]), codemodel='small')
mod = binding.parse_assembly(str(module))
mod.verify()
ptx = target_machine.emit_assembly(mod)
# Uncomment to print the generated PTX assembly
# print("PTX assembly:\n", ptx)
ptx_mod = pycuda.driver.module_from_buffer(ptx.encode())
cuda_func = ptx_mod.get_function('fpadd')
# Run the function via ctypes
a = np.float64(10.0)
b = np.float64(3.5)
res = np.empty(1, dtype=np.float64)
dev_res = pycuda.driver.Out(res)
cuda_func(a, b, dev_res, block=(1,1,1))
assert res[0] == (a + b)
```
#### File: tests/mechanisms/test_recurrent_transfer_mechanism.py
```python
import numpy as np
import pytest
import psyneulink as pnl
import psyneulink.core.llvm as pnlvm
from psyneulink.core.compositions.composition import Composition
from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce
from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist
from psyneulink.core.components.functions.function import FunctionError, get_matrix
from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement
from psyneulink.core.components.functions.stateful.integratorfunctions import AccumulatorIntegrator
from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Logistic
from psyneulink.core.components.mechanisms.mechanism import MechanismError
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferError, TransferMechanism
from psyneulink.core.globals.keywords import MATRIX_KEYWORD_VALUES, RANDOM_CONNECTIVITY_MATRIX, RESULT
from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF, VERBOSE_PREF
from psyneulink.core.globals.parameters import ParameterError
from psyneulink.core.scheduling.condition import Never
from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \
RecurrentTransferError, RecurrentTransferMechanism
from psyneulink.library.components.projections.pathway.autoassociativeprojection import AutoAssociativeProjection
class TestMatrixSpec:
def test_recurrent_mech_matrix(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
matrix=[[1.0, 2.0, 3.0],
[2.0, 1.0, 2.0],
[3.0, 2.0, 1.0]])
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
assert True
def test_recurrent_mech_auto_associative_projection(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
matrix=AutoAssociativeProjection)
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
def test_recurrent_mech_auto_auto_hetero(self):
T = TransferMechanism(default_variable=[[0.0, 0.0, 0.0]])
recurrent_mech = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
auto=3.0,
hetero=-7.0)
c = Composition(pathways=[T, recurrent_mech])
results = []
def record_trial():
results.append(recurrent_mech.parameters.value.get(c))
c.run(inputs=[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
call_after_trial=record_trial)
class TestRecurrentTransferMechanismInputs:
def test_recurrent_mech_empty_spec(self):
R = RecurrentTransferMechanism(auto=1.0)
np.testing.assert_allclose(R.value, R.defaults.value)
np.testing.assert_allclose(R.defaults.variable, [[0]])
np.testing.assert_allclose(R.matrix.base, [[1]])
def test_recurrent_mech_check_attrs(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
auto=1.0
)
print("matrix = ", R.matrix.base)
print("auto = ", R.auto)
print("hetero = ", R.hetero)
# np.testing.assert_allclose(R.value, R.defaults.value)
# np.testing.assert_allclose(R.defaults.variable, [[0., 0., 0.]])
# np.testing.assert_allclose(R.matrix.base, [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]])
def test_recurrent_mech_check_proj_attrs(self):
R = RecurrentTransferMechanism(
name='R',
size=3
)
np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base)
assert R.recurrent_projection.sender is R.output_port
assert R.recurrent_projection.receiver is R.input_port
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_inputs_list_of_ints(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0]
)
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([10, 12, 0, -1])
val2 = EX([1, 2, 3, 0])
# The outputs match inputs because recurrent projection is
# not used when executing: mech is reset each time
np.testing.assert_allclose(val1, [[10.0, 12.0, 0, -1]])
np.testing.assert_allclose(val2, [[1, 2, 3, 0]])
if benchmark.enabled:
benchmark(EX, [1, 2, 3, 0])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_inputs_list_of_floats(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R',
size=4
)
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val = benchmark(EX, [10.0, 10.0, 10.0, 10.0])
np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_integrator(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(size=2,
function=Logistic(),
hetero=-2.0,
integrator_mode=True,
integration_rate=0.01,
output_ports = [RESULT])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([[1.0, 2.0]])
val2 = EX([[1.0, 2.0]])
# execute 10 times
for i in range(10):
val10 = EX([[1.0, 2.0]])
assert np.allclose(val1, [[0.50249998, 0.50499983]])
assert np.allclose(val2, [[0.50497484, 0.50994869]])
assert np.allclose(val10, [[0.52837327, 0.55656439]])
if benchmark.enabled:
benchmark(EX, [[1.0, 2.0]])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_lci(self, benchmark, mech_mode):
LCI = pnl.LeakyCompetingIntegrator(rate=0.4)
R = RecurrentTransferMechanism(size=2,
hetero=-2.0,
integrator_mode=True,
integrator_function=LCI,
output_ports = [RESULT])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val1 = EX([[1.0, 2.0]])
val2 = EX([[1.0, 2.0]])
# execute 10 times
for i in range(10):
val10 = EX([[1.0, 2.0]])
assert np.allclose(val1, [[0.1, 0.2]])
assert np.allclose(val2, [[0.196, 0.392]])
assert np.allclose(val10, [[0.96822561, 1.93645121]])
if benchmark.enabled:
benchmark(EX, [[1.0, 2.0]])
# def test_recurrent_mech_inputs_list_of_fns(self):
# R = RecurrentTransferMechanism(
# name='R',
# size=4,
# integrator_mode=True
# )
# val = R.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()])
# expected = [[np.array([0.]), 0.4001572083672233, np.array([1.]), 0.7872011523172707]]
# assert len(val) == len(expected) == 1
# assert len(val[0]) == len(expected[0])
# for i in range(len(val[0])):
# np.testing.assert_allclose(val[0][i], expected[0][i])
@pytest.mark.mechanism
@pytest.mark.recurrent_transfer_mechanism
@pytest.mark.benchmark(group="RecurrentTransferMechanism")
def test_recurrent_mech_no_inputs(self, benchmark, mech_mode):
R = RecurrentTransferMechanism(
name='R'
)
np.testing.assert_allclose(R.defaults.variable, [[0]])
EX = pytest.helpers.get_mech_execution(R, mech_mode)
val = EX([10])
np.testing.assert_allclose(val, [[10.]])
if benchmark.enabled:
benchmark(EX, [1])
def test_recurrent_mech_inputs_list_of_strings(self):
with pytest.raises(FunctionError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
integrator_mode=True
)
R.execute(["one", "two", "three", "four"])
assert "Unrecognized type" in str(error_text.value)
def test_recurrent_mech_var_list_of_strings(self):
with pytest.raises(ParameterError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=['a', 'b', 'c', 'd'],
integrator_mode=True
)
assert "non-numeric entries" in str(error_text.value)
def test_recurrent_mech_inputs_mismatched_with_default_longer(self):
with pytest.raises(MechanismError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=4
)
R.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
def test_recurrent_mech_inputs_mismatched_with_default_shorter(self):
with pytest.raises(MechanismError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=6
)
R.execute([1, 2, 3, 4, 5])
assert "does not match required length" in str(error_text.value)
class TestRecurrentTransferMechanismMatrix:
@pytest.mark.parametrize("matrix", MATRIX_KEYWORD_VALUES)
def test_recurrent_mech_matrix_keyword_spec(self, matrix):
if matrix == RANDOM_CONNECTIVITY_MATRIX:
pytest.skip("Random test")
R = RecurrentTransferMechanism(
name='R',
size=4,
matrix=matrix
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0]))
@pytest.mark.parametrize("matrix", [np.matrix('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4'])
def test_recurrent_mech_matrix_other_spec(self, matrix):
R = RecurrentTransferMechanism(
name='R',
size=2,
matrix=matrix
)
val = R.execute([10, 10])
# np.testing.assert_allclose(val, [[10., 10.]])
# assert isinstance(R.matrix.base, np.ndarray)
# np.testing.assert_allclose(R.matrix.base, [[1, 2], [3, 4]])
# np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[1, 2], [3, 4]])
# assert isinstance(R.recurrent_projection.matrix.base, np.ndarray)
def test_recurrent_mech_matrix_auto_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
auto=2
)
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[2, 1, 1], [1, 2, 1], [1, 1, 2]])
np.testing.assert_allclose(run_twice_in_composition(R, [1, 2, 3], [10, 11, 12]), [17, 19, 21])
def test_recurrent_mech_matrix_hetero_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=3,
hetero=-1
)
# (7/28/17 CW) these numbers assume that execute() leaves its value in the outputPort of the mechanism: if
# the behavior of execute() changes, feel free to change these numbers
val = R.execute([-1, -2, -3])
np.testing.assert_allclose(val, [[-1, -2, -3]])
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[0, -1, -1], [-1, 0, -1], [-1, -1, 0]])
# Execution 1:
# Recurrent input = [5, 4, 3] | New input = [1, 2, 3] | Total input = [6, 6, 6]
# Output 1 = [6, 6, 6]
# Execution 2:
# Recurrent input =[-12, -12, -12] | New input = [10, 11, 12] | Total input = [-2, -1, 0]
# Output 2 = [-2, -1, 0]
np.testing.assert_allclose(run_twice_in_composition(R, [1, 2, 3], [10, 11, 12]), [-2., -1., 0.])
def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self):
R = RecurrentTransferMechanism(
name='R',
size=1,
auto=-2,
hetero=4.4
)
val = R.execute([10])
np.testing.assert_allclose(val, [[10.]])
assert isinstance(R.matrix.base, np.ndarray)
np.testing.assert_allclose(R.matrix.base, [[-2]])
def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
hetero=-3
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]])
assert isinstance(R.matrix.base, np.ndarray)
def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self):
# when auto, hetero, and matrix are all specified, auto and hetero should take precedence
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
hetero=-3,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 10, 10, 10])
np.testing.assert_allclose(val, [[10., 10., 10., 10.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]])
assert isinstance(R.matrix.base, np.ndarray)
def test_recurrent_mech_auto_matrix_spec(self):
# auto should override the diagonal only
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=2.2,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 11, 12, 13])
np.testing.assert_allclose(val, [[10., 11., 12., 13.]])
np.testing.assert_allclose(R.matrix.base, [[2.2, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 2.2, 4], [1, 2, 3, 2.2]])
def test_recurrent_mech_auto_array_matrix_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[1.1, 2.2, 3.3, 4.4],
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([10, 11, 12, 13])
np.testing.assert_allclose(val, [[10., 11., 12., 13.]])
np.testing.assert_allclose(R.matrix.base, [[1.1, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 3.3, 4], [1, 2, 3, 4.4]])
def test_recurrent_mech_hetero_float_matrix_spec(self):
# hetero should override off-diagonal only
R = RecurrentTransferMechanism(
name='R',
size=4,
hetero=-2.2,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -2.2, -2.2, -2.2], [-2.2, 2, -2.2, -2.2], [-2.2, -2.2, 3, -2.2], [-2.2, -2.2, -2.2, 4]]
)
def test_recurrent_mech_hetero_matrix_matrix_spec(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -3, -2, -1], [-4, 2, -2, -1], [-4, -3, 3, -1], [-4, -3, -2, 4]]
)
def test_recurrent_mech_auto_hetero_matrix_spec_v1(self):
# auto and hetero should override matrix
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[1, 3, 5, 7],
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[1, -3, -2, -1], [-4, 3, -2, -1], [-4, -3, 5, -1], [-4, -3, -2, 7]]
)
def test_recurrent_mech_auto_hetero_matrix_spec_v2(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[3],
hetero=np.array([[-4, -3, -2, -1]] * 4),
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[3, -3, -2, -1], [-4, 3, -2, -1], [-4, -3, 3, -1], [-4, -3, -2, 3]]
)
def test_recurrent_mech_auto_hetero_matrix_spec_v3(self):
R = RecurrentTransferMechanism(
name='R',
size=4,
auto=[3],
hetero=2,
matrix=[[1, 2, 3, 4]] * 4
)
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[1., 2., 3., 4.]])
np.testing.assert_allclose(
R.matrix.base,
[[3, 2, 2, 2], [2, 3, 2, 2], [2, 2, 3, 2], [2, 2, 2, 3]]
)
def test_recurrent_mech_matrix_too_large(self):
with pytest.raises(RecurrentTransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=3,
matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]
)
assert "must be the same as its variable" in str(error_text.value)
def test_recurrent_mech_matrix_too_small(self):
with pytest.raises(RecurrentTransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=5,
matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]
)
assert "must be the same as its variable" in str(error_text.value)
def test_recurrent_mech_matrix_strings(self):
with pytest.raises(RecurrentTransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=4,
matrix=[['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']]
)
assert "has non-numeric entries" in str(error_text.value)
def test_recurrent_mech_matrix_nonsquare(self):
with pytest.raises(RecurrentTransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=4,
matrix=[[1, 3]]
)
assert "must be square" in str(error_text.value)
def test_recurrent_mech_matrix_3d(self):
with pytest.raises(FunctionError) as error_text:
R = RecurrentTransferMechanism(
name='R',
size=2,
matrix=[[[1, 3], [2, 4]], [[5, 7], [6, 8]]]
)
assert "more than 2d" in str(error_text.value)
class TestRecurrentTransferMechanismFunction:
def test_recurrent_mech_function_logistic(self):
R = RecurrentTransferMechanism(
name='R',
size=10,
function=Logistic(gain=2, offset=1)
)
val = R.execute(np.ones(10))
np.testing.assert_allclose(val, [np.full(10, 0.7310585786300049)])
def test_recurrent_mech_function_psyneulink(self):
a = Logistic(gain=2, offset=1)
R = RecurrentTransferMechanism(
name='R',
size=7,
function=a
)
val = R.execute(np.zeros(7))
np.testing.assert_allclose(val, [np.full(7, 0.2689414213699951)])
def test_recurrent_mech_function_custom(self):
# I don't know how to do this at the moment but it seems highly important.
pass
def test_recurrent_mech_normal_fun(self):
with pytest.raises(TransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=NormalDist(),
integration_rate=1.0,
integrator_mode=True
)
R.execute([0, 0, 0, 0])
assert "must be a TRANSFER FUNCTION TYPE" in str(error_text.value)
def test_recurrent_mech_reinforcement_fun(self):
with pytest.raises(TransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Reinforcement(),
integration_rate=1.0,
integrator_mode=True
)
R.execute([0, 0, 0, 0])
assert "must be a TRANSFER FUNCTION TYPE" in str(error_text.value)
def test_recurrent_mech_integrator_fun(self):
with pytest.raises(TransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=AccumulatorIntegrator(),
integration_rate=1.0,
integrator_mode=True
)
R.execute([0, 0, 0, 0])
assert "must be a TRANSFER FUNCTION TYPE" in str(error_text.value)
def test_recurrent_mech_reduce_fun(self):
with pytest.raises(TransferError) as error_text:
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Reduce(),
integration_rate=1.0,
integrator_mode=True
)
R.execute([0, 0, 0, 0])
assert "must be a TRANSFER FUNCTION TYPE" in str(error_text.value)
class TestRecurrentTransferMechanismTimeConstant:
def test_recurrent_mech_integration_rate_0_8(self):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Linear(),
integration_rate=0.8,
integrator_mode=True
)
val = R.execute([1, 1, 1, 1])
np.testing.assert_allclose(val, [[0.8, 0.8, 0.8, 0.8]])
val = R.execute([1, 1, 1, 1])
np.testing.assert_allclose(val, [[.96, .96, .96, .96]])
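# Note: the expected values are consistent with the adaptive-integrator update
# new = prev + rate*(input - prev): 0 + 0.8*(1 - 0) = 0.8, then 0.8 + 0.8*(1 - 0.8) = 0.96.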
def test_recurrent_mech_integration_rate_0_8_initial_0_5(self):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Linear(),
integration_rate=0.8,
initial_value=np.array([[0.5, 0.5, 0.5, 0.5]]),
integrator_mode=True
)
val = R.execute([1, 1, 1, 1])
np.testing.assert_allclose(val, [[0.9, 0.9, 0.9, 0.9]])
val = R.execute([1, 2, 3, 4])
np.testing.assert_allclose(val, [[.98, 1.78, 2.5800000000000005, 3.3800000000000003]]) # due to inevitable floating point errors
def test_recurrent_mech_integration_rate_0_8_initial_1_8(self):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Linear(),
integration_rate=0.8,
initial_value=np.array([[1.8, 1.8, 1.8, 1.8]]),
integrator_mode=True
)
val = R.execute([1, 1, 1, 1])
np.testing.assert_allclose(val, [[1.16, 1.16, 1.16, 1.16]])
val = R.execute([2, 2, 2, 2])
np.testing.assert_allclose(val, [[1.832, 1.832, 1.832, 1.832]])
val = R.execute([-4, -3, 0, 1])
np.testing.assert_allclose(val, [[-2.8336, -2.0336000000000003, .36639999999999995, 1.1663999999999999]])
def test_recurrent_mech_integration_rate_0_8_initial_1_2(self):
R = RecurrentTransferMechanism(
name='R',
default_variable=[0, 0, 0, 0],
function=Linear(),
integration_rate=0.8,
initial_value=np.array([[-1, 1, -2, 2]]),
integrator_mode=True
)
val = R.execute([3, 2, 1, 0])
np.testing.assert_allclose(val, [[2.2, 1.8, .40000000000000013, .3999999999999999]])
# (7/28/17 CW): the below are used because it's good to test Composition anyways, and because the recurrent Projection
# won't get executed if we only use the execute() method of Mechanism: thus, to test it we must use a Composition
def run_twice_in_composition(mech, input1, input2=None):
if input2 is None:
input2 = input1
c = Composition(pathways=[mech])
c.run(inputs={mech:[input1]})
result = c.run(inputs={mech:[input2]})
return result[0]
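# For example, run_twice_in_composition(R, [1, 2, 3]) presents the same input twice,
# so the recurrent projection contributes on the second trial; that second trial's
# result is what gets returned and compared in the tests above.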
class TestRecurrentTransferMechanismInProcess:
simple_prefs = {REPORT_OUTPUT_PREF: False, VERBOSE_PREF: False}
def test_recurrent_mech_transfer_mech_process_three_runs(self):
# this test ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by
# lines by `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values
R = RecurrentTransferMechanism(
size=4,
auto=0,
hetero=-1
)
T = TransferMechanism(
size=3,
function=Linear
)
c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
c.run(inputs={R: [[1, 2, 3, 4]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1., 2., 3., 4.]])
np.testing.assert_allclose(T.parameters.value.get(c), [[10., 10., 10.]])
c.run(inputs={R: [[5, 6, 7, 8]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-4, -2, 0, 2]])
np.testing.assert_allclose(T.parameters.value.get(c), [[-4, -4, -4]])
c.run(inputs={R: [[-1, 2, -2, 5.5]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-1.0, 4.0, 2.0, 11.5]])
np.testing.assert_allclose(T.parameters.value.get(c), [[16.5, 16.5, 16.5]])
def test_transfer_mech_process_matrix_change(self):
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
T1 = TransferMechanism(
size=4,
function=Linear)
proj = MappingProjection(matrix=[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
T2 = TransferMechanism(
size=4,
function=Linear)
c = Composition(pathways=[[T1, proj, T2]])
c.run(inputs={T1: [[1, 2, 3, 4]]})
proj.matrix.base = [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]
assert np.allclose(proj.matrix.base, [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])
# c.run(inputs={T1: [[1, 2, 3, 4]]})
T1.execute([[1, 2, 3, 4]])
proj.execute()
# removed this assert, because before the changes of most_recent_execution_id -> most_recent_context
# proj.matrix.base referred to the 'Process-0' execution_id, even though it was last executed with None
# assert np.allclose(proj.matrix.base, np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]))
def test_recurrent_mech_process_matrix_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=1,
hetero=-1)
T = TransferMechanism(
size=4,
function=Linear)
c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
R.matrix = [[2, 0, 1, 3]] * 4
c.run(inputs={T: [[1, 2, 3, 4]]})
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 2, 3, 4]])
np.testing.assert_allclose(R.parameters.value.get(c), [[1, 2, 3, 4]])
c.run(inputs={T: [[1, 3, 2, 5]]})
np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[2, 0, 1, 3]] * 4)
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 3, 2, 5]])
np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]])
# this test must wait until we create a property such that R.recurrent_projection.matrix sets R.auto and R.hetero
def test_recurrent_mech_process_proj_matrix_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=1,
hetero=-1)
T = TransferMechanism(
size=4,
function=Linear)
c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
R.recurrent_projection.matrix = [[2, 0, 1, 3]] * 4
c.run(inputs={T: [[1, 2, 3, 4]]})
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 2, 3, 4]])
np.testing.assert_allclose(R.parameters.value.get(c), [[1, 2, 3, 4]])
c.run(inputs={T: [[1, 3, 2, 5]]})
np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[2, 0, 1, 3]] * 4)
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 3, 2, 5]])
np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]])
class TestRecurrentTransferMechanismInComposition:
simple_prefs = {REPORT_OUTPUT_PREF: False, VERBOSE_PREF: False}
def test_recurrent_mech_transfer_mech_composition_three_runs(self):
# this test ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by
# lines like `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values accordingly
R = RecurrentTransferMechanism(
size=4,
auto=0,
hetero=-1)
T = TransferMechanism(
size=3,
function=Linear)
c = Composition(pathways=[R,T])
c.run(inputs={R: [[1, 2, 3, 4]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1., 2., 3., 4.]])
np.testing.assert_allclose(T.parameters.value.get(c), [[10., 10., 10.]])
c.run(inputs={R: [[5, 6, 7, 8]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-4, -2, 0, 2]])
np.testing.assert_allclose(T.parameters.value.get(c), [[-4, -4, -4]])
c.run(inputs={R: [[-1, 2, -2, 5.5]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-1.0, 4.0, 2.0, 11.5]])
np.testing.assert_allclose(T.parameters.value.get(c), [[16.5, 16.5, 16.5]])
@pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter')
def test_recurrent_mech_composition_auto_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=[1, 2, 3, 4],
hetero=-1)
T = TransferMechanism(
size=3,
function=Linear)
c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
c.run(inputs={R: [[1, 2, 3, 4]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1., 2., 3., 4.]])
np.testing.assert_allclose(T.parameters.value.get(c), [[10., 10., 10.]])
R.parameters.auto.set(0, c)
c.run(inputs={R: [[5, 6, 7, 8]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-4, -2, 0, 2]])
np.testing.assert_allclose(T.parameters.value.get(c), [[-4, -4, -4]])
R.recurrent_projection.parameters.auto.set([1, 1, 2, 4], c)
c.run(inputs={R: [[12, 11, 10, 9]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[8, 11, 14, 23]])
np.testing.assert_allclose(T.parameters.value.get(c), [[56, 56, 56]])
@pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter')
def test_recurrent_mech_composition_hetero_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=[1, 2, 3, 4],
hetero=[[-1, -2, -3, -4]] * 4)
T = TransferMechanism(
size=5,
function=Linear)
c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
c.run(inputs={R: [[1, 2, 3, -0.5]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1., 2., 3., -0.5]])
np.testing.assert_allclose(T.parameters.value.get(c), [[5.5, 5.5, 5.5, 5.5, 5.5]])
R.parameters.hetero.set(0, c)
c.run(inputs={R: [[-1.5, 0, 1, 2]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-.5, 4, 10, 0]])
np.testing.assert_allclose(T.parameters.value.get(c), [[13.5, 13.5, 13.5, 13.5, 13.5]])
R.parameters.hetero.set(np.array([[-1, 2, 3, 1.5]] * 4), c)
c.run(inputs={R: [[12, 11, 10, 9]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-2.5, 38, 50.5, 29.25]])
np.testing.assert_allclose(T.parameters.value.get(c), [[115.25, 115.25, 115.25, 115.25, 115.25]])
@pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter')
def test_recurrent_mech_composition_auto_and_hetero_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=[1, 2, 3, 4],
hetero=[[-1, -2, -3, -4]] * 4)
T = TransferMechanism(
size=5,
function=Linear)
c = Composition(pathways=[R,T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
c.run(inputs={R: [[1, 2, 3, -0.5]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1., 2., 3., -0.5]])
np.testing.assert_allclose(T.parameters.value.get(c), [[5.5, 5.5, 5.5, 5.5, 5.5]])
R.parameters.hetero.set(0, c)
c.run(inputs={R: [[-1.5, 0, 1, 2]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[-.5, 4, 10, 0]])
np.testing.assert_allclose(T.parameters.value.get(c), [[13.5, 13.5, 13.5, 13.5, 13.5]])
R.parameters.auto.set([0, 0, 0, 0], c)
c.run(inputs={R: [[12, 11, 10, 9]]})
np.testing.assert_allclose(R.parameters.value.get(c), [[12, 11, 10, 9]])
np.testing.assert_allclose(T.parameters.value.get(c), [[42, 42, 42, 42, 42]])
@pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter')
def test_recurrent_mech_composition_matrix_change(self):
R = RecurrentTransferMechanism(
size=4,
auto=1,
hetero=-1)
T = TransferMechanism(
size=4,
function=Linear)
c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs)
R.parameters.matrix.set([[2, 0, 1, 3]] * 4, c)
c.run(inputs={T: [[1, 2, 3, 4]]})
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 2, 3, 4]])
np.testing.assert_allclose(R.parameters.value.get(c), [[1, 2, 3, 4]])
c.run(inputs={T: [[1, 3, 2, 5]]})
np.testing.assert_allclose(R.recurrent_projection.parameters.matrix.get(c), [[2, 0, 1, 3]] * 4)
np.testing.assert_allclose(T.parameters.value.get(c), [[1, 3, 2, 5]])
np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]])
def test_recurrent_mech_with_learning(self):
R = RecurrentTransferMechanism(size=4,
function=Linear,
matrix=np.full((4, 4), 0.1),
enable_learning=True
)
# Test that all of these are the same:
np.testing.assert_allclose(
R.recurrent_projection.mod_matrix,
[
[0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.1, 0.1]
]
)
np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base)
np.testing.assert_allclose(R.input_port.path_afferents[0].matrix.base, R.matrix.base)
# Test that activity is properly computed prior to learning
# p = Process(pathway=[R])
c = Composition(pathways=[R])
R.learning_enabled = False
c.learn(inputs={R:[1, 1, 0, 0]})
c.learn(inputs={R:[1, 1, 0, 0]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1.2, 1.2, 0.2, 0.2]])
# Test that activity and weight changes are properly computed with learning
R.learning_enabled = True
c.learn(inputs={R:[1, 1, 0, 0]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1.28, 1.28, 0.28, 0.28]])
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(c),
[
[0.1, 0.18192000000000003, 0.11792000000000001, 0.11792000000000001],
[0.18192000000000003, 0.1, 0.11792000000000001, 0.11792000000000001],
[0.11792000000000001, 0.11792000000000001, 0.1, 0.10392000000000001],
[0.11792000000000001, 0.11792000000000001, 0.10392000000000001, 0.1]
]
)
c.learn(inputs={R:[1, 1, 0, 0]})
np.testing.assert_allclose(R.parameters.value.get(c), [[1.4268928, 1.4268928, 0.3589728, 0.3589728]])
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(c),
[
[0.1, 0.28372115, 0.14353079, 0.14353079],
[0.28372115, 0.1, 0.14353079, 0.14353079],
[0.14353079, 0.14353079, 0.1, 0.11036307],
[0.14353079, 0.14353079, 0.11036307, 0.1]
]
)
def test_recurrent_mech_change_learning_rate(self):
R = RecurrentTransferMechanism(size=4,
function=Linear,
enable_learning=True,
learning_rate=0.1
)
c = Composition(pathways=[R])
assert R.learning_rate.base == 0.1
assert R.learning_mechanism.learning_rate.base == 0.1
# assert R.learning_mechanism.function.learning_rate.base == 0.1
c.learn(inputs={R:[[1.0, 1.0, 1.0, 1.0]]})
matrix_1 = [[0., 1.1, 1.1, 1.1],
[1.1, 0., 1.1, 1.1],
[1.1, 1.1, 0., 1.1],
[1.1, 1.1, 1.1, 0.]]
assert np.allclose(R.recurrent_projection.mod_matrix, matrix_1)
print(R.recurrent_projection.mod_matrix)
R.learning_rate.base = 0.9
assert R.learning_rate.base == 0.9
assert R.learning_mechanism.learning_rate.base == 0.9
# assert R.learning_mechanism.function.learning_rate.base == 0.9
c.learn(inputs={R:[[1.0, 1.0, 1.0, 1.0]]})
matrix_2 = [[0., 1.911125, 1.911125, 1.911125],
[1.911125, 0., 1.911125, 1.911125],
[1.911125, 1.911125, 0., 1.911125],
[1.911125, 1.911125, 1.911125, 0.]]
# assert np.allclose(R.recurrent_projection.mod_matrix, matrix_2)
print(R.recurrent_projection.mod_matrix)
def test_learning_of_orthognal_inputs(self):
size=4
R = RecurrentTransferMechanism(
size=size,
function=Linear,
enable_learning=True,
auto=0,
hetero=np.full((size,size),0.0)
)
C=Composition(pathways=[R])
inputs_dict = {R:[1,0,1,0]}
C.learn(num_trials=4,
inputs=inputs_dict)
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(C),
[
[0.0, 0.0, 0.23700501, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.23700501, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]
]
)
np.testing.assert_allclose(R.output_port.parameters.value.get(C), [1.18518086, 0.0, 1.18518086, 0.0])
# Reset state so learning of new pattern is "uncontaminated" by activity from previous one
R.output_port.parameters.value.set([0, 0, 0, 0], C, override=True)
inputs_dict = {R:[0,1,0,1]}
C.learn(num_trials=4,
inputs=inputs_dict)
np.testing.assert_allclose(
R.recurrent_projection.get_mod_matrix(C),
[
[0.0, 0.0, 0.23700501, 0.0 ],
[0.0, 0.0, 0.0, 0.23700501],
[0.23700501, 0.0, 0.0, 0. ],
[0.0, 0.23700501, 0.0, 0. ]
]
)
np.testing.assert_allclose(R.output_port.parameters.value.get(C),[0.0, 1.18518086, 0.0, 1.18518086])
class TestRecurrentTransferMechanismReset:
def test_reset_run(self):
R = RecurrentTransferMechanism(name="R",
initial_value=0.5,
integrator_mode=True,
integration_rate=0.1,
auto=1.0,
noise=0.0)
R.reset_stateful_function_when = Never()
C = Composition(pathways=[R])
assert np.allclose(R.integrator_function.previous_value, 0.5)
# S.run(inputs={R: 1.0},
# num_trials=2,
# initialize=True,
# initial_values={R: 0.0})
C.run(inputs={R: 1.0},
num_trials=2,
initialize_cycle_values={R: [0.0]}
)
# Trial 1 | variable = 1.0 + 0.0
# integration: 0.9*0.5 + 0.1*1.0 + 0.0 = 0.55 ---> previous value = 0.55
# linear fn: 0.55*1.0 = 0.55
# Trial 2 | variable = 1.0 + 0.55
# integration: 0.9*0.55 + 0.1*1.55 + 0.0 = 0.65 ---> previous value = 0.65
# linear fn: 0.65*1.0 = 0.65
assert np.allclose(R.integrator_function.parameters.previous_value.get(C), 0.65)
R.integrator_function.reset(0.9, context=C)
assert np.allclose(R.integrator_function.parameters.previous_value.get(C), 0.9)
assert np.allclose(R.parameters.value.get(C), 0.65)
R.reset(0.5, context=C)
assert np.allclose(R.integrator_function.parameters.previous_value.get(C), 0.5)
assert np.allclose(R.parameters.value.get(C), 0.5)
C.run(inputs={R: 1.0}, num_trials=2)
# Trial 3
# integration: 0.9*0.5 + 0.1*1.5 + 0.0 = 0.6 ---> previous value = 0.6
# linear fn: 0.6*1.0 = 0.6
# Trial 4
# integration: 0.9*0.6 + 0.1*1.6 + 0.0 = 0.7 ---> previous value = 0.7
# linear fn: 0.7*1.0 = 0.7
assert np.allclose(R.integrator_function.parameters.previous_value.get(C), 0.7)
class TestClip:
def test_clip_float(self):
R = RecurrentTransferMechanism(clip=[-2.0, 2.0])
assert np.allclose(R.execute(3.0), 2.0)
assert np.allclose(R.execute(-3.0), -2.0)
def test_clip_array(self):
R = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
clip=[-2.0, 2.0])
assert np.allclose(R.execute([3.0, 0.0, -3.0]), [2.0, 0.0, -2.0])
def test_clip_2d_array(self):
R = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
clip=[-2.0, 2.0])
assert np.allclose(R.execute([[-5.0, -1.0, 5.0], [5.0, -5.0, 1.0], [1.0, 5.0, 5.0]]),
[[-2.0, -1.0, 2.0], [2.0, -2.0, 1.0], [1.0, 2.0, 2.0]])
class TestRecurrentInputPort:
def test_ris_simple(self):
R2 = RecurrentTransferMechanism(default_variable=[[0.0, 0.0, 0.0]],
matrix=[[1.0, 2.0, 3.0],
[2.0, 1.0, 2.0],
[3.0, 2.0, 1.0]],
has_recurrent_input_port=True)
R2.execute(input=[1, 3, 2])
c = Composition(pathways=[R2])
c.run(inputs=[[1, 3, 2]])
np.testing.assert_allclose(R2.parameters.value.get(c), [[14., 12., 13.]])
assert len(R2.input_ports) == 2
assert "Recurrent Input Port" not in R2.input_port.name # make sure recurrent InputPort isn't primary
class TestCustomCombinationFunction:
def test_rt_without_custom_comb_fct(self):
R1 = RecurrentTransferMechanism(
has_recurrent_input_port=True,
size=2,
)
result = R1.execute([1,2])
np.testing.assert_allclose(result, [[1,2]])
def test_rt_with_custom_comb_fct(self):
def my_fct(x):
return x[0] * x[1] if len(x) == 2 else x[0]
R2 = RecurrentTransferMechanism(
has_recurrent_input_port=True,
size=2,
combination_function=my_fct
)
result = R2.execute([1,2])
np.testing.assert_allclose(result, [[0,0]])
@pytest.mark.mechanism
@pytest.mark.integrator_mechanism
@pytest.mark.parametrize('cond0, cond1, expected', [
(pnl.Never(), pnl.AtTrial(2),
[[np.array([0.5]), np.array([0.5])],
[np.array([0.75]), np.array([0.75])],
[np.array([0.875]), np.array([0.5])], # I2 resets at Trial 2
[np.array([0.9375]), np.array([0.75])],
[np.array([0.96875]), np.array([0.875])],
[np.array([0.984375]), np.array([0.9375])],
[np.array([0.9921875]), np.array([0.96875])]]),
(pnl.Never(), pnl.AtTrialStart(),
[[np.array([0.5]), np.array([0.5])],
[np.array([0.75]), np.array([0.5])],
[np.array([0.875]), np.array([0.5])],
[np.array([0.9375]), np.array([0.5])],
[np.array([0.96875]), np.array([0.5])],
[np.array([0.984375]), np.array([0.5])],
[np.array([0.9921875]), np.array([0.5])]]),
(pnl.AtPass(0), pnl.AtTrial(2),
[[np.array([0.5]), np.array([0.5])],
[np.array([0.5]), np.array([0.75])],
[np.array([0.5]), np.array([0.5])], # I2 resets at Trial 2
[np.array([0.5]), np.array([0.75])],
[np.array([0.5]), np.array([0.875])],
[np.array([0.5]), np.array([0.9375])],
[np.array([0.5]), np.array([0.96875])]]),
], ids=lambda x: str(x) if isinstance(x, pnl.Condition) else "")
# 'LLVM' mode is not supported, because synchronization of compiler and
# python values during execution is not implemented.
@pytest.mark.usefixtures("comp_mode_no_llvm")
def test_reset_stateful_function_when_composition(self, comp_mode, cond0, cond1, expected):
I1 = pnl.RecurrentTransferMechanism(integrator_mode=True,
integration_rate=0.5)
I2 = pnl.RecurrentTransferMechanism(integrator_mode=True,
integration_rate=0.5)
I1.reset_stateful_function_when = cond0
I2.reset_stateful_function_when = cond1
C = pnl.Composition()
C.add_node(I1)
C.add_node(I2)
C.run(inputs={I1: [[1.0]], I2: [[1.0]]}, num_trials=7, execution_mode=comp_mode)
assert np.allclose(expected, C.results)
@pytest.mark.mechanism
@pytest.mark.integrator_mechanism
@pytest.mark.parametrize('cond0, cond1, expected', [
(pnl.AtPass(0), pnl.AtTrial(2),
[[np.array([0.5]), np.array([0.5])],
[np.array([0.5]), np.array([0.75])],
[np.array([0.5]), np.array([0.5])], # I2 resets at Trial 2
[np.array([0.5]), np.array([0.75])],
[np.array([0.5]), np.array([0.875])],
[np.array([0.5]), np.array([0.9375])],
[np.array([0.5]), np.array([0.96875])]]),
], ids=lambda x: str(x) if isinstance(x, pnl.Condition) else "")
@pytest.mark.parametrize('has_initializers2', [True, False],
ids=["initializers1", "NO initializers1"])
@pytest.mark.parametrize('has_initializers1', [True, False],
ids=["initializers2", "NO initializers2"])
# 'LLVM' mode is not supported, because synchronization of compiler and
# python values during execution is not implemented.
@pytest.mark.usefixtures("comp_mode_no_llvm")
def test_reset_stateful_function_when_has_initializers_composition(self, comp_mode, cond0, cond1, expected,
has_initializers1, has_initializers2):
I1 = pnl.RecurrentTransferMechanism(integrator_mode=True,
integration_rate=0.5)
I2 = pnl.RecurrentTransferMechanism(integrator_mode=True,
integration_rate=0.5)
I1.reset_stateful_function_when = cond0
I2.reset_stateful_function_when = cond1
I1.has_initializers = has_initializers1
I2.has_initializers = has_initializers2
C = pnl.Composition()
C.add_node(I1)
C.add_node(I2)
exp = expected.copy()
def_res = [np.array([0.5]), np.array([0.75]), np.array([0.875]),
np.array([0.9375]), np.array([0.96875]),
np.array([0.984375]), np.array([0.9921875])]
if not has_initializers1:
exp = list(zip(def_res, (x[1] for x in exp)))
if not has_initializers2:
exp = list(zip((x[0] for x in exp), def_res))
C.run(inputs={I1: [[1.0]], I2: [[1.0]]}, num_trials=7, execution_mode=comp_mode)
assert np.allclose(exp, C.results)
@pytest.mark.mechanism
@pytest.mark.integrator_mechanism
@pytest.mark.parametrize('until_finished, expected', [
(True, [[[[0.96875]]], [[[0.9990234375]]]]), # The 5th and the 10th iteration
(False, [[[[0.5]]], [[[0.75]]]]), # The first and the second iteration
], ids=['until_finished', 'oneshot'])
# 'LLVM' mode is not supported, because synchronization of compiler and
# python values during execution is not implemented.
@pytest.mark.usefixtures("comp_mode_no_llvm")
def test_max_executions_before_finished(self, comp_mode, until_finished, expected):
I1 = pnl.RecurrentTransferMechanism(integrator_mode=True,
integration_rate=0.5,
termination_threshold=0.0,
max_executions_before_finished=5,
execute_until_finished=until_finished)
C = pnl.Composition()
C.add_node(I1)
results = C.run(inputs={I1: [[1.0]]}, num_trials=1, execution_mode=comp_mode)
if comp_mode is pnl.ExecutionMode.Python:
assert I1.parameters.is_finished_flag.get(C) is until_finished
results2 = C.run(inputs={I1: [[1.0]]}, num_trials=1, execution_mode=comp_mode)
assert np.allclose(expected[0], results)
assert np.allclose(expected[1], results2)
class TestDebugProperties:
def test_defaults(self):
R = RecurrentTransferMechanism(name='R',
size=3)
print("\n\nTEST DEFAULTS")
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get() = ", R.parameters.auto.get())
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get() = ", R.parameters.hetero.get())
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get() = ", R.parameters.matrix.get())
comp = pnl.Composition()
comp.add_node(R)
print("\n\n---------------------------- Run -------------------------- ")
eid = "eid"
inputs = {R: [[1.0, 1.0, 1.0]]}
comp.run(inputs=inputs,
context=eid)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get(eid) = ", R.parameters.auto.get(eid))
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid))
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid))
def test_auto(self):
auto_val = 10.0
R = RecurrentTransferMechanism(name='R',
size=3,
auto=auto_val)
print("\n\nTEST AUTO [auto = ", auto_val, "]")
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get() = ", R.parameters.auto.get())
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get() = ", R.parameters.hetero.get())
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get() = ", R.parameters.matrix.get())
comp = pnl.Composition()
comp.add_node(R)
print("\n\n---------------------------- Run -------------------------- ")
eid = "eid"
inputs = {R: [[1.0, 1.0, 1.0]]}
comp.run(inputs=inputs,
context=eid)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get(eid) = ", R.parameters.auto.get(eid))
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid))
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid))
def test_hetero(self):
hetero_val = 10.0
R = RecurrentTransferMechanism(name='R',
size=3,
hetero=hetero_val)
print("\n\nTEST HETERO [hetero = ", hetero_val, "]")
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get() = ", R.parameters.auto.get())
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get() = ", R.parameters.hetero.get())
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get() = ", R.parameters.matrix.get())
comp = pnl.Composition()
comp.add_node(R)
print("\n\n---------------------------- Run -------------------------- ")
eid = "eid"
inputs = {R: [[1.0, 1.0, 1.0]]}
comp.run(inputs=inputs,
context=eid)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get(eid) = ", R.parameters.auto.get(eid))
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid))
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid))
def test_auto_and_hetero(self):
auto_val = 10.0
hetero_val = 5.0
R = RecurrentTransferMechanism(name='R',
size=3,
auto=auto_val,
hetero=hetero_val)
print("\n\nTEST AUTO AND HETERO\n [auto = ", auto_val, " | hetero = ", hetero_val, "] ")
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get() = ", R.parameters.auto.get())
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get() = ", R.parameters.hetero.get())
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get() = ", R.parameters.matrix.get())
comp = pnl.Composition()
comp.add_node(R)
print("\n\nRun")
eid = "eid"
inputs = {R: [[1.0, 1.0, 1.0]]}
comp.run(inputs=inputs,
context=eid)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get(eid) = ", R.parameters.auto.get(eid))
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid))
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid))
def test_matrix(self):
matrix_val = [[ 5.0, 10.0, 10.0],
[10.0, 5.0, 10.0],
[10.0, 10.0, 5.0]]
R = RecurrentTransferMechanism(name='R',
size=3,
matrix=matrix_val)
print("\n\nTEST MATRIX\n", matrix_val)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get() = ", R.parameters.auto.get())
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get() = ", R.parameters.hetero.get())
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get() = ", R.parameters.matrix.get())
comp = pnl.Composition()
comp.add_node(R)
print("\n\nRun")
eid = "eid"
inputs = {R: [[1.0, 1.0, 1.0]]}
comp.run(inputs=inputs,
context=eid)
print("\n\nAuto Values -----------------------------------")
print("R.auto = ", R.auto)
print("R.parameters.auto.get(eid) = ", R.parameters.auto.get(eid))
print("\n\nHetero Values ----------------------------------")
print("R.hetero = ", R.hetero)
print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid))
print("\n\nMatrix Values ----------------------------------")
print("R.matrix = ", R.matrix.base)
print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid))
```
#### File: tests/scheduling/conftest.py
```python
import psyneulink as pnl
import pytest
def pytest_assertrepr_compare(op, left, right):
if isinstance(left, list) and isinstance(right, list) and op == '==':
return [
'Time Step output matching:',
'Actual output:', str(left),
'Expected output:', str(right)
]
@pytest.helpers.register
def setify_expected_output(expected_output):
type_set = type(set())
for i in range(len(expected_output)):
if type(expected_output[i]) is not type_set:
try:
iter(expected_output[i])
expected_output[i] = set(expected_output[i])
except TypeError:
expected_output[i] = set([expected_output[i]])
return expected_output
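# Illustrative note (not part of the original conftest): the helper above normalises each
# entry of an expected-output list to a set, so ordering within a time step is ignored.
# With arbitrary (non-string) node objects a, b and c:
#     setify_expected_output([a, (a, b), c])  ->  [{a}, {a, b}, {c}]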
@pytest.fixture
def three_node_linear_composition():
A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
C = pnl.TransferMechanism(name='C')
comp = pnl.Composition()
comp.add_linear_processing_pathway([A, B, C])
return comp.nodes, comp
```
|
{
"source": "jeshuren/CIAMS",
"score": 2
}
|
#### File: src/automs/cluster_indices_generation.py
```python
import logging
import numpy as np
import tqdm
from .utils import read_bag_file, extract_bag_index
from .internal_indices import InternalIndices, INTERNAL_INDICES_METHOD_NAMES_DICT
from .external_indices import ExternalIndices, EXTERNAL_INDICES_METHOD_NAMES_DICT
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
KMEANS_CHOOSEN_CLUSTER_INDICES = {
'internal_indices' : [
# 'WGSS',
# 'BGSS',
# 'Ball-Hall',
# 'Banfeld-Raftery',
# 'Calinski-Harabasz',
# 'Det-Ratio',
# 'Ksq-DetW',
# 'Log-Det-Ratio',
# 'Log-SS-Ratio',
## 'Scott-Symons',
'Silhouette',
# 'Trace-WiB',
'C',
'Dunn',
# 'Davies-Bouldin',
# 'Ray-Turi',
# 'PBM',
'Score',
],
'external_indices' : [
'Entropy',
'Purity',
'Precision',
'Recall',
'F',
'Weighted-F',
'Folkes-Mallows',
'Rand',
'Adjusted-Rand',
'Adjusted-Mutual-Info',
'Normalized-Mutual-Info',
'Homogeneity',
'Completeness',
'V-Measure',
'Jaccard',
'Hubert Γ̂',
'Kulczynski',
# 'McNemar',
'Phi',
'Russel-Rao',
'Rogers-Tanimoto',
'Sokal-Sneath1',
'Sokal-Sneath2',
],
}
HIERARCHICAL_CHOOSEN_CLUSTER_INDICES = {
'internal_indices' : [
# 'WGSS',
# 'BGSS',
# 'Ball-Hall',
# 'Banfeld-Raftery',
# 'Calinski-Harabasz',
# 'Det-Ratio',
# 'Ksq-DetW',
# 'Log-Det-Ratio',
# 'Log-SS-Ratio',
## 'Scott-Symons',
'Silhouette',
# 'Trace-WiB',
'C',
'Dunn',
# 'Davies-Bouldin',
# 'Ray-Turi',
# 'PBM',
'Score',
],
'external_indices' : [
'Entropy',
'Purity',
'Precision',
'Recall',
'F',
'Weighted-F',
'Folkes-Mallows',
'Rand',
'Adjusted-Rand',
'Adjusted-Mutual-Info',
'Normalized-Mutual-Info',
'Homogeneity',
'Completeness',
'V-Measure',
'Jaccard',
'Hubert Γ̂',
'Kulczynski',
# 'McNemar',
'Phi',
'Russel-Rao',
'Rogers-Tanimoto',
'Sokal-Sneath1',
'Sokal-Sneath2',
],
}
SPECTRAL_CHOOSEN_CLUSTER_INDICES = {
'internal_indices' : [
# 'WGSS',
# 'BGSS',
# 'Ball-Hall',
# 'Banfeld-Raftery',
# 'Calinski-Harabasz',
# 'Det-Ratio',
# 'Ksq-DetW',
# 'Log-Det-Ratio',
# 'Log-SS-Ratio',
## 'Scott-Symons',
'Silhouette',
# 'Trace-WiB',
'C',
'Dunn',
'Davies-Bouldin',
'Ray-Turi',
# 'PBM',
'Score',
],
'external_indices' : [
'Entropy',
'Purity',
'Precision',
'Recall',
'F',
'Weighted-F',
'Folkes-Mallows',
'Rand',
'Adjusted-Rand',
'Adjusted-Mutual-Info',
'Normalized-Mutual-Info',
'Homogeneity',
'Completeness',
'V-Measure',
'Jaccard',
'Hubert Γ̂',
'Kulczynski',
# 'McNemar',
'Phi',
'Russel-Rao',
'Rogers-Tanimoto',
'Sokal-Sneath1',
'Sokal-Sneath2',
],
}
HDBSCAN_CHOOSEN_CLUSTER_INDICES = {
'internal_indices' : [
# 'WGSS',
# 'BGSS',
# 'Ball-Hall',
# 'Banfeld-Raftery',
# 'Calinski-Harabasz',
# 'Det-Ratio',
# 'Ksq-DetW',
# 'Log-Det-Ratio',
'Log-SS-Ratio',
## 'Scott-Symons',
'Silhouette',
# 'Trace-WiB',
# 'C',
'Dunn',
# 'Davies-Bouldin',
# 'Ray-Turi',
# 'PBM',
'Score',
],
'external_indices' : [
'Entropy',
'Purity',
'Precision',
'Recall',
'F',
'Weighted-F',
'Folkes-Mallows',
'Rand',
'Adjusted-Rand',
'Adjusted-Mutual-Info',
'Normalized-Mutual-Info',
'Homogeneity',
'Completeness',
'V-Measure',
'Jaccard',
'Hubert Γ̂',
'Kulczynski',
# 'McNemar',
'Phi',
'Russel-Rao',
'Rogers-Tanimoto',
'Sokal-Sneath1',
'Sokal-Sneath2',
],
}
FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES = [
# ('kmeans', 'internal_indices', 'WGSS'),
# ('kmeans', 'internal_indices', 'BGSS'),
# ('kmeans', 'internal_indices', 'Ball-Hall'),
# ('kmeans', 'internal_indices', 'Banfeld-Raftery'),
# ('kmeans', 'internal_indices', 'Calinski-Harabasz'),
# ('kmeans', 'internal_indices', 'Det-Ratio'),
# ('kmeans', 'internal_indices', 'Ksq-DetW'),
# ('kmeans', 'internal_indices', 'Log-Det-Ratio'),
# ('kmeans', 'internal_indices', 'Log-SS-Ratio'),
## ('kmeans', 'internal_indices', 'Scott-Symons'),
('kmeans', 'internal_indices', 'Silhouette'),
# ('kmeans', 'internal_indices', 'Trace-WiB'),
('kmeans', 'internal_indices', 'C'),
('kmeans', 'internal_indices', 'Dunn'),
# ('kmeans', 'internal_indices', 'Davies-Bouldin'),
# ('kmeans', 'internal_indices', 'Ray-Turi'),
# ('kmeans', 'internal_indices', 'PBM'),
('kmeans', 'internal_indices', 'Score'),
('kmeans', 'external_indices', 'Entropy'),
('kmeans', 'external_indices', 'Purity'),
('kmeans', 'external_indices', 'Precision'),
('kmeans', 'external_indices', 'Recall'),
('kmeans', 'external_indices', 'F'),
('kmeans', 'external_indices', 'Weighted-F'),
('kmeans', 'external_indices', 'Folkes-Mallows'),
('kmeans', 'external_indices', 'Rand'),
('kmeans', 'external_indices', 'Adjusted-Rand'),
('kmeans', 'external_indices', 'Adjusted-Mutual-Info'),
('kmeans', 'external_indices', 'Normalized-Mutual-Info'),
('kmeans', 'external_indices', 'Homogeneity'),
('kmeans', 'external_indices', 'Completeness'),
('kmeans', 'external_indices', 'V-Measure'),
('kmeans', 'external_indices', 'Jaccard'),
('kmeans', 'external_indices', 'Hubert Γ̂'),
('kmeans', 'external_indices', 'Kulczynski'),
# ('kmeans', 'external_indices', 'McNemar'),
('kmeans', 'external_indices', 'Phi'),
('kmeans', 'external_indices', 'Russel-Rao'),
('kmeans', 'external_indices', 'Rogers-Tanimoto'),
('kmeans', 'external_indices', 'Sokal-Sneath1'),
('kmeans', 'external_indices', 'Sokal-Sneath2'),
# ('hierarchical', 'internal_indices', 'WGSS'),
# ('hierarchical', 'internal_indices', 'BGSS'),
# ('hierarchical', 'internal_indices', 'Ball-Hall'),
# ('hierarchical', 'internal_indices', 'Banfeld-Raftery'),
# ('hierarchical', 'internal_indices', 'Calinski-Harabasz'),
# ('hierarchical', 'internal_indices', 'Det-Ratio'),
# ('hierarchical', 'internal_indices', 'Ksq-DetW'),
# ('hierarchical', 'internal_indices', 'Log-Det-Ratio'),
# ('hierarchical', 'internal_indices', 'Log-SS-Ratio'),
## ('hierarchical', 'internal_indices', 'Scott-Symons'),
('hierarchical', 'internal_indices', 'Silhouette'),
# ('hierarchical', 'internal_indices', 'Trace-WiB'),
('hierarchical', 'internal_indices', 'C'),
('hierarchical', 'internal_indices', 'Dunn'),
# ('hierarchical', 'internal_indices', 'Davies-Bouldin'),
# ('hierarchical', 'internal_indices', 'Ray-Turi'),
# ('hierarchical', 'internal_indices', 'PBM'),
('hierarchical', 'internal_indices', 'Score'),
('hierarchical', 'external_indices', 'Entropy'),
('hierarchical', 'external_indices', 'Purity'),
('hierarchical', 'external_indices', 'Precision'),
('hierarchical', 'external_indices', 'Recall'),
('hierarchical', 'external_indices', 'F'),
('hierarchical', 'external_indices', 'Weighted-F'),
('hierarchical', 'external_indices', 'Folkes-Mallows'),
('hierarchical', 'external_indices', 'Rand'),
('hierarchical', 'external_indices', 'Adjusted-Rand'),
('hierarchical', 'external_indices', 'Adjusted-Mutual-Info'),
('hierarchical', 'external_indices', 'Normalized-Mutual-Info'),
('hierarchical', 'external_indices', 'Homogeneity'),
('hierarchical', 'external_indices', 'Completeness'),
('hierarchical', 'external_indices', 'V-Measure'),
('hierarchical', 'external_indices', 'Jaccard'),
('hierarchical', 'external_indices', 'Hubert Γ̂'),
('hierarchical', 'external_indices', 'Kulczynski'),
# ('hierarchical', 'external_indices', 'McNemar'),
('hierarchical', 'external_indices', 'Phi'),
('hierarchical', 'external_indices', 'Russel-Rao'),
('hierarchical', 'external_indices', 'Rogers-Tanimoto'),
('hierarchical', 'external_indices', 'Sokal-Sneath1'),
('hierarchical', 'external_indices', 'Sokal-Sneath2'),
# ('spectral', 'internal_indices', 'WGSS'),
# ('spectral', 'internal_indices', 'BGSS'),
# ('spectral', 'internal_indices', 'Ball-Hall'),
# ('spectral', 'internal_indices', 'Banfeld-Raftery'),
# ('spectral', 'internal_indices', 'Calinski-Harabasz'),
# ('spectral', 'internal_indices', 'Det-Ratio'),
# ('spectral', 'internal_indices', 'Ksq-DetW'),
# ('spectral', 'internal_indices', 'Log-Det-Ratio'),
# ('spectral', 'internal_indices', 'Log-SS-Ratio'),
## ('spectral', 'internal_indices', 'Scott-Symons'),
('spectral', 'internal_indices', 'Silhouette'),
# ('spectral', 'internal_indices', 'Trace-WiB'),
('spectral', 'internal_indices', 'C'),
('spectral', 'internal_indices', 'Dunn'),
('spectral', 'internal_indices', 'Davies-Bouldin'),
('spectral', 'internal_indices', 'Ray-Turi'),
# ('spectral', 'internal_indices', 'PBM'),
('spectral', 'internal_indices', 'Score'),
('spectral', 'external_indices', 'Entropy'),
('spectral', 'external_indices', 'Purity'),
('spectral', 'external_indices', 'Precision'),
('spectral', 'external_indices', 'Recall'),
('spectral', 'external_indices', 'F'),
('spectral', 'external_indices', 'Weighted-F'),
('spectral', 'external_indices', 'Folkes-Mallows'),
('spectral', 'external_indices', 'Rand'),
('spectral', 'external_indices', 'Adjusted-Rand'),
('spectral', 'external_indices', 'Adjusted-Mutual-Info'),
('spectral', 'external_indices', 'Normalized-Mutual-Info'),
('spectral', 'external_indices', 'Homogeneity'),
('spectral', 'external_indices', 'Completeness'),
('spectral', 'external_indices', 'V-Measure'),
('spectral', 'external_indices', 'Jaccard'),
('spectral', 'external_indices', 'Hubert Γ̂'),
('spectral', 'external_indices', 'Kulczynski'),
# ('spectral', 'external_indices', 'McNemar'),
('spectral', 'external_indices', 'Phi'),
('spectral', 'external_indices', 'Russel-Rao'),
('spectral', 'external_indices', 'Rogers-Tanimoto'),
('spectral', 'external_indices', 'Sokal-Sneath1'),
('spectral', 'external_indices', 'Sokal-Sneath2'),
# ('hdbscan', 'internal_indices', 'WGSS'),
# ('hdbscan', 'internal_indices', 'BGSS'),
# ('hdbscan', 'internal_indices', 'Ball-Hall'),
# ('hdbscan', 'internal_indices', 'Banfeld-Raftery'),
# ('hdbscan', 'internal_indices', 'Calinski-Harabasz'),
# ('hdbscan', 'internal_indices', 'Det-Ratio'),
# ('hdbscan', 'internal_indices', 'Ksq-DetW'),
# ('hdbscan', 'internal_indices', 'Log-Det-Ratio'),
('hdbscan', 'internal_indices', 'Log-SS-Ratio'),
## ('hdbscan', 'internal_indices', 'Scott-Symons'),
('hdbscan', 'internal_indices', 'Silhouette'),
# ('hdbscan', 'internal_indices', 'Trace-WiB'),
# ('hdbscan', 'internal_indices', 'C'),
('hdbscan', 'internal_indices', 'Dunn'),
# ('hdbscan', 'internal_indices', 'Davies-Bouldin'),
# ('hdbscan', 'internal_indices', 'Ray-Turi'),
# ('hdbscan', 'internal_indices', 'PBM'),
('hdbscan', 'internal_indices', 'Score'),
('hdbscan', 'external_indices', 'Entropy'),
('hdbscan', 'external_indices', 'Purity'),
('hdbscan', 'external_indices', 'Precision'),
('hdbscan', 'external_indices', 'Recall'),
('hdbscan', 'external_indices', 'F'),
('hdbscan', 'external_indices', 'Weighted-F'),
('hdbscan', 'external_indices', 'Folkes-Mallows'),
('hdbscan', 'external_indices', 'Rand'),
('hdbscan', 'external_indices', 'Adjusted-Rand'),
('hdbscan', 'external_indices', 'Adjusted-Mutual-Info'),
('hdbscan', 'external_indices', 'Normalized-Mutual-Info'),
('hdbscan', 'external_indices', 'Homogeneity'),
('hdbscan', 'external_indices', 'Completeness'),
('hdbscan', 'external_indices', 'V-Measure'),
('hdbscan', 'external_indices', 'Jaccard'),
('hdbscan', 'external_indices', 'Hubert Γ̂'),
('hdbscan', 'external_indices', 'Kulczynski'),
# ('hdbscan', 'external_indices', 'McNemar'),
('hdbscan', 'external_indices', 'Phi'),
('hdbscan', 'external_indices', 'Russel-Rao'),
('hdbscan', 'external_indices', 'Rogers-Tanimoto'),
('hdbscan', 'external_indices', 'Sokal-Sneath1'),
('hdbscan', 'external_indices', 'Sokal-Sneath2'),
]
def generate_kmeans_cluster_indices(dataset, choosen_indices, n_jobs=None):
cluster_labels = dataset.perform_kmeans_clustering(n_clusters='n_classes', n_jobs=n_jobs)
internal_indices_values = dict()
internal_validation = InternalIndices(dataset.data, cluster_labels)
choosen_internal_indices = choosen_indices['internal_indices']
for internal_index in choosen_internal_indices:
internal_index_method = getattr(internal_validation, INTERNAL_INDICES_METHOD_NAMES_DICT[internal_index])
internal_indices_values[internal_index] = internal_index_method()
external_indices_values = dict()
external_validation = ExternalIndices(dataset.target, cluster_labels)
choosen_external_indices = choosen_indices['external_indices']
for external_index in choosen_external_indices:
external_index_method = getattr(external_validation, EXTERNAL_INDICES_METHOD_NAMES_DICT[external_index])
external_indices_values[external_index] = external_index_method()
indices_values = {
'internal_indices' : internal_indices_values,
'external_indices' : external_indices_values,
}
return indices_values
def generate_hierarchical_cluster_indices(dataset, choosen_indices, n_jobs=None):
cluster_labels = dataset.perform_hierarchical_clustering(n_clusters='n_classes')
internal_indices_values = dict()
internal_validation = InternalIndices(dataset.data, cluster_labels)
choosen_internal_indices = choosen_indices['internal_indices']
for internal_index in choosen_internal_indices:
internal_index_method = getattr(internal_validation, INTERNAL_INDICES_METHOD_NAMES_DICT[internal_index])
internal_indices_values[internal_index] = internal_index_method()
external_indices_values = dict()
external_validation = ExternalIndices(dataset.target, cluster_labels)
choosen_external_indices = choosen_indices['external_indices']
for external_index in choosen_external_indices:
external_index_method = getattr(external_validation, EXTERNAL_INDICES_METHOD_NAMES_DICT[external_index])
external_indices_values[external_index] = external_index_method()
indices_values = {
'internal_indices' : internal_indices_values,
'external_indices' : external_indices_values,
}
return indices_values
def generate_spectral_cluster_indices(dataset, choosen_indices, n_jobs=None):
cluster_labels = dataset.perform_spectral_clustering(n_clusters='n_classes', n_jobs=n_jobs)
internal_indices_values = dict()
internal_validation = InternalIndices(dataset.data, cluster_labels)
choosen_internal_indices = choosen_indices['internal_indices']
for internal_index in choosen_internal_indices:
internal_index_method = getattr(internal_validation, INTERNAL_INDICES_METHOD_NAMES_DICT[internal_index])
internal_indices_values[internal_index] = internal_index_method()
external_indices_values = dict()
external_validation = ExternalIndices(dataset.target, cluster_labels)
choosen_external_indices = choosen_indices['external_indices']
for external_index in choosen_external_indices:
external_index_method = getattr(external_validation, EXTERNAL_INDICES_METHOD_NAMES_DICT[external_index])
external_indices_values[external_index] = external_index_method()
indices_values = {
'internal_indices' : internal_indices_values,
'external_indices' : external_indices_values,
}
return indices_values
def generate_hdbscan_cluster_indices(dataset, choosen_indices, n_jobs=None):
cluster_labels = dataset.perform_hdbscan_clustering(core_dist_n_jobs=(n_jobs if n_jobs is not None else 4))
internal_indices_values = dict()
internal_validation = InternalIndices(dataset.data, cluster_labels)
choosen_internal_indices = choosen_indices['internal_indices']
for internal_index in choosen_internal_indices:
internal_index_method = getattr(internal_validation, INTERNAL_INDICES_METHOD_NAMES_DICT[internal_index])
internal_indices_values[internal_index] = internal_index_method()
external_indices_values = dict()
external_validation = ExternalIndices(dataset.target, cluster_labels)
choosen_external_indices = choosen_indices['external_indices']
for external_index in choosen_external_indices:
external_index_method = getattr(external_validation, EXTERNAL_INDICES_METHOD_NAMES_DICT[external_index])
external_indices_values[external_index] = external_index_method()
indices_values = {
'internal_indices' : internal_indices_values,
'external_indices' : external_indices_values,
}
return indices_values
def bag_generate_cluster_indices(bag_filename, n_jobs=1):
""" Perform clustering on the bags and generate cluster indices evaluating the quality of clusters """
bag_index = extract_bag_index(bag_filename)
dataset = read_bag_file(bag_filename)
logger.info("Bag %2d : performing kmeans clustering, generating cluster indices", bag_index)
kmeans_cluster_indices = generate_kmeans_cluster_indices(dataset, KMEANS_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)
logger.info("Bag %2d : performing hierarchical clustering, generating cluster indices", bag_index)
hierarchical_cluster_indices = generate_hierarchical_cluster_indices(dataset, HIERARCHICAL_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)
logger.info("Bag %2d : performing spectral clustering, generating cluster indices", bag_index)
spectral_cluster_indices = generate_spectral_cluster_indices(dataset, SPECTRAL_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)
logger.info("Bag %2d : performing hdbscan clustering, generating cluster indices", bag_index)
hdbscan_cluster_indices = generate_hdbscan_cluster_indices(dataset, HDBSCAN_CHOOSEN_CLUSTER_INDICES, n_jobs=n_jobs)
cluster_indices = {
'kmeans' : kmeans_cluster_indices,
'hierarchical' : hierarchical_cluster_indices,
'spectral' : spectral_cluster_indices,
'hdbscan' : hdbscan_cluster_indices,
}
return cluster_indices
def convert_cluster_indices_to_features(cluster_indices):
""" Convert the cluster indices into a flat feature vector """
feature_vector = list(map(
lambda keys_triple: cluster_indices[keys_triple[0]][keys_triple[1]][keys_triple[2]],
FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES
))
feature_vector = np.array(feature_vector)
return feature_vector
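# Illustrative usage sketch (not part of the original module): combining the two helpers
# above to turn one bag into a flat feature vector. The bag filename below is hypothetical
# and assumes the pickled-bag layout expected by `read_bag_file`.
if __name__ == '__main__':
    cluster_indices = bag_generate_cluster_indices('example_dataset_bag1.p', n_jobs=2)
    feature_vector = convert_cluster_indices_to_features(cluster_indices)
    # one entry per triple in FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES
    print(feature_vector.shape)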
```
#### File: src/automs/config.py
```python
class CsvConfig:
""" Dataset configuration class for CSV data format """
dataset_filetype = 'csv'
def __init__(self, sep=',', skiprows=None, header_row=None, usecols=None, target_col=-1, categorical_cols='infer', na_values=None, **kargs):
"""
Parameters
----------
sep : str, optional
Column delimiter. **Accepted values:** ``None`` implies autodetect the delimiter, ``'\s+'`` matches any combination of spaces and tabs, and regular expressions are also accepted. (default is ``','``).
skiprows : list of int or int, optional
List of line indices to skip or the number of starting lines to skip. (default value ``None`` implies don't skip any lines)
header_row : int, optional
Relative Zero-Index (index of rows after skipping rows using ``skiprows`` parameter) of the row containing column names. Note: All preceding rows are ignored. (default value ``None`` implies no header row)
usecols : list, optional
List of column names (or column indices, if no header row specified) to consider. (default value ``None`` indicates use of all columns)
target_col : int, optional
Relative Zero-Index of column (after filtering columns using ``usecols`` parameter) to use as target values. ``None`` indicates absence of target value columns. (default value ``-1`` implies use the last column as target values)
categorical_cols : 'infer' or list or str or int or 'all', optional
List (str or int if singleton) of column names (or absolute indices of columns, if no header row specified) of categorical columns to encode. Default value ``'infer'`` autodetects nominal categorical columns. ``'all'`` implies all columns are nominal categorical. ``None`` implies no nominal categorical columns exist.
na_values : scalar or str or list-like or dict, optional
Additional strings to recognize as NA/NaN. If dict is passed, it specifies per-column NA values. By default the following values are interpreted as NaN: ‘’, ‘#N/A’, ‘#N/A N/A’, ‘#NA’, ‘-1.#IND’, ‘-1.#QNAN’, ‘-NaN’, ‘-nan’, ‘1.#IND’, ‘1.#QNAN’, ‘N/A’, ‘NA’, ‘NULL’, ‘NaN’, ‘n/a’, ‘nan’, ‘null’. (default value ``None`` implies no additional values to interpret as NaN)
**kargs
Other keyword arguments accepted by :func:`pandas.read_csv` such as ``comment`` and ``lineterminator``.
Notes
-----
* ``skiprows`` parameter uses absolute row indices whereas ``header_row`` parameter uses relative index (i.e., zero-index after removing rows specified by ``skiprows`` parameter).
* ``usecols`` and ``categorical_cols`` parameters use absolute column names (or indices, if no header row) whereas the ``target_col`` parameter uses relative column indices (or names) after filtering out columns specified by ``usecols`` parameter.
* ``categorical_cols='infer'`` identifies and encodes nominal features (i.e., features of 'string' type, with fewer unique entries than a value heuristically determined from the number of data samples) and drops other 'string' and 'date' type features from the dataset. Use :func:`automs.eda.max_classes_nominal` to find the heuristically determined value of maximum number of distinct entries in nominal features for a given number of samples.
* Data samples with any NA/NaN features are implicitly dropped.
"""
self.sep = sep
self.skiprows = skiprows
self.header_row = header_row
self.usecols = usecols
self.target_col = target_col
self.encode_target = True
self.categorical_cols = categorical_cols
self.na_values = na_values
self.nrows = None
# self.kargs = kargs
for key, value in kargs.items():
setattr(self, key, value)
class LibsvmConfig:
""" Dataset configuration class for LIBSVM data format """
dataset_filetype = 'libsvm'
def __init__(self):
pass
class ArffConfig:
""" Dataset configuration class for ARFF data format """
dataset_filetype = 'arff'
def __init__(self, target_attr='class', numeric_categorical_attrs=None):
"""
Parameters
----------
target_attr : str, optional
Attribute name of the target column. ``None`` implies no target columns. (default value is ``'class'``)
numeric_categorical_attrs : list of str, optional
List of names of numeric attributes to be inferred as nominal and to be encoded. Note: All nominal attributes are implicitly encoded. (default value ``None`` implies no numeric attributes are to be infered as nominal)
Notes
-----
All nominal type attributes are implicitly encoded.
"""
self.target_attr = target_attr
self.encode_target = True
self.numeric_categorical_attrs = numeric_categorical_attrs
DATASET_FILETYPE_CONFIG = {
'csv': CsvConfig,
'libsvm': LibsvmConfig,
'arff': ArffConfig
}
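# Illustrative usage sketch (not part of the original module): building configs for
# hypothetical input files and recovering the config class from its registered filetype.
if __name__ == '__main__':
    csv_config = CsvConfig(sep=',', header_row=0, target_col=-1, categorical_cols='infer')
    assert DATASET_FILETYPE_CONFIG[csv_config.dataset_filetype] is CsvConfig
    arff_config = ArffConfig(target_attr='class')
    assert DATASET_FILETYPE_CONFIG[arff_config.dataset_filetype] is ArffConfig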
```
#### File: src/automs/eda.py
```python
from collections import Counter
from functools import reduce
import logging
from math import ceil
import os
import pickle
from random import shuffle
import sys
from time import time
# import warnings
# third party libraries
import hdbscan
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
from scipy.io.arff import loadarff
from scipy.sparse.csgraph import laplacian
import seaborn as sns
from sklearn.cluster import DBSCAN, KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.datasets import load_svmlight_file
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import LabelEncoder, StandardScaler
# local application code
# warnings.simplefilter(action='ignore', category=FutureWarning)
# warnings.filterwarnings("ignore", category=DeprecationWarning)
# setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class EDA:
"""A data container class with methods for data pre-processing and cluster analysis related tasks"""
def load_data(self, data, target=None, flatten_features=True):
"""Load obj:`np.ndarray` or :obj:`list` objects as data and target values
Parameters:
data (:obj:`np.ndarray`): array of data samples (samples x features)
target (:obj:`np.ndarray`, optional): class labels or target vales
flatten_features (bool): flatten complex **multi-dimensional** features, if ``True``
Note:
* Complex 'multi-dimensional' features of data samples are implicitly flattened by default.
* Column indices (or names) of the features are zero-indexed.
Examples:
Illustration of implicit flattening of multi-dimensional features::
>>> from automs import eda
>>> #create dummy data with multi-dimensional features
>>> data = [
... [
... [[1],[2]], [[3,4],[5,6]]
... ],
... [
... [[7],[8]], [[9,10],[11,12]]
... ]
... ]
>>> main = eda.EDA(force_file=False)
>>> main.load_data(data)
>>> print(main.data)
>>> print("no. of samples = ", main.n_samples)
>>> print("no. of features = ", main.n_features)
"""
try:
data = np.array(data)
if flatten_features:
#Flatten 'simple' numerical multi-dimensional features
if issubclass(data.dtype.type, np.integer) or issubclass(data.dtype.type, np.floating):
if len(data.shape)==1:
data = data.reshape(data.shape[0], 1)
if len(data.shape)>2:
data = data.reshape(data.shape[0], np.product(data.shape[1:]))
#Flatten 'complex' non-numerical multi-dimensional features
elif issubclass(data.dtype.type, np.object_):
flattened_data = []
for sample in data:
flattened_data.append(flatten_list(sample))
data = np.array(flattened_data, dtype=np.number)
if not(issubclass(data.dtype.type, np.integer) or issubclass(data.dtype.type, np.floating)):
# raise UserWarning("error: Data contains 'non-numerical features' or 'varying number of features across samples'")
logger.error("Data contains 'non-numerical features' or 'varying number of features across samples'")
raise ValueError("Data contains 'non-numerical features' or 'varying number of features across samples'")
except Exception as err:
# print('{0}\nerror: failed to load data or flatten multi-dimensional features'.format(err))
logger.error("Failed to load data or flatten multi-dimensional features: %s", err)
raise ValueError("failed to load data or flatten multi-dimensional features")
self.data = data
self.n_samples, self.n_features = self.data.shape
self.columns_ = np.arange(self.n_features)
if target is not None:
try:
if self.n_samples == len(target):
self.target = np.array(target)
else:
# raise UserWarning("number of 'target' values doesn't match number of samples in data")
logger.error("Number of 'target' values doesn't match number of samples in data")
raise ValueError("number of 'target' values doesn't match number of samples in data")
if len(self.target.shape)>1:
# raise UserWarning("'target' values form a multi-dimensional array (but one-dimensional array expected).")
logger.error("'target' values form a mutli-dimensional array (but one-dimensional array expected).")
raise ValueError("'target' values form a mutli-dimensional array (but one-dimensional array expected).")
except Exception as err:
# print('{0}\nerror: invalid target array supplied'.format(err))
logger.error("Invalid target array supplied : %s", err)
raise ValueError("invalid target array supplied")
self.classes_ = None
classes_ = np.unique(self.target)
if classes_.shape[0] <= max_classes_nominal(self.n_samples):
self.classes_ = classes_
"""Reading datasets from standard file formats (Supported File Formats : csv, libsvm, arff)
See also:
`Loading from External datasets <http://scikit-learn.org/stable/datasets/#loading-from-external-datasets>`_
"""
def read_data_csv(self, file, sep=',', skiprows=None, header_row=None, usecols=None, target_col=-1, encode_target=True, categorical_cols='infer', na_values=None, nrows=None, **kargs):
"""Read data from CSV format file
Parameters:
file (str or open file): path to the CSV data file or URL (http, ftp, S3 location) or ``open file`` object.
sep (str, default=','): Column delimiter. Accepted values: ``None`` implies autodetect the delimiter, '\s+' matches any combination of spaces and tabs, and regular expressions are also accepted.
skiprows (:obj:`list` or int, default= ``None``): 'List' (list) of line indices to skip or 'Number' (int) of starting lines to skip.
header_row (int, default=``None``): Relative Zero-Index (index of rows after skipping rows using ``skiprows`` parameter) of the row containing column names. Note: All preceding rows are ignored.
usecols (:obj:`list`, default= ``None``): List of column 'names' (or 'indices', if no column names) to consider. ``None`` indicates use of all columns.
target_col (int, default=``-1``): Relative Zero-Index of column (after filtering columns using ``usecols`` parameter) to use as target values. ``None`` indicates absence of target value columns.
encode_target (bool, default=True): Encode target values
categorical_cols (:obj:`list`, str, int, 'all', None, default='infer'): List (str or int if singleton) of column 'names' (or absolute 'indices', if no column names) of categorical columns to encode. ``categorical_cols='infer'`` autodetects nominal categorical columns. ``categorical_cols='all'`` implies all columns are nominal categorical. ``categorical_cols=None`` implies no nominal categorical columns.
na_values (scalar, str, list-like, or dict, default=``None``): Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: ‘’, ‘#N/A’, ‘#N/A N/A’, ‘#NA’, ‘-1.#IND’, ‘-1.#QNAN’, ‘-NaN’, ‘-nan’, ‘1.#IND’, ‘1.#QNAN’, ‘N/A’, ‘NA’, ‘NULL’, ‘NaN’, ‘n/a’, ‘nan’, ‘null’.
nrows (int, default=``None``): Number of rows of data to read. ``None`` implies all available rows.
**kargs: Other keyword arguments accepted by :func:`pandas.read_csv` (Keyword Arguments: comment, lineterminator, ...)
Note:
* ``skiprows`` parameter uses absolute row indices whereas ``header_row`` parameter uses relative index (i.e., zero-index after removing rows specified by ``skiprows`` parameter).
* ``usecols`` and ``categorical_cols`` parameters use absolute column 'names' (or 'indices' if no 'names') whereas the ``target_col`` parameter uses relative column 'indices' (or 'names') after filtering out columns specified by ``usecols`` parameter.
* ``categorical_cols='infer'`` identifies and encodes nominal features (i.e., features of 'string' type, with fewer unique entries than a value heuristically determined from the number of data samples) and drops other 'string' and 'date' type features.
Use :func:`automs.eda.max_classes_nominal` to find the heuristically determined value of the maximum number of distinct entries in nominal features for a given number of samples.
* Data samples with any NA/NaN features are implicitly dropped.
Examples:
Illustration of **Reading from CSV data file** ::
>>> from automs import eda
>>> main = eda.EDA()
>>>
>>> from io import StringIO
>>>
>>> data = '''Dataset: Abalone
... Source: UCI ML Repository
...
... skips rows until this, i.e., skiprows = 4. Header row follows immediately, i.e., header_row = 0.
... Sex, Length, Diameter, Height, Whole weight, Shucked weight, Viscera weight, Shell weight, Rings
... M,0.455,0.365,0.095,0.514,0.2245,0.101,0.15,15
... M,0.35,0.265,0.09,0.2255,0.0995,0.0485,0.07,7
... F,0.53,0.42,0.135,0.677,0.2565,0.1415,0.21,9
... M,0.44,0.365,0.125,0.516,0.2155,0.114,0.155,10
... I,0.33,0.255,0.08,0.205,0.0895,0.0395,0.055,7
... I,0.425,0.3,0.095,0.3515,0.141,0.0775,0.12,8
... F,0.53,0.415,0.15,0.7775,0.237,0.1415,0.33,20
... F,0.545,0.425,0.125,0.768,0.294,0.1495,0.26,16
... M,0.475,0.37,0.125,0.5095,0.2165,0.1125,0.165,9
... F,0.55,0.44,0.15,0.8945,0.3145,0.151,0.32,19
... '''
>>>
>>> # use columns ['Sex', 'Length', 'Diameter', 'Height', 'Rings']. 'Rings' is the target to predict, i.e., target_col=-1.
... # Auto-detect nominal categorical columns to encode, i.e., categorical_cols='infer' (default)
... main.read_data_csv(StringIO(data), sep=',', skiprows=4, header_row=0, usecols=['Sex', 'Length', 'Diameter', 'Height', 'Rings'], target_col=-1, encode_target=False)
>>>
>>> # Print the processed data samples. Note: 'Sex' column has been encoded.
... print(main.data)
[[ 2. 0.455 0.365 0.095]
[ 2. 0.35 0.265 0.09 ]
[ 0. 0.53 0.42 0.135]
[ 2. 0.44 0.365 0.125]
[ 1. 0.33 0.255 0.08 ]
[ 1. 0.425 0.3 0.095]
[ 0. 0.53 0.415 0.15 ]
[ 0. 0.545 0.425 0.125]
[ 2. 0.475 0.37 0.125]
[ 0. 0.55 0.44 0.15 ]]
>>>
>>> # Print the names of columns in data
... print(main.columns_)
Index(['Sex', 'Length', 'Diameter', 'Height'], dtype='object')
>>>
>>> # Print the target values, i.e, 'Rings' values.
... print(main.target)
[15 7 9 10 7 8 20 16 9 19]
::
>>> from automs import eda
>>> main = eda.EDA()
>>>
>>> from io import StringIO
>>>
>>> # First 10 samples from Dataset : Mushroom (UCI ML Repository). A string type feature was intentionally introduced as Column '0'.
>>> data = '''
... sample1 p x s n t p f c n k e e s s w w p w o p k s u
... sample2 e x s y t a f c b k e c s s w w p w o p n n g
... sample3 e b s w t l f c b n e c s s w w p w o p n n m
... sample4 p x y w t p f c n n e e s s w w p w o p k s u
... sample5 e x s g f n f w b k t e s s w w p w o e n a g
... sample6 e x y y t a f c b n e c s s w w p w o p k n g
... sample7 e b s w t a f c b g e c s s w w p w o p k n m
... sample8 e b y w t l f c b n e c s s w w p w o p n s m
... sample9 p x y w t p f c n p e e s s w w p w o p k v g
... sample10 e b s y t a f c b g e c s s w w p w o p k s m
... '''
>>>
>>> # Column delimiter is spaces or tabs, i.e., sep='\s+'
... # No header rows available, i.e., header_row=None (default).
... # Use all columns, i.e., usecols=None (default).
... # Column '1' contains target values. Encode the target values, i.e., encode_target=True (default).
... main.read_data_csv(StringIO(data), sep='\s+', header_row=None, target_col=1)
info: columns [0] was/were inferred as 'string' or 'date' type feature(s) and dropped
>>>
>>> #Print the processed data samples. Note: Column '0' was inferred as 'string' type feature and dropped.
... print(main.data)
[[ 1. 0. 1. 1. 3. 0. 0. 1. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 2.]
[ 1. 0. 3. 1. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 0.]
[ 0. 0. 2. 1. 1. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1.]
[ 1. 1. 2. 1. 3. 0. 0. 1. 2. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 2.]
[ 1. 0. 0. 0. 2. 0. 1. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[ 1. 1. 3. 1. 0. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0.]
[ 0. 0. 2. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 1.]
[ 0. 1. 2. 1. 1. 0. 0. 0. 2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 2. 1.]
[ 1. 1. 2. 1. 3. 0. 0. 1. 3. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1. 0. 3. 0.]
[ 0. 0. 3. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 2. 1.]]
>>>
>>> # Print the names of columns in data
... print(main.columns_)
Int64Index([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype='int64')
>>>
>>> # Print the target values, i.e, Column '1' values.
... print(main.target)
[1 0 0 1 0 0 0 0 1 0]
>>>
>>> # Print the distinct (original) classes in target values
... print(main.classes_)
['e', 'p']
"""
dataset = pd.read_csv(filepath_or_buffer=file, sep=sep, skiprows=skiprows, header=header_row, usecols=usecols, index_col=target_col, na_values=na_values, skipinitialspace=True, nrows=nrows, **kargs)
dataset.dropna(axis='index', how='any', inplace=True)
# column index (or names) in data
self.columns_ = dataset.columns
columns_dtypes = dataset.dtypes.values
data, target = dataset.values, None if target_col is None else np.array(dataset.index)
del dataset
if target is not None:
# Distinct (original) classes in target values
self.classes_ = None
if encode_target:
target_labelEncoder = LabelEncoder()
target = target_labelEncoder.fit_transform(target)
self.classes_ = target_labelEncoder.classes_.tolist()
del target_labelEncoder
# Column name indexed dictionary of distinct (original) categories in the data columns. Defaults to ``None`` for numeric (non-categorical) valued columns.
self.columns_categories_ = dict.fromkeys(self.columns_)
# using array of absolute (zero-)indices of columns for ``categorical_cols`` parameter
if isinstance(categorical_cols, str) and categorical_cols.casefold()=="infer":
n_samples, n_features = data.shape
selected_columns = np.array([True]*n_features)
# maximum number of classes in a column for it to be "inferred" as "categorical (nominal)"
max_infer_nominal_classes = max_classes_nominal(n_samples)
self._nominal_columns = []
for column_index in np.where(columns_dtypes == object)[0]:
column_labelEncoder = LabelEncoder()
column_labelEncoder.fit(data.T[column_index])
if len(column_labelEncoder.classes_) <= max_infer_nominal_classes:
self._nominal_columns.append(self.columns_[column_index])
self.columns_categories_[self.columns_[column_index]] = column_labelEncoder.classes_.tolist()
data.T[column_index] = column_labelEncoder.transform(data.T[column_index])
else:
selected_columns[column_index] = False
del self.columns_categories_[self.columns_[column_index]]
del column_labelEncoder
if self._nominal_columns:
logger.info("Columns %s was/were inferred as 'nominal' categorical feature(s) and encoded", self._nominal_columns)
if not selected_columns.all():
logger.info("Columns %s was/were inferred as 'string' or 'date' type feature(s) and dropped", self.columns_[np.where(selected_columns==False)].tolist())
self.columns_ = self.columns_[selected_columns]
data = data.T[selected_columns].T
elif isinstance(categorical_cols, str) and categorical_cols.casefold()=='all':
self._nominal_columns = self.columns_.copy()
for column_index in range(self.columns_.shape[0]):
column_labelEncoder = LabelEncoder()
data.T[column_index] = column_labelEncoder.fit_transform(data.T[column_index])
self.columns_categories_[self.columns_[column_index]] = column_labelEncoder.classes_.tolist()
del column_labelEncoder
elif isinstance(categorical_cols, list) or isinstance(categorical_cols, int) or isinstance(categorical_cols, str):
if isinstance(categorical_cols, int) or isinstance(categorical_cols, str):
categorical_cols = [categorical_cols]
self._nominal_columns = categorical_cols.copy()
# TODO: Process each column in a separate thread
for column_name in categorical_cols:
column_index, = np.where(self.columns_==column_name)
if column_index.shape == (1,):
column_labelEncoder = LabelEncoder()
data.T[column_index[0]] = column_labelEncoder.fit_transform(data.T[column_index[0]])
self.columns_categories_[column_name] = column_labelEncoder.classes_.tolist()
del column_labelEncoder
else:
logger.warning("Column '%s' could not be (uniquely) identified and was skipped", column_name)
self._nominal_columns.remove(column_name)
continue
elif categorical_cols is None:
self._nominal_columns = None
else:
# print("error: Invalid argument for parameter 'categorical_cols'. Accepted arguments: {list of names (or indices) of nominal columns, 'infer', 'all', None}")
logger.error("Invalid argument for parameter 'categorical_cols'. Accepted arguments: {list of names (or indices) of nominal columns, 'infer', 'all', None}")
raise TypeError("invalid argument for parameter 'categorical_cols'")
try:
data = data.astype(np.number)
except ValueError as err:
# print("warning: Data contains 'string' (or 'date') type features and could not be casted to 'numerical' type")
logger.warning("Data contains 'string' (or 'date') type features and could not be casted to 'numerical' type")
self.data, self.target = data, target
self.n_samples, self.n_features = self.data.shape
def read_data_libsvm(self, file, type='classification', dtype=float, n_features=None, **kargs):
"""Read data from LIBSVM format file
Parameters:
file (str or open file or int): Path to LIBSVM data file or ``open file`` object or file descriptor
type ({'classification','regression','ranking'}, default='classification'): Type of dataset
dtype (numeric datatype, default=``float``): Datatype of the data array
n_features (int, default= ``None``): Number of features to use. ``None`` implies infer from data.
**kargs: Other Keyword arguments accepted by :func:`sklearn.datasets.load_svmlight_file` (Keyword arguments : offset, length, multilabel ...)
Note:
* ``file-like`` objects passed to 'file' parameter must be opened in binary mode.
* Learning to Rank ('ranking' type) datasets are not currently supported
* ``dtype`` parameter accepts only numerical datatypes
* The LIBSVM data file is assumed to have been already preprocessed, i.e., encoding categorical features and removal of missing values.
Examples:
Illustration of **Reading from LIBSVM data file** ::
>>> from automs import eda
>>> main = eda.EDA()
>>>
>>> from io import BytesIO
>>>
>>> # First 10 samples from dataset Breast Cancer (Source: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/breast-cancer)
... data = b'''
... 2.000000 1:1000025.000000 2:5.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1002945.000000 2:5.000000 3:4.000000 4:4.000000 5:5.000000 6:7.000000 7:10.000000 8:3.000000 9:2.000000 10:1.000000
... 2.000000 1:1015425.000000 2:3.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:2.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1016277.000000 2:6.000000 3:8.000000 4:8.000000 5:1.000000 6:3.000000 7:4.000000 8:3.000000 9:7.000000 10:1.000000
... 2.000000 1:1017023.000000 2:4.000000 3:1.000000 4:1.000000 5:3.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 4.000000 1:1017122.000000 2:8.000000 3:10.000000 4:10.000000 5:8.000000 6:7.000000 7:10.000000 8:9.000000 9:7.000000 10:1.000000
... 2.000000 1:1018099.000000 2:1.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:10.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1018561.000000 2:2.000000 3:1.000000 4:2.000000 5:1.000000 6:2.000000 7:1.000000 8:3.000000 9:1.000000 10:1.000000
... 2.000000 1:1033078.000000 2:2.000000 3:1.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 8:1.000000 9:1.000000 10:5.000000
... 2.000000 1:1033078.000000 2:4.000000 3:2.000000 4:1.000000 5:1.000000 6:2.000000 7:1.000000 8:2.000000 9:1.000000 10:1.000000
... '''
>>>
>>> import numpy as np
>>> # Each row is an instance and takes the form **<target value> <feature index>:<feature value> ... **.
... # Dataset is 'classification' type and target values (first column) represents class label of each sample, i.e., type='classification' (default)
... # All features assume only integral values, i.e., dtype=np.int
... main.read_data_libsvm(BytesIO(data), dtype=np.int)
>>>
>>> # Print the data samples
... print(main.data)
[[1000025 5 1 1 1 2 1 3 1 1]
[1002945 5 4 4 5 7 10 3 2 1]
[1015425 3 1 1 1 2 2 3 1 1]
[1016277 6 8 8 1 3 4 3 7 1]
[1017023 4 1 1 3 2 1 3 1 1]
[1017122 8 10 10 8 7 10 9 7 1]
[1018099 1 1 1 1 2 10 3 1 1]
[1018561 2 1 2 1 2 1 3 1 1]
[1033078 2 1 1 1 2 1 1 1 5]
[1033078 4 2 1 1 2 1 2 1 1]]
>>>
>>> # Print indices of columns or features. Assumption: Feature indices always uses one-based index
... print(main.columns_)
[ 1 2 3 4 5 6 7 8 9 10]
>>>
>>> # Print the (encoded) target values
... print(main.target)
[0 0 0 0 0 1 0 0 0 0]
>>>
>>> # Print the distinct classes in target values
... print(main.classes_)
[2, 4]
"""
dataset = load_svmlight_file(f=file, dtype=dtype, n_features=n_features, query_id=False, **kargs)
data, target = dataset[0].toarray(), dataset[1]
del dataset
self.classes_ = None
if type.casefold()=="classification":
target = target.astype(int)
target_labelEncoder = LabelEncoder()
target = target_labelEncoder.fit_transform(target)
self.classes_ = target_labelEncoder.classes_.tolist()
elif type.casefold()=="regression":
pass
elif type.casefold()=="ranking":
logger.error("'ranking' type datasets are not currently supported")
raise NotImplementedError("'ranking' type datasets are not currently supported")
n_features = data.shape[1]
self.columns_ = np.arange(1, n_features+1)
self._nominal_columns = None
self.data, self.target = data, target
self.n_samples, self.n_features = self.data.shape
# TODO: Allow use of subset of attributes
def read_data_arff(self, file, target_attr='class', encode_target='infer', numeric_categorical_attrs=None, drop_na_rows=True):
"""Read data from ARFF format file
Parameters:
file (str or open file): path to ARFF data file or ``open file`` object
target_attr (str, default='class'): Attribute name of the target column. ``target_attr=None`` implies no target column.
encode_target (bool or 'infer', default='infer'): Encode target values. ``encode_target='infer'`` encodes nominal targets and ignores numeric target attributes.
numeric_categorical_attrs (:obj:`list`, default= ``None``): List of 'names' of numeric attributes to be inferred as nominal and to be encoded. Note: All nominal attributes are implicitly encoded.
drop_na_rows (bool, default=True): Drop data samples with NA/NaN ('?') features
Note:
All nominal type attributes are implicitly encoded.
Examples:
Illustration of **Reading from ARFF data file** ::
>>> from automs import eda
>>> main = eda.EDA()
>>>
>>> from io import StringIO
>>>
>>> # An excerpt from dataset 'Hepatitis' involving features 'Age', 'Sex', 'Steroid', Albumin', 'Protime' and 'Class'.
>>> data = '''
... % Dataset: Hepatitis (Source: Weka)
... @relation hepatitis
...
... @attribute Age integer
... @attribute Sex {male, female}
... @attribute Steroid {no, yes}
... @attribute Albumin real
... @attribute Class {DIE, LIVE}
...
... @data
... 30,male,no,4,LIVE
... 50,female,no,3.5,LIVE
... 78,female,yes,4,LIVE
... 31,female,?,4,LIVE
... 34,female,yes,4,LIVE
... 46,female,yes,3.3,DIE
... 44,female,yes,4.3,LIVE
... 61,female,no,4.1,LIVE
... 53,male,no,4.1,LIVE
... 43,female,yes,3.1,DIE
... '''
>>>
>>> # The target is attribute 'Class', i.e., target_attr='Class'
... # Data samples with any missing ('?') features should be dropped, i.e., drop_na_rows=True (default).
... main.read_data_arff(StringIO(data), target_attr='Class')
info: The dataset may contain attributes with N/A ('?') values
>>>
>>> # Print the processed data samples.
... # Note: Nominal features ['Sex', 'Steroid'] have been implicitly encoded and
... # samples with any missing ('?') feature values have been dropped.
... print(main.data)
[[ 30. 1. 0. 4. ]
[ 50. 0. 0. 3.5]
[ 78. 0. 1. 4. ]
[ 34. 0. 1. 4. ]
[ 46. 0. 1. 3.3]
[ 44. 0. 1. 4.3]
[ 61. 0. 0. 4.1]
[ 53. 1. 0. 4.1]
[ 43. 0. 1. 3.1]]
>>>
>>> # Print the names of columns in data
... print(main.columns_)
['Age', 'Sex', 'Steroid', 'Albumin']
>>>
>>> # Print the target values. Note: Target attribute 'Class' has been encoded.
... print(main.target)
[1 1 1 1 0 1 1 1 0]
>>>
>>> # Print the distinct (original) classes in target values
... print(main.classes_)
['DIE', 'LIVE']
"""
dataset, metadata = loadarff(f=file)
rows_without_na = np.ones(dataset.shape[0], dtype=bool)
for attribute in metadata:
if metadata[attribute][0] == 'nominal':
rows_without_na[np.where(dataset[attribute] == b'?')] = False
if metadata[attribute][0] == 'numeric':
rows_without_na[np.isnan(dataset[attribute])] = False
if not rows_without_na.all():
logger.info("The dataset may contain attributes with N/A ('?') values")
# print("info: The dataset may contain attributes with N/A ('?') values")
if drop_na_rows:
dataset = dataset[rows_without_na]
# if target_attr is None or target_attr in metadata:
# data_records, target = dataset[[attribute for attribute in metadata if attribute!=target_attr]], None if target_attr is None else dataset[target_attr]
self.columns_ = metadata.names().copy()
if target_attr is None or target_attr in metadata:
if target_attr in metadata:
self.columns_.remove(target_attr)
data_records, target = dataset[self.columns_], None if target_attr is None else dataset[target_attr]
del dataset
else:
# print("error: Unknown 'target' attribute name specified")
logger.error("Unknown 'target' attribute name specified")
raise ValueError("unknown 'target' attribute name specified")
# Processing target labels
if target_attr is not None:
self.classes_ = None
# 'classification' type datasets
if metadata[target_attr][0]=='nominal':
if isinstance(encode_target, str) and encode_target.casefold()=='infer':
encode_target = True
# 'regression' type datasets
elif metadata[target_attr][0]=='numeric':
target = target.astype(np.number)
if isinstance(encode_target, str) and encode_target.casefold()=='infer':
encode_target = False
if encode_target:
target_labelEncoder = LabelEncoder()
target = target_labelEncoder.fit_transform(target)
self.classes_ = [target_class.decode() for target_class in target_labelEncoder.classes_.tolist()]
#self.classes_ = target_labelEncoder.classes_.tolist()
# Form a new data array
data = np.empty( ( data_records.size, len(data_records.dtype.names) ), dtype=np.float64)
self._nominal_columns = []
# Column name indexed dictionary of distinct (original) categories in the data columns. Defaults to ``None`` for numeric (non-categorical) valued columns.
self.columns_categories_ = dict.fromkeys(self.columns_)
for index, attribute in enumerate(data_records.dtype.names):
attribute_values = data_records[attribute]
encode_attribute = False
if metadata[attribute][0] == 'numeric':
if numeric_categorical_attrs is not None and attribute in numeric_categorical_attrs:
encode_attribute = True
elif metadata[attribute][0] == 'nominal':
encode_attribute = True
if encode_attribute:
self._nominal_columns.append(attribute)
attr_labelEncoder = LabelEncoder()
attribute_values = attr_labelEncoder.fit_transform(attribute_values)
self.columns_categories_[attribute] = [attr.decode() for attr in attr_labelEncoder.classes_.tolist()]
del attr_labelEncoder
data.T[index] = attribute_values
del data_records
self.data, self.target = data, target
self.n_samples, self.n_features = self.data.shape
def dummy_coding(self, nominal_columns='infer', drop_first=False):
"""Dummy coding (One-Hot Encoding) of nominal categorical columns (features)
Parameters:
nominal_columns (:obj:`list`, int, str, 'all', default='infer'): List (str or int if singleton) of column 'names' (or absolute 'indices', if no column names) of nominal categorical columns to dummy code. ``nominal_columns='infer'`` autodetects nominal categorical columns. ``nominal_columns='all'`` implies all columns are nominal categorical. ``nominal_columns=None`` implies no nominal categorical columns.
drop_first (bool, default=False): Whether to get k-1 dummies out of k categorical levels by removing the first level.
Note:
``nominal_columns`` parameter uses absolute column 'names' (or absolute column 'indices' if no names) as presented in the original data file.
See also:
`What is One Hot Encoding? Why And When do you have to use it? (Source: HackerNoon) <https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f>`_
Examples:
Illustration of **Dummy-Coding** of Nominal Categorical Columns
::
>>> from automs import eda
>>> main = eda.EDA()
>>> from io import StringIO
>>> data = '''
... % Dataset: Automobiles (Source: UCI ML Repository)
... % Attributes : symboling (ordinal) {-3, -2, -1, 0, 1, 2, 3}
... % body-style (nominal) {hardtop, wagon, sedan, hatchback, convertible}
... % engine-size (continous) [61, 326]
... % engine-location (nominal) {front, rear}
... % Target Attribute : symboling
...
... 3,convertible,130,front
... 1,hatchback,152,front
... 2,sedan,109,front
... 3,hardtop,194,rear
... 0,wagon,132,front
... -2,sedan,141,front
... 3,convertible,194,rear
... -1,hatchback,122,front
... 2,hardtop,97,front
... 0,wagon,108,front
... '''
>>> # Ignore lines starting with '%' as comment, i.e., comment='%'.
... # Use column 0 (attribute 'symboling') as target values to predict, i.e., target_col=0.
... # Encode nominal columns 1 and 3 (body-style and engine-location), i.e., categorical_cols=[1,3]
... main.read_data_csv(StringIO(data), comment='%', target_col=0, encode_target=False, categorical_cols=[1,3])
>>> # Print the processed data samples.
... print(main.data)
[[ 0. 130. 0.]
[ 2. 152. 0.]
[ 3. 109. 0.]
[ 1. 194. 1.]
[ 4. 132. 0.]
[ 3. 141. 0.]
[ 0. 194. 1.]
[ 2. 122. 0.]
[ 1. 97. 0.]
[ 4. 108. 0.]]
>>> # Print names (or absolute indices, if no names) of columns in data.
... # Note: Column 0 was isolated as target values.
... print(main.columns_)
Int64Index([1, 2, 3], dtype='int64')
>>> # Print the names (or absolute indices, if no names) of nominal columns in data.
... print(main._nominal_columns)
[1, 3]
>>> # Dummy code nominal columns inferred from data, i.e., nominal_columns='infer' (default).
... main.dummy_coding()
info: columns [1, 3] was/were inferred as nominal column(s) for dummy coding
>>> # Print the data samples post dummy-coding
... print(main.data)
[[ 130. 1. 0. 0. 0. 0. 1. 0.]
[ 152. 0. 0. 1. 0. 0. 1. 0.]
[ 109. 0. 0. 0. 1. 0. 1. 0.]
[ 194. 0. 1. 0. 0. 0. 0. 1.]
[ 132. 0. 0. 0. 0. 1. 1. 0.]
[ 141. 0. 0. 0. 1. 0. 1. 0.]
[ 194. 1. 0. 0. 0. 0. 0. 1.]
[ 122. 0. 0. 1. 0. 0. 1. 0.]
[ 97. 0. 1. 0. 0. 0. 1. 0.]
[ 108. 0. 0. 0. 0. 1. 1. 0.]]
>>> # Print names of columns in data post dummy-coding.
... # Note: Dummy/indicator columns assume names of the form **'<original column name>_<nominal category binarized>'**
... print(main.columns_)
Index([2, '1_0.0', '1_1.0', '1_2.0', '1_3.0', '1_4.0', '3_0.0', '3_1.0'], dtype='object')
"""
try:
dataframe = pd.DataFrame(self.data, columns=self.columns_, dtype=np.number)
except ValueError:
# print("warning: Data contains non-numeric features")
logger.warning("Data contains non-numeric features")
dataframe = pd.DataFrame(self.data, columns=self.columns_)
#if not (nominal_columns==[] or nominal_columns is None): # Both [] (empty list) and ``None`` are False Expressions
if nominal_columns: # Evaluates to True if (nominal_columns!=[] and nominal_columns is not None)
if isinstance(nominal_columns, str) and nominal_columns.casefold()=='infer':
if hasattr(self, '_nominal_columns'):
nominal_columns = self._nominal_columns if self._nominal_columns is not None else []
# print("info: columns {0} was/were infered as nominal column(s) for dummy coding".format(nominal_columns))
logger.info("Columns %s was/were infered as nominal column(s) for dummy coding", nominal_columns)
else:
# print("error: could not infer nominal type columns from data")
logger.error("Could not infer nominal type columns from data")
raise Exception("could not infer nominal type columns from data")
elif isinstance(nominal_columns, str) and nominal_columns.casefold()=='all':
nominal_columns = self.columns_.copy()
elif isinstance(nominal_columns, list) or isinstance(nominal_columns, str) or isinstance(nominal_columns, int):
if isinstance(nominal_columns, str) or isinstance(nominal_columns, int):
nominal_columns = [nominal_columns]
if not set(nominal_columns).issubset(self.columns_):
# print("warning: Unknown columns names: {0} in argument to parameter 'nominal_columns' have been ignored".format( set(nominal_columns).difference(self.columns_) ))
logger.warning("Unknown columns names: %s in argument to parameter 'nominal_columns' have been ignored", set(nominal_columns).difference(self.columns_) )
nominal_columns = list( set(nominal_columns).intersection(self.columns_) )
else:
# print("error: Invalid arguments to parameter 'nominal_columns'. Accepted Arguments: {list of names of nominal columns, 'infer', 'all', None}")
logger.error("Invalid arguments to parameter 'nominal_columns'. Accepted Arguments: {list of names of nominal columns, 'infer', 'all', None}")
raise TypeError("invalid arguments to parameter 'nominal_columns'")
dataframe_dummy_coded = pd.get_dummies(dataframe, columns=nominal_columns, drop_first=drop_first)
del dataframe
self.data = dataframe_dummy_coded.values
self.columns_ = dataframe_dummy_coded.columns
del dataframe_dummy_coded
del self._nominal_columns
self.n_samples, self.n_features = self.data.shape
else:
# print("info: No columns to dummy code (nominal_columns = {0})".format(nominal_columns.__repr__()))
logger.info("No columns to dummy code (nominal_columns = %s)", nominal_columns.__repr__())
def standardize_data(self):
"""Feature Scaling through Standardisation (or Z-score normalisation)
See also:
`Importance of Feature Scaling <http://scikit-learn.org/stable/auto_examples/preprocessing/plot_scaling_importance.html>`_
"""
if not hasattr(self, 'standard_scaler'):
try:
self.data = self.data.astype(float, copy=False)
except ValueError:
# print("error: Standardization of data failed due to presence of non-numeric features")
logger.error("Standardization of data failed due to presence of non-numeric features")
raise ValueError("standardization of data failed due to presence of non-numeric features")
self.standard_scaler = StandardScaler(copy=False)
self.data = self.standard_scaler.fit_transform(self.data)
else:
# print("info: Data already in Standard Normal Form")
logger.info("Data already in Standard Normal Form")
def destandardize_data(self):
"""Scale back and shift features to original representation (i.e., as prior to Standardization)
Note:
Data should not have been modified post standardization for de-standardisation to return accurate original representation.
"""
if hasattr(self, 'standard_scaler'):
self.data = self.standard_scaler.inverse_transform(self.data)
del self.standard_scaler
def random_stratified_sampling(self, location, bag_name, sample_size, n_iterations=10, file_prefix=None):
"""Performs repeated Stratified Random Sampling of data with 'replacement across samples drawn' and dumps the sampled data into files
Parameters:
location (str): Location to dump the sampled data bags.
bag_name (str): Name of (to be created) folder that acts as a container for the sampled data bags.
sample_size (int, float): Number of data samples in every bag. ``int`` (range: 1 to n_samples) gives the absolute number of samples per bag; ``float`` (range: (0, 1]) gives the number of samples per bag as a fraction of the total number of samples.
n_iterations (int, default=10): Number of bags to be formed.
file_prefix (str, default=None): Prefix for bag filenames. Bag filenames are of the form '[<file_prefix>_]bag<bag number>.p'.
Note:
* Each sampled data bag file is a pickled dictionary of 'data' and 'target' attributes.
* Each bag folder contains a file 'metadata.p' which is a pickled dictionary of metadata information about the original dataset (bagging timestamp, class distribution, n_samples, n_features, columns (features) information).
* The metadata 'timestamp' attribute (time of bagging in seconds since the Epoch as a float) can uniquely identify bags (in most cases).
"""
# Ensure that the dataset is a classification dataset
if not ( hasattr(self, 'classes_') and self.classes_ is not None ):
# print("error: Cannot perform random stratified sampling on the non-classification dataset. If the dataset is indeed a classification dataset, ensure that you encode target column when reading.")
logger.error("Cannot perform random stratified sampling on the non-classification dataset. If the dataset is indeed a classification dataset, ensure that you encode target column when reading.")
raise ValueError("cannot perform random stratified sampling on the non-classification dataset")
cwd = os.getcwd()
location = os.path.abspath(os.path.expanduser(location))
try:
os.chdir(location)
except FileNotFoundError:
# print("error: Failed to resolve location '%s'"%location)
logger.error("Failed to resolve location for dumping sampled data files: '%s'", location)
raise FileNotFoundError("failed to resolve location for dumping sampled data files")
# print("error: Buddi-automs 'warehouse' not setup. Specify an user path for sampled data bags.")
# sys.exit(1)
try:
os.mkdir(bag_name)
os.chdir(bag_name)
except OSError as err:
logger.error("Unable to write sampled data bags to disk : %s", err)
raise OSError("unable to write sampled data bags to disk")
# print("error: Unable to write sampled data bags to disk.\n{0}".format(err))
# sys.exit(1)
# Resolving SIZE of bagged samples as a fraction
if isinstance(sample_size, int) and (sample_size>0 and sample_size<=self.n_samples):
sample_size = sample_size/self.n_samples
elif isinstance(sample_size, float) and (sample_size>0.0 and sample_size<=1.0):
pass
else:
# print("error: Invalid sampling size encountered")
logger.error("Invalid sampling size encountered")
raise ValueError("invalid sampling size encountered")
# Resolving FILE PREFIX for bagged samples
if file_prefix is None:
file_prefix = ''
else:
file_prefix = file_prefix + '_'
# Compute the indices of samples for each class
classes_samples_indices = list(map(lambda class_: np.where(self.target == class_)[0], range(len(self.classes_))))
classes_sampled_data_cnts = list(map(lambda class_samples_indices: round(sample_size*len(class_samples_indices)), classes_samples_indices))
def generate_sampled_data_indices(classes_samples_indices, classes_sampled_data_cnts):
# Choose sample indices for each class
classes_choosen_indices = list(map(lambda x: list(np.random.choice(x[0], size=x[1], replace=False)), zip(classes_samples_indices, classes_sampled_data_cnts)))
# combine indices of samples chosen for each class to generate indices for sampled data
sampled_data_choosen_indices = reduce(lambda a,b : a+b, classes_choosen_indices)
# shuffle the chosen indices
shuffle(sampled_data_choosen_indices)
return sampled_data_choosen_indices
bags_filenames = []
# Repeated Sampling of data
for iteration in range(n_iterations):
sampled_data = dict.fromkeys(['data', 'target'])
# Replace with stratified method of choosing indices
# choosen_indices = np.random.choice(np.arange(self.n_samples),size=sample_size,replace=False)
choosen_indices = generate_sampled_data_indices(classes_samples_indices, classes_sampled_data_cnts)
sampled_data['data'], sampled_data['target'] = self.data[choosen_indices], self.target[choosen_indices] if self.target is not None else None
bag_filename = os.path.abspath(file_prefix + "bag"+str(iteration+1)+".p")
pickle.dump(sampled_data, open(bag_filename, "xb"))
bags_filenames.append(bag_filename)
del sampled_data
# Metadata of data
metadata = {
'timestamp':time(), # Uniquely identifies baggings (with probability ~= 1)
'classes':label_cnt_dict(self.target) if self.target is not None else None,
'n_samples':self.n_samples, # Not inferrable from classes, if target=None
'n_features':self.n_features,
'column_names':self.columns_,
'column_categories':self.columns_categories_ if hasattr(self, 'columns_categories_') else None,
'stratified_sampling': True
}
metadata_filename = os.path.abspath("metadata.p")
pickle.dump(metadata, open(metadata_filename, "xb"))
# Change the directory back to the original working directory
os.chdir(cwd)
return {
'bags_filenames': bags_filenames,
'metadata_filename': metadata_filename
}
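# Illustrative sketch (not part of the original code): a dumped bag and its metadata
# can be read back as the pickled dictionaries they were written as, e.g.
#   with open(os.path.join(location, bag_name, 'bag1.p'), 'rb') as bag_file:
#       bag = pickle.load(bag_file)            # dict with keys 'data' and 'target'
#   with open(os.path.join(location, bag_name, 'metadata.p'), 'rb') as meta_file:
#       metadata = pickle.load(meta_file)      # 'timestamp', 'classes', 'n_samples', ...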
def perform_kmeans_clustering(self, n_clusters='n_classes', **kargs):
"""Perform K-Means Clustering on the data
n_clusters ({int, 'n_classes'}, default='n_classes'): number (``int``) of clusters in the data. ``n_classes`` implies uses number of classes in data as number of clusters.
**kargs: Other Keyword arguments (parameters) accepted by object :`sklearn.cluster.KMeans` constructor (Keyword Arguments: n_init, max_iter, verbose, n_jobs).
See also:
* The method :func:`automs.eda.EDA.perform_kmeans_clustering` is built upon `scikit-learn's KMeans Clustering API`_ (:obj:`sklearn.cluster.KMeans`).
Examples:
Illustration of performing KMeans Clustering on synthetic dataset::
>>> from automs import eda
>>> main = eda.EDA()
>>> # Generate synthetic dataset (with isotropic gaussian blobs clusters) using :func:`sklearn.datasets.make_blobs`
... from sklearn.datasets import make_blobs
>>> data, target = make_blobs(n_samples=100, n_features=2, centers=3)
>>> # Load the synthetic dataset into the EDA object :obj:`main`
... main.load_data(data, target)
>>> # Perform K-Means Clustering on the data
... main.perform_kmeans_clustering(n_clusters='n_classes')
info: Data implicitly Standardized (aka Z-Score Normalised) for K-Means Clustering
info: Number of clusters in data, K=3 (equal to number of classes)
inertia : 8.030120482
clusters : {0: 33, 1: 34, 2: 33}
parameters : {'verbose': 0, 'precompute_distances': 'auto', 'init': 'k-means++', 'tol': 0.0001, 'n_jobs': 1, 'random_state': None, 'max_iter': 300, 'n_init': 10, 'algorithm': 'auto', 'copy_x': True, 'n_clusters': 3}
n_clusters : 3
cluster_centers : [[ 0.54512904 -1.38171852]
[-1.36053651 0.7996122 ]
[ 0.85663585 0.55787564]]
labels : [1 0 0 2 1 2 0 1 1 2 2 2 1 1 1 0 0 1 2 0 0 0 1 0 2 2 1 1 2 2 1 0 1 0 2 0 0
0 2 1 2 1 0 1 0 0 0 0 1 1 1 0 0 2 0 1 0 2 1 2 1 2 2 1 0 2 1 2 2 1 2 0 1 1
2 0 0 2 0 2 1 0 0 2 2 2 0 0 1 1 2 2 1 1 0 1 2 1 2 2]
.. _`Scikit-Learn's KMeans Clustering API`: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
"""
logger.info("Performing KMeans Clustering")
if not hasattr(self, 'standard_scaler'):
self.standardize_data()
logger.info("Data implicilty Standardized (aka Z-Score Normalised) for K-Means Clustering")
if isinstance(n_clusters, int):
pass
# 'number of clusters' to find = 'number of classes' in the labelled dataset
elif isinstance(n_clusters, str) and n_clusters.casefold()=='n_classes':
if self.target is not None:
if hasattr(self, 'classes_') and self.classes_ is not None:
n_clusters = len(self.classes_)
else:
n_clusters, = np.unique(self.target).shape
logger.info("Number of clusters in data, K=%d (equal to number of classes)", n_clusters)
# print("info: number of clusters in data, K=%d (equal to number of classes)"%n_clusters)
else:
# print("error: number of classes in data couldn't be determined due to absence of target class info.")
logger.error("Number of classes in data couldn't be determined due to absence of target class info.")
raise ValueError("number of classes in data couldn't be determined due to absence of target class info")
else:
# print("error: invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
logger.error("Invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
raise TypeError("invalid argument for parameter 'n_clusters'")
kmeans_clusterer = KMeans(n_clusters=n_clusters, **kargs)
kmeans_clusterer.fit(self.data)
self.kmeans_results = {
'parameters' : kmeans_clusterer.get_params(),
'labels' : kmeans_clusterer.labels_,
'n_clusters' : n_clusters,
'clusters' : label_cnt_dict(kmeans_clusterer.labels_),
'cluster_centers' : kmeans_clusterer.cluster_centers_,
'inertia' : kmeans_clusterer.inertia_
}
# print_dict(self.kmeans_results)
# logger.info("KMeans clustering results = %s", kmeans_results)
return self.kmeans_results['labels']
def perform_spectral_clustering(self, n_clusters='n_classes', **kargs):
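"""Perform Spectral Clustering on the data
Parameters:
n_clusters ({int, 'n_classes'}, default='n_classes'): Number (``int``) of clusters in the data. ``'n_classes'`` uses the number of classes in the data as the number of clusters.
**kargs: Other keyword arguments (parameters) accepted by the :obj:`sklearn.cluster.SpectralClustering` constructor (e.g., affinity, n_neighbors).
"""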
logger.info("Performing Spectral Clustering")
if not hasattr(self, 'standard_scaler'):
self.standardize_data()
# print("info: Data implicilty Standardized (aka Z-Score Normalised) for Spectral Clustering")
logger.info("Data implicilty Standardized (aka Z-Score Normalised) for Spectral Clustering.")
if isinstance(n_clusters, int):
pass
# 'number of clusters' to find = 'number of classes' in the labelled dataset
elif isinstance(n_clusters, str) and n_clusters.casefold()=='n_classes':
if self.target is not None:
if hasattr(self, 'classes_') and self.classes_ is not None:
n_clusters = len(self.classes_)
else:
n_clusters, = np.unique(self.target).shape
# print("info: number of clusters in data, K=%d (equal to number of classes)"%n_clusters)
logger.info("Number of clusters in data, K = %d (equal to number of classes)", n_clusters)
else:
# print("error: number of classes in data couldn't be determined due to absence of target class info.")
logger.error("Number of classes in data couldn't be determined due to absence of target class info.")
raise ValueError("Number of classes in data couldn't be determined due to absence of target class info")
else:
# print("error: invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
logger.error("Invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
raise TypeError("invalid argument for parameter 'n_clusters'")
spectral_clusterer = SpectralClustering(n_clusters=n_clusters, **kargs)
try:
spectral_clusterer.fit(self.data)
except MemoryError:
logger.error("Data too large to be processed on this machine.")
raise MemoryError("data too large to be processed on this machine")
self.spectral_results = {
'parameters' : spectral_clusterer.get_params(),
'labels' : spectral_clusterer.labels_,
'n_clusters' : n_clusters,
'clusters' : label_cnt_dict(spectral_clusterer.labels_)
}
# print_dict(self.spectral_results)
# logger.info("Spectral clustering results = %s", self.spectral_results)
return self.spectral_results['labels']
def perform_hdbscan_clustering(self, **kargs):
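"""Perform HDBSCAN Clustering on the data
Parameters:
**kargs: Keyword arguments (parameters) accepted by the :obj:`hdbscan.HDBSCAN` constructor (e.g., min_cluster_size, min_samples).
"""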
# print("info:Performing hdbscan_clusterer")
logger.info("Performing HDBSCAN clustering")
if not hasattr(self, 'standard_scaler'):
self.standardize_data()
# print("info: Data implicilty Standardized (aka Z-Score Normalised) for HDBSCAN Clustering")
logger.info("Data implicilty Standardized (aka Z-Score Normalised) for HDBSCAN Clustering.")
hdbscan_clusterer = hdbscan.HDBSCAN(**kargs)
hdbscan_clusterer.fit(self.data)
assert len(np.unique(hdbscan_clusterer.labels_)) > 1
# # `allow_single_cluster=False` (default). Then, why have this block ?
# if(len(np.unique(hdbscan_clusterer.labels_))<=1):
# print("Found only one cluster ")
# print("Reducing min_n_samples ")
# reduced_min_samples = hdbscan_clusterer.min_cluster_size
# while(len(np.unique(hdbscan_clusterer.labels_)) <=1):
# reduced_min_samples = reduced_min_samples - 1
# print("Trying reduced cluster size {}".format(reduced_min_samples))
# hdbscan_clusterer.set_params(min_cluster_size = reduced_min_samples)
# hdbscan_clusterer.fit(self.data)
self.hdbscan_results = {
'parameters' : hdbscan_clusterer.get_params(),
'labels' : hdbscan_clusterer.labels_,
'n_clusters' : len(np.unique(hdbscan_clusterer.labels_)),
'clusters' : label_cnt_dict(hdbscan_clusterer.labels_)
}
# print_dict(self.hdbscan_results)
# logger.info("HDBSCAN clustering results = %s", self.hdbscan_results)
return self.hdbscan_results['labels']
def perform_hierarchical_clustering(self, n_clusters='n_classes', **kargs):
"""Perform Ward's Hierarchical Clustering on the data
n_clusters ({int, 'n_classes'}, default='n_classes'): number (``int``) of clusters in the data. ``n_classes`` implies uses number of classes in data as number of clusters.
**kargs: Other Keyword arguments (parameters) accepted by object :`sklearn.cluster.AgglomerativeClustering` constructor (Keyword Arguments: affinity, linkage, memory).
See also:
* The method :func:`automs.eda.EDA.perform_hierarchical_clustering` is built upon `scikit-learn's Agglomerative Clustering API`_ (:obj:`sklearn.cluster.AgglomerativeClustering`).
Examples:
Illustration of performing Ward's Agglomerative hierarchical Clustering on synthetic dataset::
>>> from automs import eda
>>> main = eda.EDA()
>>> # Generate synthetic dataset (with isotropic gaussian blobs clusters) using :func:`sklearn.datasets.make_blobs`
... from sklearn.datasets import make_blobs
>>> data, target = make_blobs(n_samples=100, n_features=2, centers=3)
>>> # Load the synthetic dataset into the EDA object :obj:`main`
... main.load_data(data, target)
>>> # Perform Agglomerative hierarchical Clustering on the data
... main.perform_hierarchical_clustering(n_clusters='n_classes')
info: Data implicitly Standardized (aka Z-Score Normalised) for hierarchical Clustering
info: Number of clusters in data, K=3 (equal to number of classes)
n_clusters : 3
labels : [1 2 2 1 0 2 1 2 1 2 2 2 1 1 2 1 0 2 1 0 1 0 2 0 2 0 2 1 1 2 1 1 2 2 1 1 0
0 2 0 0 0 0 1 0 0 2 2 2 1 1 0 1 0 1 2 1 2 1 2 0 1 0 0 0 2 2 2 0 0 0 1 1 1
0 1 0 0 2 2 0 0 2 1 1 1 2 2 1 0 2 0 1 0 0 1 0 0 1 2]
clusters : {0: 34, 1: 34, 2: 32}
parameters : {'affinity': 'euclidean', 'connectivity': None, 'pooling_func': <function mean at 0x7f991ff63268>, 'n_clusters': 3, 'memory': None, 'compute_full_tree': 'auto', 'linkage': 'ward'}
.. _`scikit-learn's Agglomerative Clustering API`: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html
"""
logger.info("Performing Hierarchical Clustering")
if not hasattr(self, 'standard_scaler'):
self.standardize_data()
# print("info: Data implicilty Standardized (aka Z-Score Normalised) for hierarchical Clustering")
logger.info("Data implicilty Standardized (aka Z-Score Normalised) for hierarchical Clustering")
if isinstance(n_clusters, int):
pass
# 'number of clusters' to find = 'number of classes' in the labelled dataset
elif isinstance(n_clusters, str) and n_clusters.casefold()=='n_classes':
if self.target is not None:
if hasattr(self, 'classes_') and self.classes_ is not None:
n_clusters = len(self.classes_)
else:
n_clusters, = np.unique(self.target).shape
# print("info: number of clusters in data, K=%d (equal to number of classes)"%n_clusters)
logger.info("Number of clusters in data, K = %d (equal to number of classes)", n_clusters)
else:
# print("error: number of classes in data couldn't be determined due to absence of target class info.")
logger.error("Number of classes in data couldn't be determined due to absence of target class info.")
raise ValueError("number of classes in data couldn't be determined due to absence of target class info")
else:
# print("error: invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
logger.error("Invalid argument for parameter 'n_clusters'. Accepted arguments: {int, 'n_classes'}")
raise TypeError("Invalid argument for parameter 'n_clusters'")
hierarchical_clusterer = AgglomerativeClustering(n_clusters=n_clusters, **kargs)
try:
hierarchical_clusterer.fit(self.data)
except MemoryError:
logger.error("Data too large to be processed on this machine.")
raise MemoryError("data too large to be processed on this machine")
self.hierarchical_results = {
'parameters' : hierarchical_clusterer.get_params(),
'labels' : hierarchical_clusterer.labels_,
'n_clusters' : n_clusters,
'clusters' : label_cnt_dict(hierarchical_clusterer.labels_)
}
# print_dict(self.hierarchical_results)
# logger.info("hierarchical clustering results = %s", self.hierarchical_results)
return self.hierarchical_results['labels']
def label_cnt_dict(labels):
unique, counts = np.unique(labels, return_counts=True)
return dict(zip(unique, counts))
def print_dict(dictionary):
for key,value in dictionary.items():
print(key,value,sep=" : ")
def visualise_2D(x_values,y_values,labels=None,class_names=None):
"""Visualise clusters of selected 2 features"""
sns.set_style('white')
sns.set_context('poster')
sns.set_color_codes()
plot_kwds = {'alpha' : 0.5, 's' : 50, 'linewidths':0}
frame = plt.gca()
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
if labels is None:
plt.scatter(x_values,y_values,c='b',**plot_kwds)
else:
pallete=sns.color_palette('dark',np.unique(labels).max()+1)
colors=[pallete[x] if x>=0 else (0.0,0.0,0.0) for x in labels]
plt.scatter(x_values,y_values,c=colors,**plot_kwds)
legend_entries = [mpatches.Circle((0,0),1,color=x,alpha=0.5) for x in pallete]
if class_names is None:
legend_labels = range(len(pallete))
else:
legend_labels = ["class "+str(label)+" ( "+str(name)+" )" for label,name in enumerate(class_names)]
plt.legend(legend_entries,legend_labels,loc='best')
plt.show()
def visualise_3D(x_values,y_values,z_values,labels=None):
"""Visualise clusters of selected 3 features -- plotly"""
fig = plt.figure()
ax = fig.add_subplot(111,projection='3d')
plot_kwds = {'alpha' : 0.5, 's' : 50, 'linewidths':0}
if labels is None:
ax.scatter(x_values,y_values,z_values,c='b',**plot_kwds)
else:
pallete=sns.color_palette('dark',np.unique(labels).max()+1)
colors=[pallete[x] if x>=0 else (0.0,0.0,0.0) for x in labels]
ax.scatter(x_values,y_values,z_values,c=colors,**plot_kwds)
plt.show()
#Flatten complex 'multi-dimensional' list or ``np.ndarray``s
def flatten_list(data):
if isinstance(data, int) or isinstance(data, float):
return list([data])
if isinstance(data, np.ndarray):
data = data.tolist()
flattened_list = []
for element in data:
flattened_list = flattened_list + flatten_list(element)
return flattened_list
# max number of classes in a nominal variable for a dataset with ``n_samples`` data points
def max_classes_nominal(n_samples):
# Result of quadratic regression on "n_samples" -> "max classes in nominal columns"
reg_coefs = np.array([ 8.54480458e-03, 1.31494511e-08])
reg_intercept = 14.017948334463796
if n_samples <= 16:
return ceil(n_samples/3)
elif n_samples <= 100000:
return ceil( min(np.sum([n_samples, n_samples*n_samples]*reg_coefs) + reg_intercept, n_samples/4) )
else:
return n_samples/100
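# Illustrative check (not part of the original code), rounding the regression terms above:
#   max_classes_nominal(15)   -> ceil(15/3)                             = 5
#   max_classes_nominal(100)  -> ceil(min(0.854 + 0.000 + 14.018, 25))  = 15
#   max_classes_nominal(1000) -> ceil(min(8.545 + 0.013 + 14.018, 250)) = 23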
```
#### File: src/automs/exceptions.py
```python
class UnableToLearnBothClassesError(Exception):
""" Exception raised when automs is unable to learn both majority and minority classes in data after several attempts """
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
```
#### File: src/automs/f1_scores_estimation.py
```python
import logging
import os
import pickle
import numpy as np
import onnxruntime as rt
from .cluster_indices_generation import FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODELS_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'models')
def estimate_decision_tree_f1_scores(feature_vectors):
# Load the decision tree f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'decision_tree_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_random_forest_f1_scores(feature_vectors):
# Load the random forest f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'random_forest_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_logistic_regression_f1_scores(feature_vectors):
# Load the logistic regression f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'logistic_regression_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_k_nearest_neighbors_f1_scores(feature_vectors):
# Load the k-nearest neighbors f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'k_nearest_neighbor_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_xgboost_f1_scores(feature_vectors):
# Load the xgboost f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'xgboost_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_support_vector_machine_f1_scores(feature_vectors):
# Load the support vector classifier f1-score estimation regressor model
model_onnx_filename = os.path.join(MODELS_PATH, 'support_vector_machine_f1_estimator.onnx')
sess = rt.InferenceSession(model_onnx_filename)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
estimated_f1_scores = sess.run([label_name], {input_name: feature_vectors})[0].squeeze(-1)
return estimated_f1_scores
def estimate_f1_scores(feature_vectors):
""" Estimate the f1-scores corresponding to various classifier models from bag's metafeatures """
feature_vectors = np.array(feature_vectors, dtype=np.float32)
## Verify that features in feature vector generated is same as features expected by the f1 estimation models
models_features_filename = os.path.join(MODELS_PATH, 'features.pkl')
with open(models_features_filename, 'rb') as f_features:
f1_estimators_feature_input_cluster_indices_triple = pickle.load(f_features)
if not f1_estimators_feature_input_cluster_indices_triple == FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES:
logger.error("Mismatch between features in feature vector generated using cluster indices and features in feature vector expected by f1-score estimation models.")
raise ValueError("mismatch between features generated, expected by f1 estimation models")
decision_tree_estimated_f1_scores = estimate_decision_tree_f1_scores(feature_vectors)
random_forest_estimated_f1_scores = estimate_random_forest_f1_scores(feature_vectors)
logistic_regression_estimated_f1_scores = estimate_logistic_regression_f1_scores(feature_vectors)
k_nearest_neighbor_estimated_f1_scores = estimate_k_nearest_neighbors_f1_scores(feature_vectors)
xgboost_estimated_f1_scores = estimate_xgboost_f1_scores(feature_vectors)
support_vector_machine_estimated_f1_scores = estimate_support_vector_machine_f1_scores(feature_vectors)
clf_models_estimated_f1_scores = {
'decision tree': decision_tree_estimated_f1_scores,
'random forest': random_forest_estimated_f1_scores,
'logistic regression': logistic_regression_estimated_f1_scores,
'k-nearest neighbor': k_nearest_neighbor_estimated_f1_scores,
'xgboost': xgboost_estimated_f1_scores,
'support vector machine': support_vector_machine_estimated_f1_scores
}
return clf_models_estimated_f1_scores
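# Usage sketch (not part of the original code): `feature_vectors` is expected to be a
# 2-D array-like of per-bag metafeature vectors laid out in the order given by
# FEATURE_VECTOR_CLUSTER_INDICES_ORDER_TRIPLES, e.g. (names below are hypothetical):
#   estimated = estimate_f1_scores(bag_feature_vectors)
#   best_clf = max(estimated, key=lambda clf: estimated[clf].mean())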
```
#### File: src/automs/sampling.py
```python
import logging
# local application code imports
from .config import DATASET_FILETYPE_CONFIG
from .utils import _check_dataset_filename
from .eda import EDA
# setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def read_dataset_file(dataset_filename, dataset_config):
dataset_filetype = _check_dataset_filename(dataset_filename)
# verify that the dataset file format inferred from dataset filename and the dataset config object type match
if type(dataset_config) != DATASET_FILETYPE_CONFIG[dataset_filetype]:
logger.error(f"Encountered dataset config object of type `{type(dataset_config)}` when expecting object of type `{DATASET_FILETYPE_CONFIG[dataset_filetype]}`")
raise TypeError("Encountered invalid dataset config object")
# Read the dataset into an :obj:`automs.eda.EDA` object
dataset = EDA()
if dataset_filetype == 'csv': dataset.read_data_csv(dataset_filename, **vars(dataset_config))
elif dataset_filetype == 'libsvm': dataset.read_data_libsvm(dataset_filename, **vars(dataset_config))
elif dataset_filetype == 'arff': dataset.read_data_arff(dataset_filename, **vars(dataset_config))
else:
logger.error(f"Specified dataset file's filetype or data format ('{dataset_filetype}') doesn't have an associated reader method in :class:`automs.eda.EDA`.")
raise ValueError("No reader method for specified dataset's filetype")
# Dummy code the nominal columns (or features)
dataset.dummy_coding()
return dataset
def sample_dataset(dataset, oneshot, sample_location):
# Compute the number of bags and sample size for each bag
if oneshot:
sample_size = dataset.n_samples
n_bags = 1
else:
if dataset.n_samples > 1000:
sample_size = 500
elif dataset.n_samples > 500:
# sample size is half of dataset size
sample_size = dataset.n_samples // 2
else:
logger.error(f"Dataset must have atlest 500 examples for sub-sampled bags setting. Dataset has only {dataset.n_samples} examples. Run the dataset in oneshot setting.")
raise ValueError("Dataset too small for sub-sampled bags setting")
n_bags = round(5 * dataset.n_samples / (0.63 * sample_size))
logger.info(f"Number of bags = {n_bags}, Number of samples per bag = {sample_size}")
stratified_sampling_results = dataset.random_stratified_sampling(sample_location, 'bags', sample_size, n_iterations=n_bags)
return stratified_sampling_results
```
|
{
"source": "JesicaDsouza/MiniProject-Cyber_Bullying_Detection",
"score": 3
}
|
#### File: MiniProject-Cyber_Bullying_Detection/website/main.py
```python
from flask import Flask,render_template,request
app = Flask(__name__)
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn import svm
#file = open('model.pkl','rb')
#clf = pickle.load(file)
#file.close()
@app.route('/',methods=["GET","POST"])
def hello_world():
if request.method == "POST":
MyDict = request.form
text = (MyDict['text'])
label = model.predict(tfidf.transform([text]))[0]
print(label)
return render_template('show.html',label=label)
return render_template('index.html')
if __name__ == "__main__" :
df = pd.read_csv(r'D:\cyberbullycode\public_data_labeled.csv')
df_x = df["Text"]
df_y = df["label"]
x_train,x_test,y_train,y_test = train_test_split(df_x,df_y,test_size = 0.2,random_state = 4)
tfidf = TfidfVectorizer()
x_traincv = tfidf.fit_transform(x_train)
x_testcv = tfidf.transform(x_test)
model = svm.SVC()
model.fit(x_traincv,y_train)
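# Sketch (not part of the original app): the fitted vectorizer and classifier could be
# persisted here so the commented-out 'model.pkl' loader near the top becomes usable,
# e.g. pickle.dump((tfidf, model), open('model.pkl', 'wb')), with the loader then
# unpacking both objects instead of a single `clf`.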
app.run(debug = True)
```
|
{
"source": "jesielcarlos/ApiEvolux",
"score": 2
}
|
#### File: ApiEvolux/server/instance.py
```python
from flask import Flask, Blueprint
from flask_restplus import Api
from marshmallow import ValidationError
from ma import ma
from db import db
class Server():
def __init__(self):
self.app = Flask(__name__)
self.bluePrint = Blueprint('api', __name__, url_prefix='/api')
self.api = Api(self.bluePrint, doc='/doc', title='Api evolux')
self.app.register_blueprint(self.bluePrint)
self.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
self.app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
self.app.config['PROPAGATE_EXCEPTIONS'] = True
self.number_ns = self.number_ns()
super().__init__()
def number_ns(self, ):
return self.api.namespace(name='Numbers', description='number related operations', path='/')
def run(self, ):
self.app.run( port=5000, debug=True, host='0.0.0.0')
server = Server()
```
|
{
"source": "jesielcarlos/gestao_rh",
"score": 2
}
|
#### File: apps/funcionarios/views.py
```python
from django.db.models.query import QuerySet
from django.views.generic import ListView, UpdateView
from django.views.generic.edit import CreateView, DeleteView
from django.urls import reverse_lazy
from .models import Funcionario
from django.contrib.auth.models import User
class FuncionariosList(ListView):
model = Funcionario
def get_queryset(self):
empresa_logada = self.request.user.funcionario.empresa
return Funcionario.objects.filter(empresa=empresa_logada)
class FuncionarioEdit(UpdateView):
model = Funcionario
fields = ['nome','departamentos']
class FuncionarioDelete(DeleteView):
model = Funcionario
success_url = reverse_lazy('list_funcionarios')
class FuncionarioNovo(CreateView):
model = Funcionario
fields = ['nome','departamentos']
def form_valid(self, form):
funcionario = form.save(commit=False)
funcionario.empresa = self.request.user.funcionario.empresa
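# Build the username from the first two space-separated parts of the employee's name
# (assumes 'nome' contains at least a first and a second name).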
username=funcionario.nome.split(' ')[0] + funcionario.nome.split(' ')[1]
funcionario.user = User.objects.create(username=username)
funcionario.save()
return super(FuncionarioNovo, self).form_valid(form)
```
|
{
"source": "jesielin/SnakeRace",
"score": 4
}
|
#### File: SnakeRace/Liao Xue Feng Py2 Edu/filter.py
```python
__author__ = 'shawn'
def is_odd(n):
return n % 2 == 1
print filter(is_odd, [1, 2, 3, 4, 5, 6, 7, 8, 9])
def not_empty(n):
return n and n.strip()
print filter(not_empty, ['A', '', 'B', '', 'D'])
# Exercise: use filter() to remove the primes from 1~100 (i.e. keep only the non-primes).
def is_prime(n):
    if n < 2:
        return False
    index = 2
    while index * index <= n:
        if n % index == 0:
            return False
        index += 1
    return True
print filter(lambda n: not is_prime(n), range(1, 100))
```
|
{
"source": "jesierski/early-sunrise",
"score": 3
}
|
#### File: Recommender/recommender/recommender.py
```python
from random import choice
import pickle
from for_model import get_scores
from recommender_cosim import get_scores2
import pandas as pd
from functions import profile
@profile
def get_movie_recommendation(name1, name2, name3, rating1, rating2, rating3):
print('***Do some ML magic***') #will be printed in the shell where web server runs
print('Name1 :', name1)
print('Rating1: ', rating1)
print('Name2 :', name2)
print('Rating2: ', rating2)
print('Name3 :', name3)
print('Rating3: ', rating3)
df = get_scores(name1, name2, name3, rating1, rating2, rating3)
return df #goes to the browser
def get_movie_recommendation2(name1, name2, name3, rating1, rating2, rating3):
print('***Do some ML magic***') #will be printed in the shell where web server runs
print('Name1 :', name1)
print('Rating1: ', rating1)
print('Name2 :', name2)
print('Rating2: ', rating2)
print('Name3 :', name3)
print('Rating3: ', rating3)
df2 = get_scores2(name1, name2, name3, rating1, rating2, rating3)
return df2 #goes to the browser
if __name__ == '__main__':
print(get_movie_recommendation("No Game No Life: Zero (2017)", "Father of the Bride Part II (1995)", "Grumpier Old Men (1995)", 2.4, 5, 4.0))
print(get_movie_recommendation2("No Game No Life: Zero (2017)", "Father of the Bride Part II (1995)", "Grumpier Old Men (1995)", 2.4, 5, 4.0))
#run when recommender.py is run in terminal
#but not when importing
```
|
{
"source": "jesierski/splendid-day",
"score": 3
}
|
#### File: jesierski/splendid-day/recognition.py
```python
import PIL
import PIL.Image
from flower_model import classify, get_training_flowers
"""[recognition of flower type with DL model a]
"""
def get_flower_classification(picture):
print('Here is some DL happening.')
answer = classify(picture)
return answer
if __name__ == '__main__':
path = 'static/images/sunflower_14.jpeg'
example_flower = PIL.Image.open(path)
print(get_flower_classification(example_flower))
```
#### File: jesierski/splendid-day/website.py
```python
import os
from flask import Flask, flash, request, redirect, url_for, render_template, render_template_string
from werkzeug.utils import secure_filename
import pathlib
import PIL
import PIL.Image
import recognition
UPLOAD_FOLDER = 'static/images/'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask('Flower recognition')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
"""[check uploaded file has allowed extension]
"""
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
"""[Upload file]
Returns:
[template]: [returns to index for further processing of uploaded file]
"""
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file_input']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('upload_file', filename=filename))
return render_template('index.html')
"""[ouputs result of flower recognition]
Returns:
[template]: [render_template('result.html', recognition=result, flower_file=path_uploaded_file, file_name=filename)]
"""
@app.route('/result', methods=['GET', 'POST'])
def give_result():
if request.method == 'POST':
uploaded_file = request.files['classify_input']
filename = secure_filename(uploaded_file.filename)
path_uploaded_file = os.path.join(app.config['UPLOAD_FOLDER'], filename)
img = PIL.Image.open(path_uploaded_file)
result = recognition.get_flower_classification(img)
return render_template('result.html', recognition=result, flower_file=path_uploaded_file, file_name=filename)
if __name__ == '__main__':
app.run(debug=True, port=5000)
```
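For a quick manual check of the upload route, Flask's built-in test client can post a file without starting the server. The sketch below is not part of the repository and assumes the sample image in `static/images/` exists and the model dependencies import cleanly.
```python
# Manual smoke-test sketch for the '/' upload route (assumes the sample image exists).
import io
from website import app

with app.test_client() as client:
    with open('static/images/sunflower_14.jpeg', 'rb') as f:
        payload = {'file_input': (io.BytesIO(f.read()), 'sunflower_14.jpeg')}
    resp = client.post('/', data=payload, content_type='multipart/form-data')
    print(resp.status_code)  # expect a 302 redirect after a successful upload
```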
|
{
"source": "jesimar/Benchmark-Languages",
"score": 3
}
|
#### File: Benchmark-Languages/Numeros-Primos/primo.py
```python
def ehPrimo(number):
nDiv = 0
for i in range(1, number+1):
if number % i == 0:
nDiv=nDiv+1
return nDiv
def main():
r = [10, 100, 1000, 10000]
for max in r:
contPrimo = 0
for number in range(2,max):
nDiv = ehPrimo(number)
if nDiv == 2:
contPrimo = contPrimo + 1
print("Numeros de Primos ate %d: %2.2f %%\n", max, (100.0*contPrimo)/max)
main()
```
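As a point of comparison, not part of the benchmark itself: a trial-division test that stops at sqrt(n) produces the same percentages with far fewer iterations, which becomes noticeable at the 10000 limit.
```python
# Alternative sketch: sqrt-bounded trial division instead of counting all divisors.
def is_prime(number):
    if number < 2:
        return False
    i = 2
    while i * i <= number:
        if number % i == 0:
            return False
        i += 1
    return True

def main():
    for limit in [10, 100, 1000, 10000]:
        count = sum(1 for n in range(2, limit) if is_prime(n))
        print("Numeros de Primos ate %d: %2.2f %%" % (limit, (100.0 * count) / limit))

main()
```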
|
{
"source": "jesimar/desafio-senai",
"score": 3
}
|
#### File: desafio-senai/test/test_basic.py
```python
from unittest import TestCase, main
import os
import numpy as np
import pandas as pd
class BasicTests(TestCase):
    # instance without a problem
def test_file_analysis_instance1(self):
path = os.path.join('data', 'instances', 'instance-1.csv')
df = pd.read_csv(path, header=None)
values_init_phase0 = df.iloc[0][:10].values
values_final_phase0 = df.iloc[0][799999:800003].values
values_init_phase1 = df.iloc[1][:10].values
values_final_phase1 = df.iloc[1][799999:800003].values
values_init_phase2 = df.iloc[2][:10].values
values_final_phase2 = df.iloc[2][799999:800003].values
serie_init_phase0 = np.array([18, 18, 17, 18, 18, 18, 19, 18, 18, 17])
serie_final_phase0 = np.array([17, 0, 0, 0])
serie_init_phase1 = np.array([1, 0, -1, 1, 0, 0, 1, 0, 0, 0])
serie_final_phase1 = np.array([0, 1, 1, 0])
serie_init_phase2 = np.array([-19, -19, -20, -19, -19, -20, -18, -19, -20, -19])
serie_final_phase2 = np.array([-19, 2, 2, 0])
self.assertEqual((values_init_phase0 == serie_init_phase0).all(), True)
self.assertEqual((values_final_phase0 == serie_final_phase0).all(), True)
self.assertEqual((values_init_phase1 == serie_init_phase1).all(), True)
self.assertEqual((values_final_phase1 == serie_final_phase1).all(), True)
self.assertEqual((values_init_phase2 == serie_init_phase2).all(), True)
self.assertEqual((values_final_phase2 == serie_final_phase2).all(), True)
    # instance with a problem
def test_file_analysis_instance153(self):
path = os.path.join('data', 'instances', 'instance-153.csv')
df = pd.read_csv(path, header=None)
values_init_phase0 = df.iloc[0][:10].values
values_final_phase0 = df.iloc[0][799999:800003].values
values_init_phase1 = df.iloc[1][:10].values
values_final_phase1 = df.iloc[1][799999:800003].values
values_init_phase2 = df.iloc[2][:10].values
values_final_phase2 = df.iloc[2][799999:800003].values
serie_init_phase0 = np.array([-15, -13, -13, -13, -13, -14, -14, -15, -16, -16])
serie_final_phase0 = np.array([-11, 456, 0, 1])
serie_init_phase1 = np.array([18, 22, 21, 20, 22, 19, 20, 20, 16, 20])
serie_final_phase1 = np.array([21, 457, 1, 1])
serie_init_phase2 = np.array([-7, -3, -4, -6, -3, -6, -5, -5, -8, -5])
serie_final_phase2 = np.array([-3, 458, 2, 1])
self.assertEqual((values_init_phase0 == serie_init_phase0).all(), True)
self.assertEqual((values_final_phase0 == serie_final_phase0).all(), True)
self.assertEqual((values_init_phase1 == serie_init_phase1).all(), True)
self.assertEqual((values_final_phase1 == serie_final_phase1).all(), True)
self.assertEqual((values_init_phase2 == serie_init_phase2).all(), True)
self.assertEqual((values_final_phase2 == serie_final_phase2).all(), True)
if __name__ == '__main__':
main()
```
|
{
"source": "jesiqueira/IQ_V2",
"score": 3
}
|
#### File: IQ_V2/indicadores/indicadores.py
```python
class Indicadores:
def __init__(self, candles):
self.candle = candles
def sma(self, periodo) -> float:
"""Calcular SMA de n período, deve receber um array de candle e um pediodo"""
# df = pd.DataFrame(self.candle).sort_index(ascending=False)
close = []
for i in range(1, periodo + 1):
close.append(self.candle[-i]['close'])
return sum(close) / periodo
def ema(self, periodo) -> float:
"""Calcular EMA de n período, deve receber um array de candle e um pediodo"""
close = []
for i in range(1, periodo + 1):
close.append(self.candle[-i]['close'])
sma = sum(close) / periodo
return (2 / (periodo + 1)) * (self.candle[-1]['close'] - sma) + sma
```
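A usage sketch, not taken from the repository: the methods above index candles as dicts with a `'close'` key, so a minimal call looks like the following. The import path mirrors the file header and the prices are made up.
```python
# Illustrative usage; candle values are invented.
from indicadores.indicadores import Indicadores  # assumed module path per the file header

candles = [{'close': c} for c in [1.10, 1.12, 1.11, 1.15, 1.14]]
ind = Indicadores(candles)
print(ind.sma(3))  # mean of the last three closes
print(ind.ema(3))
```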
#### File: iqoptionapi/http/send_sms.py
```python
from iqoptionapi.http.resource import Resource
import json
class SMS_Sender(Resource):
"""Class for IQ option sms resource."""
# pylint: disable=too-few-public-methods
url = ""
def _post(self, data=None, headers=None):
"""Send get request for IQ Option API sms http resource.
:returns: The instance of :class:`requests.Response`.
"""
return self.api.send_http_request_v2(method="POST", url="https://auth.iqoption.com/api/v2/verify/2fa",data=json.dumps(data), headers=headers)
def __call__(self, token_reason):
"""Method to get IQ Option API sms http request.
:param str method: The method of a IQ Option server 2FA.
:param str token_reason: The token of a IQ Option server 2FA.
:returns: The instance of :class:`requests.Response`.
"""
data = {"method": "sms",
"token": token_reason}
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Referer': 'https://iqoption.com/en/login',
'Sec-Fetch-Mode': 'cors',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'
}
return self._post(data=data, headers=headers)
```
#### File: ws/chanels/base.py
```python
import time
class Base(object):
"""Class for base IQ Option websocket chanel."""
# pylint: disable=too-few-public-methods
def __init__(self, api):
"""
:param api: The instance of :class:`IQOptionAPI
<iqoptionapi.api.IQOptionAPI>`.
"""
self.api = api
def send_websocket_request(self, name, msg,request_id=""):
"""Send request to IQ Option server websocket.
:param str name: The websocket chanel name.
:param dict msg: The websocket chanel msg.
:returns: The instance of :class:`requests.Response`.
"""
if request_id == '':
request_id = int(str(time.time()).split('.')[1])
return self.api.send_websocket_request(name, msg,request_id)
```
#### File: ws/chanels/unsubscribe.py
```python
from iqoptionapi.ws.chanels.base import Base
import datetime
import iqoptionapi.constants as OP_code
class Unsubscribe(Base):
"""Class for IQ option candles websocket chanel."""
# pylint: disable=too-few-public-methods
name = "unsubscribeMessage"
def __call__(self, active_id, size=1):
data = {"name": "candle-generated",
"params": {
"routingFilters": {
"active_id": str(active_id),
"size": int(size)
}
}
}
self.send_websocket_request(self.name, data)
class Unsubscribe_candles(Base):
"""Class for IQ option candles websocket chanel."""
# pylint: disable=too-few-public-methods
name = "unsubscribeMessage"
def __call__(self, active_id, size=1):
data = {"name": "candles-generated",
"params": {
"routingFilters": {
"active_id": str(active_id)
}
}
}
self.send_websocket_request(self.name, data)
class Unsubscribe_Instrument_Quites_Generated(Base):
name = "unsubscribeMessage"
def __call__(self, ACTIVE, expiration_period):
data = {
"name": "instrument-quotes-generated",
"params": {
"routingFilters": {
"active": int(OP_code.ACTIVES[ACTIVE]),
"expiration_period": int(expiration_period*60),
"kind": "digital-option",
},
},
"version": "1.0"
}
self.send_websocket_request(self.name, data)
def get_digital_expiration_time(self, duration):
exp = int(self.api.timesync.server_timestamp)
value = datetime.datetime.fromtimestamp(exp)
minute = int(value.strftime('%M'))
# second=int(value.strftime('%S'))
ans = exp-exp % 60 # delete second
ans = ans+(duration-minute % duration)*60
if exp > ans-10:
ans = ans+(duration)*60
return ans
class Unsubscribe_top_assets_updated(Base):
name = "unsubscribeMessage"
def __call__(self, instrument_type):
data = {"name": "top-assets-updated",
"params": {
"routingFilters": {
"instrument_type": str(instrument_type)
}
},
"version": "1.2"
}
self.send_websocket_request(self.name, data)
class Unsubscribe_commission_changed(Base):
name = "unsubscribeMessage"
def __call__(self, instrument_type):
data = {"name": "commission-changed",
"params": {
"routingFilters": {
"instrument_type": str(instrument_type)
}
},
"version": "1.0"
}
self.send_websocket_request(self.name, data)
class Unscribe_live_deal(Base):
name = "unsubscribeMessage"
def __call__(self, name, active_id, _type):
if name == "live-deal-binary-option-placed":
_type_name = "option_type"
_active_id = "active_id"
elif name == "live-deal-digital-option":
_type_name = "expiration_type"
_active_id = "instrument_active_id"
elif name == "live-deal":
_type_name = "instrument_type"
_active_id = "instrument_active_id"
data = {"name": str(name),
"params": {
"routingFilters": {
_active_id: int(active_id),
_type_name: str(_type)
}
},
"version": "2.0"
}
self.send_websocket_request(self.name, data)
class UnsubscribeDigitalPriceSplitter(Base):
name = "unsubscribeMessage"
def __call__(self, asset_id):
data = {
"name": "price-splitter.client-price-generated",
"version": "1.0",
"params": {
"routingFilters": {
"instrument_type": "digital-option",
"asset_id": int(asset_id)
}
}
}
self.send_websocket_request(self.name, msg=data)
```
|
{
"source": "jesiqueira/selenium",
"score": 4
}
|
#### File: selenium/Aula_04/aula_04_2_atributos.py
```python
from selenium.webdriver import Chrome
def find_by_text(browser, tag, text):
"""
- browser = instacia do browser
- texto = conteudo que deve estar na tag
- tag = tag onde o texto será procurado
"""
elementos = browser.find_elements_by_tag_name(tag) #lista
for elemento in elementos:
if elemento.text == text:
return elemento
def find_by_href(browser, link):
"""
- Encontrar o Elemento 'a' com o link `link`
"""
elementos = browser.find_elements_by_tag_name('a')
for elemento in elementos:
if link in elemento.get_attribute('href'):
return elemento
browser = Chrome()
browser.get('https://selenium.dunossauro.live/aula_04_a.html')
elemento_ddg = find_by_text(browser, 'li', 'Item 1')
print(elemento_ddg.text)
elemento_tag = find_by_href(browser, 'google')
print(elemento_tag.get_attribute('href'))
print(elemento_tag.text)
browser.quit()
```
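A side note rather than part of the lesson: Selenium 4 removed the `find_elements_by_*` helpers, so under the current API the same helper would be written roughly as below.
```python
# Selenium 4 style sketch of find_by_text using the By locator API.
from selenium.webdriver.common.by import By

def find_by_text(browser, tag, text):
    for elemento in browser.find_elements(By.TAG_NAME, tag):
        if elemento.text == text:
            return elemento
```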
|
{
"source": "jesiqueira/work",
"score": 2
}
|
#### File: work/app/__init__.py
```python
from flask import Flask
def create_app():
app = Flask(__name__)
    # Routes
from app.controllers.main.rotas import main
    # Register the blueprint
app.register_blueprint(main)
return app
```
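The factory above imports `main` from `app.controllers.main.rotas`, which is not shown in this excerpt. A hypothetical minimal version of that module, with an illustrative route, would look like this.
```python
# Hypothetical app/controllers/main/rotas.py; the route and return value are illustrative.
from flask import Blueprint

main = Blueprint('main', __name__)

@main.route('/')
def index():
    return 'ok'
```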
|
{
"source": "jesisca-tandi/nn-manual",
"score": 3
}
|
#### File: nn-manual/nn/functional.py
```python
import numpy as np
from itertools import product
def clip_gradients(in_grads, clip=1):
return np.clip(in_grads, -clip, clip)
def sigmoid(X):
return 1.0 / (1 + np.exp(-X))
def img2col(data, h_indices, w_indices, k_h, k_w):
batch = data.shape[0]
indices = list(product(h_indices, w_indices))
out = np.stack(map(
lambda x: data[:, :, x[0]:x[0]+k_h, x[1]:x[1]+k_w].reshape(batch, -1), indices), axis=-1)
return out
```
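A small shape check, not from the repository: `img2col` flattens one patch per `(h, w)` start position and stacks the patches along the last axis, so a 1x1x4x4 input with a 2x2 kernel and stride 2 yields four columns of length four.
```python
# Shape sketch for img2col (assumed import path per the file header).
import numpy as np
from nn.functional import img2col

data = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
cols = img2col(data, h_indices=[0, 2], w_indices=[0, 2], k_h=2, k_w=2)
print(cols.shape)  # (1, 4, 4): batch, channels * k_h * k_w, number of patches
```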
#### File: nn-manual/nn/model.py
```python
import numpy as np
import copy
import pickle
import sys
import time
from nn.functional import clip_gradients
class Model():
def __init__(self):
self.layers = []
self.inputs = None
self.optimizer = None
self.regularization = None
def add(self, layer):
self.layers.append(layer)
def compile(self, optimizer, loss, regularization=None):
self.optimizer = optimizer
self.layers.append(loss)
self.regularization = regularization
def forward(self, inputs, targets):
self.inputs = []
layer_inputs = inputs
for l, layer in enumerate(self.layers):
self.inputs.append(layer_inputs)
if l == len(self.layers)-1:
layer_inputs, probs = layer.forward(layer_inputs, targets)
else:
layer_inputs = layer.forward(layer_inputs)
outputs = layer_inputs
return outputs, probs
def backward(self, targets):
for l, layer in enumerate(self.layers[::-1]):
if l == 0:
grads = layer.backward(self.inputs[-1-l], targets)
else:
grads = layer.backward(grads, self.inputs[-1-l])
def get_params(self):
params = {}
grads = {}
for l, layer in enumerate(self.layers):
if layer.trainable:
layer_params, layer_grads = layer.get_params('layer-%dth' % l)
params.update(layer_params)
grads.update(layer_grads)
if self.regularization:
reg_grads = self.regularization.backward(params)
for k, v in grads.items():
grads[k] += reg_grads[k]
return params, grads
def update(self, optimizer, iteration):
params, grads = self.get_params()
# clip gradients
# for k, v in grads.items():
# grads[k] = clip_gradients(v)
# print(k, np.mean(np.abs(v)))
new_params = optimizer.update(params, grads, iteration)
# for l, layer in enumerate(self.layers):
# if layer.trainable:
# w_key = 'layer-%dth:' % l + layer.name + '/weights'
# b_key = 'layer-%dth:' % l + layer.name + '/bias'
# layer_params = {
# w_key: new_params[w_key],
# b_key: new_params[b_key]
# }
# layer.update(layer_params)
for l, layer in enumerate(self.layers):
if layer.trainable:
layer_params, _ = layer.get_params('layer-{}th'.format(l))
for k in layer_params.keys():
layer_params[k] = new_params[k]
assert ~np.any(
np.isnan(layer_params[k])), '{} contains NaN'.format(k)
layer.update(layer_params)
def train(self, dataset, train_batch=32, val_batch=1000, test_batch=1000, epochs=5, val_intervals=100, test_intervals=500, print_intervals=100):
train_loader = dataset.train_loader(train_batch)
num_train = dataset.num_train
train_results = []
test_results = []
val_results = []
for epoch in range(epochs):
print('Epoch %d: ' % epoch, end='\n')
start = time.time()
for iteration in range(num_train//train_batch):
total_iteration = epoch*(num_train//train_batch)+iteration
# output test loss and accuracy
if test_intervals > 0 and iteration > 0 and iteration % test_intervals == 0:
test_loss, test_acc = self.test(dataset, test_batch)
test_results.append([total_iteration, test_loss, test_acc])
if val_intervals > 0 and iteration > 0 and iteration % val_intervals == 0:
val_loss, val_acc = self.val(dataset, val_batch)
val_results.append([total_iteration, val_loss, val_acc])
x, y = next(train_loader)
loss, probs = self.forward(x, y)
acc = np.sum(np.argmax(probs, axis=-1) == y) / train_batch
train_results.append([total_iteration, loss, acc])
if self.regularization:
params, _ = self.get_params()
reg_loss = self.regularization.forward(params)
self.backward(y)
self.update(self.optimizer, total_iteration)
if iteration > 0 and iteration % print_intervals == 0:
speed = (print_intervals*train_batch) / \
(time.time() - start)
print('Train iter %d/%d:\t' %
(iteration, num_train//train_batch), end='')
print('acc %.2f, loss %.2f' % (acc, loss), end='')
if self.regularization:
print(', reg loss %.2f' % reg_loss, end='')
print(', speed %.2f samples/sec' % (speed))
start = time.time()
return np.array(train_results), np.array(val_results), np.array(test_results)
def test(self, dataset, test_batch):
# set the mode into testing mode
for layer in self.layers:
layer.set_mode(training=False)
test_loader = dataset.test_loader(test_batch)
num_test = dataset.num_test
num_accurate = 0
sum_loss = 0
try:
while True:
x, y = next(test_loader)
loss, probs = self.forward(x, y)
num_accurate += np.sum(np.argmax(probs, axis=-1) == y)
sum_loss += loss
except StopIteration:
avg_loss = sum_loss*test_batch/num_test
accuracy = num_accurate/num_test
print('Test acc %.2f, loss %.2f' % (accuracy, avg_loss))
        # reset layers back to training mode so training can continue
for layer in self.layers:
layer.set_mode(training=True)
return avg_loss, accuracy
def val(self, dataset, val_batch):
# set the mode into testing mode
for layer in self.layers:
layer.set_mode(training=False)
val_loader = dataset.val_loader(val_batch)
num_val = dataset.num_val
num_accurate = 0
sum_loss = 0
try:
while True:
x, y = next(val_loader)
loss, probs = self.forward(x, y)
num_accurate += np.sum(np.argmax(probs, axis=-1) == y)
sum_loss += loss
except StopIteration:
avg_loss = sum_loss*val_batch/num_val
accuracy = num_accurate/num_val
print('Val accuracy %.2f, loss %.2f' %
(accuracy, avg_loss))
        # reset layers back to training mode so training can continue
for layer in self.layers:
layer.set_mode(training=True)
return avg_loss, accuracy
```
|
{
"source": "jesisca-tandi/video-actions-classification",
"score": 2
}
|
#### File: video-actions-classification/lib/functions.py
```python
import keras
from keras import regularizers
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv1D, Conv2D, MaxPooling2D, Embedding, LSTM, TimeDistributed, Masking, Lambda, GRU, Bidirectional
from keras.preprocessing import image, sequence
from keras.applications.vgg16 import VGG16
from keras.optimizers import Adam
from keras.utils import to_categorical, np_utils, Sequence
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from datetime import datetime
import numpy as np
import pandas as pd
import os, torch, pickle
import scipy.stats as stats
from .read_datasetBreakfast import load_data, read_mapping_dict
def getData(split, COMP_PATH=''):
'''Load train / test data. Input: (str) 'test', 'training' '''
train_split = os.path.join(COMP_PATH, 'splits/train.split1.bundle') #Train Split
test_split = os.path.join(COMP_PATH, 'splits/test.split1.bundle') #Test Split
GT_folder = os.path.join(COMP_PATH, 'groundTruth/') #Ground Truth Labels for each training video
DATA_folder = os.path.join(COMP_PATH, 'data/') #Frame I3D features for all videos
mapping_loc = os.path.join(COMP_PATH, 'splits/mapping_bf.txt')
actions_dict = read_mapping_dict(mapping_loc)
if split == 'training':
data_feat, data_labels = load_data(train_split, actions_dict, GT_folder, DATA_folder, datatype = split) #Get features and labels
return data_feat, data_labels
if split == 'test':
data_feat = load_data(test_split, actions_dict, GT_folder, DATA_folder, datatype = split) #Get features only
return data_feat
def processTrainData(**kwargs):
'''Load and then process training data
Input: (str) 'test' or 'training'
Process:
1. one-hot encoding of labels
2. sequence padding of features and labels
3. train-validation split (80:20)
'''
X, y = getData('training', **kwargs)
# Check the maximum no of frames in the train dataset
maxFrames = max([i.shape[0] for i in X])
# Transform labels into categorical labels (one-hot encoding)
y_cat = [to_categorical(i, 48) for i in y]
# Padding of different sequence length
# As the dataset is of different number of frames for each of the videos,
# we are doing a post-padding with -1 to make sure all the videos are of equal number of frames (pad to the max data length)
# (i.e. padded at the end of the videos)
y_padded = sequence.pad_sequences(y_cat, maxlen=maxFrames,padding='post', truncating='post', value=-1, dtype='int')
X_padded = sequence.pad_sequences(X, maxlen=maxFrames,padding='post', truncating='post', value=-1, dtype='float16')
# To facilitate the comparison of the validation data using the training_segment.txt file provided,
# We split the training segment together with the train-validation split.
training_segment_rev = processSegment('training_segment.txt')
# Train-validation split (80:20)
# As we do not have ground truth for test data, we are further splitting the train dataset into train and validation set.
X_train, X_val, y_train, y_val, segment_train, segment_val = train_test_split(X_padded, y_padded, training_segment_rev, random_state=1, test_size=0.2)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print('Train set: X shape {}, y shape {}'.format(X_train.shape, y_train.shape))
print('Validation set: X shape {}, y shape {}'.format(X_val.shape, y_val.shape))
return X_train, X_val, y_train, y_val, segment_train, segment_val
def processTestData(**kwargs):
'''Load and process test data (with padding of sequences)'''
# Load data
X = getData('test', **kwargs)
# Check the maximum no of frames in the train dataset
maxFrames = max([i.shape[0] for i in X])
# Post-padding of sequences of unequal length by values of -1
X_padded = sequence.pad_sequences(X, maxlen=maxFrames,padding='post', truncating='post', value=-1, dtype='float16')
return X_padded
def processSegment(file):
'''Get segments of videos for the purpose of validation'''
file_open = open(file, 'r')
segment = file_open.read().split('\n')[:-1]
segment_rev = []
for i in range(len(segment)):
segment_rev.append([int(j) for j in segment[i].split()])
return segment_rev
def getMajorityVotes(y, segments):
'''Function to get the majority vote of labels within each segment'''
votes = []
for i,j in zip(y, segments):
for m,n in zip(j[0:-1], j[1:]):
votes.append(stats.mode(i[m:n])[0][0])
return votes
def createDir(wdir):
'''Function to create directory'''
if not os.path.exists(wdir):
os.makedirs(wdir)
def train(model, modelName, savePath, batchSize=50, epochs=50, COMP_PATH=''):
'''
Run training
1. Load train-validation data
2. Train model (and save models after each epochs)
3. Evaluate on validation set
'''
createDir(savePath)
createDir(os.path.join(savePath, 'checkPoints'))
# Load and process data
X_train, X_val, y_train, y_val, segment_train, segment_val = processTrainData(COMP_PATH=COMP_PATH)
# Show model summary
model.summary()
# Start training
checkPointsModel = os.path.join(savePath, 'checkPoints', 'saved-model-{epoch:02d}.h5')
checkpoint = ModelCheckpoint(checkPointsModel, monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto')
model.fit(X_train, y_train, validation_data = (X_val, y_val), batch_size=batchSize, epochs=epochs, callbacks=[checkpoint])
# Save model
model.save(os.path.join(savePath, 'model_{}_{}.h5'.format(modelName, str(datetime.now()).replace(' ', '_').replace(':', '')[:17])))
# Check validation scores
validate(model, X_val, y_val, segment_val)
return model
def validate(trainedModel, X_val, y_val, segment_val):
'''
Evaluate validation
1. Loss function of the model
2. Calculate accuracy of video segment classification using majority vote
'''
# Get validation performance
val_loss, val_acc = trainedModel.evaluate(X_val, y_val)
print('Test Loss: {}, Accuracy: {}'.format(val_loss, val_acc))
# Get classification accuracy of classification of each video segment (majority voting) to simulate final testing
yhat_val = trainedModel.predict_classes(X_val)
sliced_y_val = getMajorityVotes(np.argmax(y_val, axis=-1), segment_val)
sliced_yhat_val = getMajorityVotes(yhat_val, segment_val)
acc = accuracy_score(sliced_y_val, sliced_yhat_val)
print("Accuracy based on sliced data: " + str(acc))
def test(trainedModel, modelName, savePath, COMP_PATH=''):
'''Evaluate test data predictions'''
createDir(savePath)
X = processTestData(COMP_PATH=COMP_PATH)
test_segment_rev = processSegment('test_segment.txt')
# Predict
yhat = trainedModel.predict_classes(X)
# Get majority votes
yhat_maj = getMajorityVotes(yhat, test_segment_rev)
# Save out predictions
new_test = pd.DataFrame()
new_test['Id'] = list(range(len(yhat_maj)))
new_test['Category'] = yhat_maj
new_test.to_csv(os.path.join(savePath, 'Predicted_Category_{}.csv'.format(modelName)))
```
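`train()` expects an already-built Keras model that fits on the padded frame sequences and exposes `predict_classes`. A minimal sketch is given below; the 400-dimensional I3D feature size, the 48 action classes, and the layer widths are assumptions for illustration, and the sketch reuses the Keras imports already at the top of this module.
```python
# Hypothetical model builder matching train()'s interface; sizes are assumptions.
def build_model(n_features=400, n_classes=48):
    model = Sequential()
    model.add(Masking(mask_value=-1.0, input_shape=(None, n_features)))
    model.add(Bidirectional(LSTM(64, return_sequences=True)))
    model.add(TimeDistributed(Dense(n_classes, activation='softmax')))
    model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    return model

# e.g. train(build_model(), 'bilstm_baseline', savePath='output', epochs=10)
```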
|