ext | sha | content |
---|---|---|
py | 1a321b3f1739667e8e7b92363ad4a4f0df645fb6 | """Sensor platform for Brottsplatskartan information."""
from __future__ import annotations
from collections import defaultdict
from datetime import timedelta
import logging
import uuid
import brottsplatskartan
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_AREA = "area"
DEFAULT_NAME = "Brottsplatskartan"
SCAN_INTERVAL = timedelta(minutes=30)
AREAS = [
"Blekinge län",
"Dalarnas län",
"Gotlands län",
"Gävleborgs län",
"Hallands län",
"Jämtlands län",
"Jönköpings län",
"Kalmar län",
"Kronobergs län",
"Norrbottens län",
"Skåne län",
"Stockholms län",
"Södermanlands län",
"Uppsala län",
"Värmlands län",
"Västerbottens län",
"Västernorrlands län",
"Västmanlands län",
"Västra Götalands län",
"Örebro län",
"Östergötlands län",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_AREA, default=[]): vol.All(cv.ensure_list, [vol.In(AREAS)]),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Brottsplatskartan platform."""
area = config.get(CONF_AREA)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config[CONF_NAME]
    # Every Home Assistant instance should have its own unique
    # app parameter: https://brottsplatskartan.se/sida/api
app = f"ha-{uuid.getnode()}"
bpk = brottsplatskartan.BrottsplatsKartan(
app=app, area=area, latitude=latitude, longitude=longitude
)
add_entities([BrottsplatskartanSensor(bpk, name)], True)
class BrottsplatskartanSensor(SensorEntity):
"""Representation of a Brottsplatskartan Sensor."""
def __init__(self, bpk, name):
"""Initialize the Brottsplatskartan sensor."""
self._brottsplatskartan = bpk
self._attr_name = name
def update(self):
"""Update device state."""
incident_counts = defaultdict(int)
incidents = self._brottsplatskartan.get_incidents()
if incidents is False:
_LOGGER.debug("Problems fetching incidents")
return
for incident in incidents:
incident_type = incident.get("title_type")
incident_counts[incident_type] += 1
self._attr_extra_state_attributes = {
ATTR_ATTRIBUTION: brottsplatskartan.ATTRIBUTION
}
self._attr_extra_state_attributes.update(incident_counts)
self._attr_native_value = len(incidents)
|
py | 1a321d845bd6c84bbce98df4fe46722d99dc785d | import torch
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets
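# Illustrative example (added for clarity, not part of the original file): with two
# images and per-image targets of length 6 split over levels of 4 and 2 anchors,
#     target = [torch.zeros(6), torch.ones(6)]
#     levels = images_to_levels(target, [4, 2])
# gives levels[0] of shape (2, 4) and levels[1] of shape (2, 2).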
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
    The bbox center is fixed; each corner is moved toward the opposite corner
    by ratio, so the new w' and h' are (1 - 2 * ratio) times the originals.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
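# Minimal usage sketch (illustrative, not part of the original file): shrink a toy
# box around its centre and clamp it to a hypothetical 64x64 feature map.
if __name__ == '__main__':
    box = torch.tensor([10.0, 20.0, 50.0, 60.0])
    # -> (tensor(20), tensor(30), tensor(40), tensor(50))
    print(calc_region(box, ratio=0.25, featmap_size=(64, 64)))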
|
py | 1a321e97d1b6bf99255031ee179c6083a8e5fd34 | import argparse
import json
import os
import sys
from datetime import datetime
from pathlib import Path
from shlex import quote
import fuckit
import mutagen
import pandas as pd
from joblib import Parallel, delayed
from rich import inspect, print
from tinytag import TinyTag
from db import fetchall_dict, sqlite_con
from subtitle import get_subtitle
from utils import chunks, cmd, get_video_files, log
def parse_mutagen_tags(m, tiny_tags):
def c(l):
if isinstance(l, str):
l = [l]
if l is None or len(l) == 0:
return None
no_comma = sum([s.split(",") for s in l], [])
no_semicol = sum([s.split(";") for s in no_comma], [])
no_unknown = [x for x in no_semicol if x.lower() not in ["unknown", ""]]
return ";".join(no_unknown)
def ss(idx, l):
if l is None:
return None
try:
return l[idx]
except IndexError:
return None
return {
"albumgenre": c(m.tags.get("albumgenre")),
"albumgrouping": c(m.tags.get("albumgrouping")),
"mood": c(
list(
set(
(m.tags.get("albummood") or [])
+ (m.tags.get("MusicMatch_Situation") or [])
+ (m.tags.get("Songs-DB_Occasion") or [])
)
)
),
"genre": c(list(set((m.tags.get("genre") or []) + list(filter(None, [tiny_tags["genre"]]))))),
"year": ss(
0,
ss(
0,
list(
filter(
None,
[
m.tags.get("originalyear"),
m.tags.get("TDOR"),
m.tags.get("TORY"),
m.tags.get("date"),
m.tags.get("TDRC"),
m.tags.get("TDRL"),
],
)
),
),
),
"bpm": ss(
0,
ss(
0,
list(
filter(
None,
[m.tags.get("fBPM"), m.tags.get("bpm_accuracy")],
)
),
),
),
"key": ss(
0,
ss(
0,
list(
filter(
None,
[
m.tags.get("TIT1"),
m.tags.get("key_accuracy"),
m.tags.get("TKEY"),
],
)
),
),
),
"gain": ss(0, m.tags.get("replaygain_track_gain")),
"time": c(ss(0, m.tags.get("time_signature"))),
"decade": ss(0, m.tags.get("Songs-DB_Custom1")),
"categories": ss(0, m.tags.get("Songs-DB_Custom2")),
"city": ss(0, m.tags.get("Songs-DB_Custom3")),
"country": c(
ss(
0,
list(
filter(
None,
[
m.tags.get("Songs-DB_Custom4"),
m.tags.get("MusicBrainz Album Release Country"),
],
)
),
)
),
}
def extract_metadata(args, f):
try:
ffprobe = json.loads(
cmd(
f"ffprobe -loglevel quiet -print_format json=compact=1 -show_entries format {quote(f)}", quiet=True
).stdout
)
except:
try:
cmd(f"trash-put {quote(f)}")
print(f"Failed reading {f}", file=sys.stderr)
except:
pass
return
if not "format" in ffprobe:
print(f"Failed reading format {f}", file=sys.stderr)
print(ffprobe)
return
stat = os.stat(f)
blocks_allocated = stat.st_blocks * 512
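    # st_blocks is reported in 512-byte units, so blocks_allocated is the on-disk
    # allocation; the sparseness ratio computed below compares the logical size to
    # it (a sparse or transparently compressed file yields a ratio greater than 1).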
if "tags" in ffprobe["format"]:
del ffprobe["format"]["tags"]
if "size" in ffprobe["format"]:
ffprobe["format"]["size"] = int(ffprobe["format"]["size"])
if blocks_allocated == 0:
sparseness = 0
else:
sparseness = ffprobe["format"]["size"] / blocks_allocated
media = dict(
**ffprobe["format"],
# streams=ffprobe["streams"],
sparseness=sparseness,
time_created=datetime.fromtimestamp(stat.st_ctime),
time_modified=datetime.fromtimestamp(stat.st_mtime),
)
if args.audio:
media = {**media, "listen_count": 0}
try:
tiny_tags = TinyTag.get(f).as_dict()
mutagen_tags = mutagen.File(f)
assert mutagen_tags.tags
if "extra" in tiny_tags:
del tiny_tags["extra"]
except:
return media
mutagen_tags_p = parse_mutagen_tags(mutagen_tags, tiny_tags)
audio = {
**media,
**tiny_tags,
**mutagen_tags_p,
}
# print(audio)
@fuckit
def get_rid_of_known_tags():
del mutagen_tags.tags["encoder"]
del mutagen_tags.tags["TMED"]
del mutagen_tags.tags["TSO2"]
del mutagen_tags.tags["artist-sort"]
del mutagen_tags.tags["ASIN"]
del mutagen_tags.tags["Acoustid Id"]
del mutagen_tags.tags["Artists"]
del mutagen_tags.tags["BARCODE"]
del mutagen_tags.tags["CATALOGNUMBER"]
del mutagen_tags.tags["MusicBrainz Album Artist Id"]
del mutagen_tags.tags["MusicBrainz Album Id"]
del mutagen_tags.tags["MusicBrainz Album Release Country"]
del mutagen_tags.tags["MusicBrainz Album Status"]
del mutagen_tags.tags["MusicBrainz Album Type"]
del mutagen_tags.tags["MusicBrainz Artist Id"]
del mutagen_tags.tags["MusicBrainz Release Group Id"]
del mutagen_tags.tags["MusicBrainz Release Track Id"]
del mutagen_tags.tags["SCRIPT"]
del mutagen_tags.tags["originalyear"]
del mutagen_tags.tags["artist"]
del mutagen_tags.tags["album"]
del mutagen_tags.tags["ALBUMARTIST"]
del mutagen_tags.tags["title"]
del mutagen_tags.tags["TORY"]
del mutagen_tags.tags["TDOR"]
del mutagen_tags.tags["publisher"]
del mutagen_tags.tags["TRACKNUMBER"]
del mutagen_tags.tags["DISCNUMBER"]
del mutagen_tags.tags["replaygain_track_peak"]
del mutagen_tags.tags["replaygain_track_gain"]
del mutagen_tags.tags["date"]
return mutagen_tags.tags
new_tags = get_rid_of_known_tags()
if new_tags is not None:
print(new_tags)
return audio
return media
def main():
parser = argparse.ArgumentParser()
parser.add_argument("db")
parser.add_argument("paths", nargs="*")
parser.add_argument("-a", "--audio", action="store_true")
parser.add_argument("-s", "--subtitle", action="store_true")
parser.add_argument("-yt", "--youtube-only", action="store_true")
parser.add_argument("-sl", "--subliminal-only", action="store_true")
parser.add_argument("-f", "--force-rescan", action="store_true")
parser.add_argument("-v", "--verbose", action="count", default=0)
args = parser.parse_args()
if args.force_rescan:
Path(args.db).unlink(missing_ok=True)
if Path(args.db).exists():
cmd(f"sqlite-utils optimize {args.db}")
columns = cmd(f"sqlite-utils tables {args.db} --columns | jq -r '.[0].columns[]' ", quiet=True).stdout.splitlines()
for column in columns:
cmd(f"sqlite-utils create-index --if-not-exists --analyze {args.db} media {column}")
con = sqlite_con(args.db)
for path in args.paths:
path = Path(path).resolve()
print(f"{path} : Scanning...")
video_files = get_video_files(path)
new_files = set(video_files)
try:
existing = set(
map(
lambda x: x["filename"],
fetchall_dict(con, f"select filename from media where filename like '{path}%'"),
)
)
except:
video_files = list(new_files)
else:
video_files = list(new_files - existing)
deleted_files = list(existing - new_files)
if len(deleted_files) > 0:
print(f"Removing {len(deleted_files)} orphaned metadata")
df_chunked = chunks(deleted_files, 32765) # sqlite_param_limit
for l in df_chunked:
con.execute(
"delete from media where filename in (" + ",".join(["?"] * len(l)) + ")",
(*l,),
)
con.commit()
if len(video_files) > 0:
print(f"Adding {len(video_files)} new media")
log.info(video_files)
metadata = (
Parallel(n_jobs=-1 if args.verbose == 0 else 1, backend="threading")(
delayed(extract_metadata)(args, file) for file in video_files
)
or []
)
DF = pd.DataFrame(list(filter(None, metadata)))
if args.audio:
if DF.get(["year"]) is not None:
DF.year = DF.year.astype(str)
DF.apply(pd.to_numeric, errors="ignore").convert_dtypes().to_sql( # type: ignore
"media",
con=con,
if_exists="append",
index=False,
chunksize=70,
method="multi",
)
if args.subtitle:
Parallel(n_jobs=5)(delayed(get_subtitle)(args, file) for file in video_files)
if __name__ == "__main__":
main()
|
py | 1a3221ee4951a3fdbbe7f6f9b5037527968b24f2 | import sys
def days_in_month(mon):
if mon == "January" or mon == "March" or mon == "May" or mon == "July" or mon == "August" or mon == "October" or mon == "December":
return 31
elif mon == "April" or mon == "June" or mon == "September" or mon == "November":
return 30
elif mon == "February":
return 28
else:
return None
def test(did_pass):
""" Print the result of a test. """
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
test(days_in_month("February") == 28)
test(days_in_month("June") == 30)
test(days_in_month("fire") == None) |
py | 1a3221f8007eacc464c765f01c5247ee146e1024 | import math
import csv
import numpy as np
from math import sin, cos
from numpy.random.mtrand import seed
import kalman
import matplotlib.pyplot as plt
import particle
magic_coeff = 0.047
wheel_radius = 2.7
wheel_base_half = 7.5
sonar_zero_distance = 13.8
init_x = 0.0
init_y = 0.0
init_angle = 0.0
x_cam_noise = (0.0, 49.0)
y_cam_noise = (0.0, 49.0)
gyro_noise = (0.0, math.radians(16.0))
sonar_normal_noise = (0.0, 4.0)
sonar_invalid_noise = (0.0, 1e+6)
def print_plot(plots=None, coords=None, bounded=True, title=None):
if plots is not None:
(t_plot, x_plot, y_plot) = plots
else:
t_plot = []
x_plot = []
y_plot = []
for tuple in coords:
t_plot.append(tuple[0])
x_plot.append(tuple[2])
y_plot.append(tuple[1])
def print_p(xlabel, t_plot, y_axis, boundary=None):
plt.ylabel(xlabel)
plt.xlabel("y(t)")
plt.plot(t_plot, y_axis)
if title is not None:
plt.title(title)
if boundary is not None:
plt.axis(boundary)
plt.show()
# print_p("x(t)", t_plot, x_plot, [1509976324.240, 1509976340.20860, 0, 140] if bounded else None)
# print_p("y(t)", t_plot, y_plot, [1509976324.240, 1509976340.20860, -10, 40] if bounded else None)
print_p("x(t)", y_plot, x_plot, [-10, 40, 0, 140] if bounded else None)
def follow_by_wheels():
coords = []
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
angle = init_angle
t_prev = 0
is_init = False
for row in spamreader:
try:
t = float(row[0])
if not is_init:
t_prev = t
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
is_init = True
dt = t - t_prev
if abs(vr - vl) < 0.0001:
x_next = x + vl * dt * cos(angle)
y_next = y + vl * dt * sin(angle)
angle_next = angle
else:
R = wheel_base_half * (vl + vr) / (vr - vl)
wt = (vr - vl) / (wheel_base_half * 2) * dt
ICCx = x - R * sin(angle)
ICCy = y + R * cos(angle)
x_next = cos(wt) * (x - ICCx) - sin(wt) * (y - ICCy) + ICCx
y_next = sin(wt) * (x - ICCx) + cos(wt) * (y - ICCy) + ICCy
angle_next = angle + wt
x = x_next
y = y_next
angle = angle_next
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
t_prev = t
coords.append((t, -y, x))
except ValueError:
pass
print_plot(coords=coords, title="By wheels")
def follow_by_gyro():
coords = []
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
# angle = init_angle
t_prev = 0
is_init = False
for row in spamreader:
try:
t = float(row[0])
angle = float(row[2]) * math.pi / 180
if not is_init:
t_prev = t
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
is_init = True
# print(t, d, a, vl, vr, sep=', ')
dt = t - t_prev
avg_speed = (vr + vl) / 2
x_next = x + avg_speed * dt * sin(angle)
y_next = y + avg_speed * dt * cos(angle)
x = x_next
y = y_next
vl = float(row[3]) * magic_coeff
vr = float(row[4]) * magic_coeff
t_prev = t
coords.append((t, x, y))
except ValueError:
pass
print_plot(coords=coords, title="By gyro")
def print_log_camera():
t_plot = []
x_plot = []
y_plot = []
with open('log_camera_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
k = False
for row in spamreader:
if not k:
k = True
continue
t_plot.append(float(row[0]))
x_plot.append(float(row[1]))
y_plot.append(float(row[2]))
print_plot(plots=(t_plot, x_plot, y_plot), title="From camera")
def print_log_camera_kalman():
t_plot = []
x_plot = []
y_plot = []
with open('log_camera_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
k = False
for row in spamreader:
if not k:
k = True
continue
t_plot.append(float(row[0]))
x_plot.append(float(row[1]))
y_plot.append(float(row[2]))
Q = 1
x_plot = kalman.apply_filter(x_plot, Q, x_cam_noise[1])
y_plot = kalman.apply_filter(y_plot, Q, y_cam_noise[1])
print_plot(plots=(t_plot, x_plot, y_plot), title="From camera with Kalman Q=" + str(Q))
def follow_by_gyro_kalman():
coords = []
v = []
t = []
angle = [0]
Q = 0.05
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
v.append((float(row[4]) + float(row[3])) * magic_coeff / 2)
is_init = True
angle.append(float(row[2]) * math.pi / 180)
v.append((float(row[4]) + float(row[3])) * magic_coeff / 2)
except ValueError:
pass
angle = kalman.apply_filter(angle, Q=Q, R=gyro_noise[1])
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
x_next = x + v[i - 1] * dt * sin(angle[i])
y_next = y + v[i - 1] * dt * cos(angle[i])
x = x_next
y = y_next
coords.append((t[i], x, y))
print_plot(coords=coords, title="By gyro with Kalman, Q=" + str(Q))
def sensor_fusion():
coords_gyro = []
coords_wheels = []
vl = []
vr = []
t = []
angle = [0]
Q = 0.1
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
x = init_x
y = init_y
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
is_init = True
angle.append(float(row[2]) * math.pi / 180)
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
except ValueError:
pass
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
avg_speed = (vl[i - 1] + vr[i - 1]) / 2
x_next = x + avg_speed * dt * sin(angle[i])
y_next = y + avg_speed * dt * cos(angle[i])
x = x_next
y = y_next
coords_gyro.append((t[i], x, y))
a = init_angle
x = init_x
y = init_y
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
if abs(vr[i - 1] - vl[i - 1]) < 0.0001:
x_next = x + vl[i - 1] * dt * cos(a)
y_next = y + vl[i - 1] * dt * sin(a)
angle_next = a
else:
R = wheel_base_half * (vl[i - 1] + vr[i - 1]) / (vr[i - 1] - vl[i - 1])
wt = (vr[i - 1] - vl[i - 1]) / (wheel_base_half * 2) * dt
ICCx = x - R * sin(a)
ICCy = y + R * cos(a)
x_next = cos(wt) * (x - ICCx) - sin(wt) * (y - ICCy) + ICCx
y_next = sin(wt) * (x - ICCx) + cos(wt) * (y - ICCy) + ICCy
angle_next = a + wt
x = x_next
y = y_next
a = angle_next
coords_wheels.append((t[i], -y, x))
x_w = [0]
x_g = [0]
for i in range(0, len(coords_gyro)):
x_w.append(coords_wheels[i][1])
x_g.append(coords_gyro[i][1])
x_matrix = np.matrix([x_w, x_g]).transpose()
Q = 0.5
R = np.matrix([[100, 0], [0, 100]]).transpose()
y_w = [0]
y_g = [0]
for i in range(0, len(coords_gyro)):
y_w.append(coords_wheels[i][2])
y_g.append(coords_gyro[i][2])
y_matrix = np.matrix([y_w, y_g]).transpose()
x_kalman = kalman.apply_filter_x(x_matrix, Q, R, (len(x_w),)).tolist()
y_kalman = kalman.apply_filter_x(y_matrix, Q, R, (len(y_w),)).tolist()
print_plot(plots=(t, y_kalman, x_kalman), title="Kalman with 2 sensors")
def particle_filter():
vl = []
vr = []
t = []
dist = [sonar_zero_distance]
angle = [0]
with open('log_robot_2.csv') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';')
is_init = False
for row in spamreader:
try:
t.append(float(row[0]))
if not is_init:
t.append(float(row[0]))
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
is_init = True
dist.append(float(row[1]))
angle.append(float(row[2]) * math.pi / 180)
vl.append((float(row[3])) * magic_coeff)
vr.append((float(row[4])) * magic_coeff)
except ValueError:
pass
particle.run_pf1(N=5000, plot_particles=True, vl=vl, vr=vr, t=t, angle=angle, dist=dist,
initial_x=(10, 10, np.pi / 4))
if __name__ == '__main__':
follow_by_wheels()
follow_by_gyro()
follow_by_gyro_kalman()
print_log_camera()
print_log_camera_kalman()
sensor_fusion()
# seed(2)
# particle_filter()
|
py | 1a3222e59c57ae715f5eec95e0cebe28e2c1bb16 | from config_handler import ConfigHandler
import smtplib
class EmailNotifier:
def __init__(self):
self.config_data = ConfigHandler()
self.gmail_smtp_server = None
self.gmail_smtp_port = None
self.sender_email = None
self.sender_email_pass = None
self.gmail_smtp_server, self.gmail_smtp_port, self.sender_email, self.sender_email_pass = self.config_data.get_email_data()
def send_emails(self, name, receiver, from_city, to_city, deal_price, deal_date):
"""
Function:
Send an email notification.
Params:
receiver: str
Receiver's email address
"""
with smtplib.SMTP(str(self.gmail_smtp_server), int(self.gmail_smtp_port)) as connection:
connection.starttls()
connection.login(user=self.sender_email, password=self.sender_email_pass)
connection.sendmail(from_addr=self.sender_email,
to_addrs=receiver,
msg=f'Subject: Deal found! {from_city} to {to_city}!\n\nHi {name}! The deal price is, INR {deal_price} only and it is available on {deal_date}')
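# Illustrative usage sketch (not part of the original module): all values below are
# placeholders, and ConfigHandler must supply valid Gmail SMTP settings for the
# message to actually be sent.
if __name__ == '__main__':
    notifier = EmailNotifier()
    notifier.send_emails(
        name="Alex",
        receiver="alex@example.com",
        from_city="Delhi",
        to_city="Mumbai",
        deal_price=4999,
        deal_date="2024-01-15",
    )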
|
py | 1a32231167d34b04a188cf158e3aec88f8a56253 |
class Vendor(object):
def __init__(self):
self._vendor = None # altera, lattice, xilinx
self._model = None # vendor model
@property
def vendor(self):
return self._vendor
@vendor.setter
def vendor(self, v):
assert v.lower() in ('altera', 'lattice', 'xilinx')
self._vendor = v.lower()
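# Illustrative sketch (not part of the original file): the setter lower-cases its
# input and the assert rejects anything other than 'altera', 'lattice' or 'xilinx'.
if __name__ == '__main__':
    v = Vendor()
    v.vendor = 'Xilinx'
    print(v.vendor)  # prints "xilinx"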
|
py | 1a3223a81984c3aac0c10e0a5902a598da8f8e2d | #! /usr/local/bin/python3
# Consume and display messages from a Kafka topic
import argparse
from kafka import KafkaConsumer
def parse():
"""Parse command line"""
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--brokers', default='kafka:9092', help='Kafka bootstrap brokers')
parser.add_argument('-t', '--topic', default='test-topic', help='Name of topic to consume from')
return parser.parse_args()
if __name__ == '__main__':
args = parse()
# Create Kafka consumer client
consumer = KafkaConsumer(bootstrap_servers=args.brokers)
# Subscribe to topic
print('Subscribing to topic {}'.format(args.topic))
consumer.subscribe(args.topic)
try:
# Poll the topic for new messages
for msg in consumer:
# Decode the value for display
decoded_val = msg.value.decode('utf-8')
# Display the value of the message that was consumed
print('Consumed message from {}: "{}"'.format(args.topic, decoded_val))
except KeyboardInterrupt:
consumer.close()
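# Example invocation (hypothetical script name and broker address):
#   python consumer.py --brokers localhost:9092 --topic test-topic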
|
py | 1a32246d9868d618fc63c3f378e638d62b91e5f6 | import json
from sqlalchemy import Column, Integer, SmallInteger, String, ForeignKey, Text, JSON, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from ...policy import Policy, ALLOW_ACCESS, DENY_ACCESS, TYPE_STRING_BASED
from ...rules.base import Rule
from ...parser import compile_regex
Base = declarative_base()
class PolicySubjectModel(Base):
"""Storage model for policy subjects"""
__tablename__ = 'vakt_policy_subjects'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
subject = Column(JSON(), comment='JSON value for rule-based policies')
subject_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
subject_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyResourceModel(Base):
"""Storage model for policy resources"""
__tablename__ = 'vakt_policy_resources'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
resource = Column(JSON(), comment='JSON value for rule-based policies')
resource_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
resource_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyActionModel(Base):
"""Storage model for policy actions"""
__tablename__ = 'vakt_policy_actions'
id = Column(Integer, primary_key=True)
uid = Column(String(255), ForeignKey('vakt_policies.uid', ondelete='CASCADE'))
action = Column(JSON(), comment='JSON value for rule-based policies')
action_string = Column(String(255), index=True, comment='Initial string value for string-based policies')
action_regex = Column(String(520),
index=True,
comment='Regexp from initial string value for string-based policies')
class PolicyModel(Base):
"""Storage model for policy"""
__tablename__ = 'vakt_policies'
uid = Column(String(255), primary_key=True)
type = Column(SmallInteger)
description = Column(Text())
effect = Column(Boolean())
context = Column(JSON())
subjects = relationship(PolicySubjectModel, passive_deletes=True, lazy='joined')
resources = relationship(PolicyResourceModel, passive_deletes=True, lazy='joined')
actions = relationship(PolicyActionModel, passive_deletes=True, lazy='joined')
@classmethod
def from_policy(cls, policy):
"""
Instantiate from policy object
:param policy: object of type policy
"""
rvalue = cls()
return cls._save(policy, model=rvalue)
def update(self, policy):
"""
Update object attributes to match given policy
:param policy: object of type policy
"""
self._save(policy, model=self)
def to_policy(self):
"""
Create a policy object
:return: object of type `Policy`
"""
return Policy(uid=self.uid,
effect=ALLOW_ACCESS if self.effect else DENY_ACCESS,
description=self.description,
context=Rule.from_json(self.context),
subjects=[
self._policy_element_from_db(self.type, x.subject, x.subject_string)
for x in self.subjects
],
resources=[
self._policy_element_from_db(self.type, x.resource, x.resource_string)
for x in self.resources
],
actions=[
self._policy_element_from_db(self.type, x.action, x.action_string)
for x in self.actions
])
@classmethod
def _save(cls, policy, model):
"""
Helper to create PolicyModel from Policy object for add and update operations.
:param policy: object of type Policy
:param model: object of type PolicyModel
"""
policy_json = policy.to_json()
policy_dict = json.loads(policy_json)
model.uid = policy_dict['uid']
model.type = policy_dict['type']
model.effect = policy_dict['effect'] == ALLOW_ACCESS
model.description = policy_dict['description']
model.context = json.dumps(policy_dict['context'])
model.subjects = [
PolicySubjectModel(subject=x, subject_string=string, subject_regex=compiled)
for y in policy_dict['subjects']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
model.resources = [
PolicyResourceModel(resource=x, resource_string=string, resource_regex=compiled)
for y in policy_dict['resources']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
model.actions = [
PolicyActionModel(action=x, action_string=string, action_regex=compiled)
for y in policy_dict['actions']
for (x, string, compiled) in cls._policy_element_to_db(policy, y)
]
return model
@classmethod
def _policy_element_to_db(cls, policy, el):
json_value, string_value, compiled = None, None, None
if policy.type == TYPE_STRING_BASED:
string_value = el
if policy.start_tag in el and policy.end_tag in el:
compiled = compile_regex(el, policy.start_tag, policy.end_tag).pattern
        else:  # it's a rule-based policy and its value is JSON
json_value = json.dumps(el)
yield (json_value, string_value, compiled)
@classmethod
def _policy_element_from_db(cls, policy_type, element_json, element_string):
if policy_type == TYPE_STRING_BASED:
return element_string
return Rule.from_json(element_json)
|
py | 1a32256916e7317feed03f7316b77ccfce87106a | import collections
import contextlib
import inspect
import json
import jsonschema
import numpy as np
import pandas as pd
# If DEBUG_MODE is True, then schema objects are converted to dict and
# validated at creation time. This slows things down, particularly for
# larger specs, but leads to much more useful tracebacks for the user.
# Individual schema classes can override this by setting the
# class-level _class_is_valid_at_instantiation attribute to False
DEBUG_MODE = True
def enable_debug_mode():
global DEBUG_MODE
DEBUG_MODE = True
def disable_debug_mode():
global DEBUG_MODE
    DEBUG_MODE = False
@contextlib.contextmanager
def debug_mode(arg):
global DEBUG_MODE
original = DEBUG_MODE
DEBUG_MODE = arg
try:
yield
finally:
DEBUG_MODE = original
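# Illustrative usage (not part of the original module): debug_mode() temporarily
# overrides the module-level DEBUG_MODE flag, e.g.
#     with debug_mode(False):
#         ...  # build many schema objects without per-instance validation
# and restores the previous value on exit, even if an exception was raised.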
def _subclasses(cls):
"""Breadth-first sequence of all classes which inherit from cls."""
seen = set()
current_set = {cls}
while current_set:
seen |= current_set
current_set = set.union(*(set(cls.__subclasses__()) for cls in current_set))
for cls in current_set - seen:
yield cls
def _todict(obj, validate, context):
"""Convert an object to a dict representation."""
if isinstance(obj, SchemaBase):
return obj.to_dict(validate=validate, context=context)
elif isinstance(obj, (list, tuple, np.ndarray)):
return [_todict(v, validate, context) for v in obj]
elif isinstance(obj, dict):
return {
k: _todict(v, validate, context)
for k, v in obj.items()
if v is not Undefined
}
elif hasattr(obj, "to_dict"):
return obj.to_dict()
elif isinstance(obj, np.number):
return float(obj)
elif isinstance(obj, (pd.Timestamp, np.datetime64)):
return pd.Timestamp(obj).isoformat()
else:
return obj
def _resolve_references(schema, root=None):
"""Resolve schema references."""
resolver = jsonschema.RefResolver.from_schema(root or schema)
while "$ref" in schema:
with resolver.resolving(schema["$ref"]) as resolved:
schema = resolved
return schema
class SchemaValidationError(jsonschema.ValidationError):
"""A wrapper for jsonschema.ValidationError with friendlier traceback"""
def __init__(self, obj, err):
super(SchemaValidationError, self).__init__(**self._get_contents(err))
self.obj = obj
@staticmethod
def _get_contents(err):
"""Get a dictionary with the contents of a ValidationError"""
try:
# works in jsonschema 2.3 or later
contents = err._contents()
except AttributeError:
try:
# works in Python >=3.4
spec = inspect.getfullargspec(err.__init__)
except AttributeError:
# works in Python <3.4
spec = inspect.getargspec(err.__init__)
contents = {key: getattr(err, key) for key in spec.args[1:]}
return contents
def __str__(self):
cls = self.obj.__class__
schema_path = ["{}.{}".format(cls.__module__, cls.__name__)]
schema_path.extend(self.schema_path)
schema_path = "->".join(
str(val)
for val in schema_path[:-1]
if val not in ("properties", "additionalProperties", "patternProperties")
)
return """Invalid specification
{}, validating {!r}
{}
""".format(
schema_path, self.validator, self.message
)
class UndefinedType(object):
"""A singleton object for marking undefined attributes"""
__instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls.__instance, cls):
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def __repr__(self):
return "Undefined"
Undefined = UndefinedType()
class SchemaBase(object):
"""Base class for schema wrappers.
Each derived class should set the _schema class attribute (and optionally
the _rootschema class attribute) which is used for validation.
"""
_schema = None
_rootschema = None
_class_is_valid_at_instantiation = True
def __init__(self, *args, **kwds):
# Two valid options for initialization, which should be handled by
# derived classes:
# - a single arg with no kwds, for, e.g. {'type': 'string'}
# - zero args with zero or more kwds for {'type': 'object'}
if self._schema is None:
raise ValueError(
"Cannot instantiate object of type {}: "
"_schema class attribute is not defined."
"".format(self.__class__)
)
if kwds:
assert len(args) == 0
else:
assert len(args) in [0, 1]
# use object.__setattr__ because we override setattr below.
object.__setattr__(self, "_args", args)
object.__setattr__(self, "_kwds", kwds)
if DEBUG_MODE and self._class_is_valid_at_instantiation:
self.to_dict(validate=True)
def copy(self, deep=True, ignore=()):
"""Return a copy of the object
Parameters
----------
deep : boolean or list, optional
If True (default) then return a deep copy of all dict, list, and
SchemaBase objects within the object structure.
If False, then only copy the top object.
If a list or iterable, then only copy the listed attributes.
ignore : list, optional
A list of keys for which the contents should not be copied, but
only stored by reference.
"""
def _shallow_copy(obj):
if isinstance(obj, SchemaBase):
return obj.copy(deep=False)
elif isinstance(obj, list):
return obj[:]
elif isinstance(obj, dict):
return obj.copy()
else:
return obj
def _deep_copy(obj, ignore=()):
if isinstance(obj, SchemaBase):
args = tuple(_deep_copy(arg) for arg in obj._args)
kwds = {
k: (_deep_copy(v, ignore=ignore) if k not in ignore else v)
for k, v in obj._kwds.items()
}
with debug_mode(False):
return obj.__class__(*args, **kwds)
elif isinstance(obj, list):
return [_deep_copy(v, ignore=ignore) for v in obj]
elif isinstance(obj, dict):
return {
k: (_deep_copy(v, ignore=ignore) if k not in ignore else v)
for k, v in obj.items()
}
else:
return obj
try:
deep = list(deep)
except TypeError:
deep_is_list = False
else:
deep_is_list = True
if deep and not deep_is_list:
return _deep_copy(self, ignore=ignore)
with debug_mode(False):
copy = self.__class__(*self._args, **self._kwds)
if deep_is_list:
for attr in deep:
copy[attr] = _shallow_copy(copy._get(attr))
return copy
def _get(self, attr, default=Undefined):
"""Get an attribute, returning default if not present."""
attr = self._kwds.get(attr, Undefined)
if attr is Undefined:
attr = default
return attr
def __getattr__(self, attr):
# reminder: getattr is called after the normal lookups
if attr == "_kwds":
raise AttributeError()
if attr in self._kwds:
return self._kwds[attr]
else:
try:
_getattr = super(SchemaBase, self).__getattr__
except AttributeError:
_getattr = super(SchemaBase, self).__getattribute__
return _getattr(attr)
def __setattr__(self, item, val):
self._kwds[item] = val
def __getitem__(self, item):
return self._kwds[item]
def __setitem__(self, item, val):
self._kwds[item] = val
def __repr__(self):
if self._kwds:
args = (
"{}: {!r}".format(key, val)
for key, val in sorted(self._kwds.items())
if val is not Undefined
)
args = "\n" + ",\n".join(args)
return "{0}({{{1}\n}})".format(
self.__class__.__name__, args.replace("\n", "\n ")
)
else:
return "{}({!r})".format(self.__class__.__name__, self._args[0])
def __eq__(self, other):
return (
type(self) is type(other)
and self._args == other._args
and self._kwds == other._kwds
)
def to_dict(self, validate=True, ignore=None, context=None):
"""Return a dictionary representation of the object
Parameters
----------
validate : boolean or string
If True (default), then validate the output dictionary
against the schema. If "deep" then recursively validate
all objects in the spec. This takes much more time, but
it results in friendlier tracebacks for large objects.
ignore : list
A list of keys to ignore. This will *not* passed to child to_dict
function calls.
context : dict (optional)
A context dictionary that will be passed to all child to_dict
function calls
Returns
-------
dct : dictionary
The dictionary representation of this object
Raises
------
jsonschema.ValidationError :
if validate=True and the dict does not conform to the schema
"""
if context is None:
context = {}
if ignore is None:
ignore = []
sub_validate = "deep" if validate == "deep" else False
if self._args and not self._kwds:
result = _todict(self._args[0], validate=sub_validate, context=context)
elif not self._args:
result = _todict(
{k: v for k, v in self._kwds.items() if k not in ignore},
validate=sub_validate,
context=context,
)
else:
raise ValueError(
"{} instance has both a value and properties : "
"cannot serialize to dict".format(self.__class__)
)
if validate:
try:
self.validate(result)
except jsonschema.ValidationError as err:
raise SchemaValidationError(self, err)
return result
def to_json(
self, validate=True, ignore=[], context={}, indent=2, sort_keys=True, **kwargs
):
"""Emit the JSON representation for this object as a string.
Parameters
----------
validate : boolean or string
If True (default), then validate the output dictionary
against the schema. If "deep" then recursively validate
all objects in the spec. This takes much more time, but
it results in friendlier tracebacks for large objects.
ignore : list
A list of keys to ignore. This will *not* passed to child to_dict
function calls.
context : dict (optional)
A context dictionary that will be passed to all child to_dict
function calls
indent : integer, default 2
the number of spaces of indentation to use
sort_keys : boolean, default True
if True, sort keys in the output
**kwargs
Additional keyword arguments are passed to ``json.dumps()``
Returns
-------
spec : string
The JSON specification of the chart object.
"""
dct = self.to_dict(validate=validate, ignore=ignore, context=context)
return json.dumps(dct, indent=indent, sort_keys=sort_keys, **kwargs)
@classmethod
def _default_wrapper_classes(cls):
"""Return the set of classes used within cls.from_dict()"""
return _subclasses(SchemaBase)
@classmethod
def from_dict(cls, dct, validate=True, _wrapper_classes=None):
"""Construct class from a dictionary representation
Parameters
----------
dct : dictionary
The dict from which to construct the class
validate : boolean
If True (default), then validate the input against the schema.
_wrapper_classes : list (optional)
The set of SchemaBase classes to use when constructing wrappers
of the dict inputs. If not specified, the result of
cls._default_wrapper_classes will be used.
Returns
-------
obj : Schema object
The wrapped schema
Raises
------
jsonschema.ValidationError :
if validate=True and dct does not conform to the schema
"""
if validate:
cls.validate(dct)
if _wrapper_classes is None:
_wrapper_classes = cls._default_wrapper_classes()
converter = _FromDict(_wrapper_classes)
return converter.from_dict(dct, cls)
@classmethod
def from_json(cls, json_string, validate=True, **kwargs):
"""Instantiate the object from a valid JSON string
Parameters
----------
json_string : string
The string containing a valid JSON chart specification.
validate : boolean
If True (default), then validate the input against the schema.
**kwargs :
Additional keyword arguments are passed to json.loads
Returns
-------
chart : Chart object
The altair Chart object built from the specification.
"""
dct = json.loads(json_string, **kwargs)
return cls.from_dict(dct, validate=validate)
@classmethod
def validate(cls, instance, schema=None):
"""
Validate the instance against the class schema in the context of the
rootschema.
"""
if schema is None:
schema = cls._schema
resolver = jsonschema.RefResolver.from_schema(cls._rootschema or cls._schema)
return jsonschema.validate(instance, schema, resolver=resolver)
@classmethod
def resolve_references(cls, schema=None):
"""Resolve references in the context of this object's schema or root schema."""
return _resolve_references(
schema=(schema or cls._schema),
root=(cls._rootschema or cls._schema or schema),
)
@classmethod
def validate_property(cls, name, value, schema=None):
"""
Validate a property against property schema in the context of the
rootschema
"""
value = _todict(value, validate=False, context={})
props = cls.resolve_references(schema or cls._schema).get("properties", {})
resolver = jsonschema.RefResolver.from_schema(cls._rootschema or cls._schema)
return jsonschema.validate(value, props.get(name, {}), resolver=resolver)
def __dir__(self):
return list(self._kwds.keys())
class _FromDict(object):
"""Class used to construct SchemaBase class hierarchies from a dict
The primary purpose of using this class is to be able to build a hash table
that maps schemas to their wrapper classes. The candidate classes are
specified in the ``class_list`` argument to the constructor.
"""
_hash_exclude_keys = ("definitions", "title", "description", "$schema", "id")
def __init__(self, class_list):
# Create a mapping of a schema hash to a list of matching classes
# This lets us quickly determine the correct class to construct
self.class_dict = collections.defaultdict(list)
for cls in class_list:
if cls._schema is not None:
self.class_dict[self.hash_schema(cls._schema)].append(cls)
@classmethod
def hash_schema(cls, schema, use_json=True):
"""
Compute a python hash for a nested dictionary which
properly handles dicts, lists, sets, and tuples.
At the top level, the function excludes from the hashed schema all keys
        listed in `_hash_exclude_keys`.
This implements two methods: one based on conversion to JSON, and one based
on recursive conversions of unhashable to hashable types; the former seems
to be slightly faster in several benchmarks.
"""
if cls._hash_exclude_keys and isinstance(schema, dict):
schema = {
key: val
for key, val in schema.items()
if key not in cls._hash_exclude_keys
}
if use_json:
s = json.dumps(schema, sort_keys=True)
return hash(s)
else:
def _freeze(val):
if isinstance(val, dict):
return frozenset((k, _freeze(v)) for k, v in val.items())
elif isinstance(val, set):
return frozenset(map(_freeze, val))
elif isinstance(val, list) or isinstance(val, tuple):
return tuple(map(_freeze, val))
else:
return val
return hash(_freeze(schema))
def from_dict(self, dct, cls=None, schema=None, rootschema=None):
"""Construct an object from a dict representation"""
if (schema is None) == (cls is None):
raise ValueError("Must provide either cls or schema, but not both.")
if schema is None:
schema = schema or cls._schema
rootschema = rootschema or cls._rootschema
rootschema = rootschema or schema
def _passthrough(*args, **kwds):
return args[0] if args else kwds
if isinstance(dct, SchemaBase):
return dct
if cls is None:
# If there are multiple matches, we use the first one in the dict.
# Our class dict is constructed breadth-first from top to bottom,
# so the first class that matches is the most general match.
matches = self.class_dict[self.hash_schema(schema)]
cls = matches[0] if matches else _passthrough
schema = _resolve_references(schema, rootschema)
if "anyOf" in schema or "oneOf" in schema:
schemas = schema.get("anyOf", []) + schema.get("oneOf", [])
for possible_schema in schemas:
resolver = jsonschema.RefResolver.from_schema(rootschema)
try:
jsonschema.validate(dct, possible_schema, resolver=resolver)
except jsonschema.ValidationError:
continue
else:
return self.from_dict(
dct,
schema=possible_schema,
rootschema=rootschema,
)
if isinstance(dct, dict):
# TODO: handle schemas for additionalProperties/patternProperties
props = schema.get("properties", {})
kwds = {}
for key, val in dct.items():
if key in props:
val = self.from_dict(val, schema=props[key], rootschema=rootschema)
kwds[key] = val
return cls(**kwds)
elif isinstance(dct, list):
item_schema = schema.get("items", {})
dct = [
self.from_dict(val, schema=item_schema, rootschema=rootschema)
for val in dct
]
return cls(dct)
else:
return cls(dct)
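# Minimal usage sketch (added for illustration, not part of the original module);
# _Note is a hypothetical subclass wrapping a trivial string schema.
if __name__ == "__main__":
    class _Note(SchemaBase):
        _schema = {"type": "string"}

    note = _Note("hello")
    print(note.to_dict())            # -> hello
    print(_Note.from_dict("hello"))  # -> _Note('hello')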
|
py | 1a32259bebd789be6c36d1bbbb2c14fa458cca80 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import time
sys.path.append('./t')
from http_utils import *
VERBOSE = False
BASE_URL = 'http://0.0.0.0:8081'
# =============
#
print('[+] Test status codes')
http_codes = [
200,
201,
202,
# NO-Content !!!
# 204,
206,
# Moved !!! [[
# 300,
# 301,
# 302,
# ]]
# See others !!! [[
# 303,
# ]]
# Not modified [[
# 304,
# ]]
    # Temporarily redirected [[
# 307,
# ]]
400,
401,
403,
404,
405,
408,
409,
411,
412,
413,
414,
415,
416,
421,
500,
501,
502,
503,
504,
507
]
def do_post(url, code, headers):
return post_2(url, {'params':[1, 2]}, headers)
def do_get(url, code, headers):
    # Python's urllib2 does not support these codes! [[
if code > 200:
return (True, [])
# ]]
return get_2(url, [], headers)
methods = [
[do_post, 'POST'],
[do_get, 'GET']
]
prev_result = None
for method in methods:
for code in http_codes:
curl = BASE_URL + '/lua?status_code=' + str(code)
(rcode, result) = method[0](curl, code, {'X-From': 'eval_basic'})
# Python does not work if server returns some codes!
if rcode == True:
continue;
assert(code == rcode)
print('[+] OK')
|
py | 1a3225a5ba5b93c2a1acb859d585fc9c311cd4ec | import nltk
from nltk.model import build_vocabulary, count_ngrams, LaplaceNgramModel, LidstoneNgramModel
'''
lincoln_address_file = open('files/FirstInauguralAddress.txt')
raw_lincoln_address = lincoln_address_file.read().lower()
# lb_train_1 = raw_lincoln_address.lower().split()
lb_train_1_sents = nltk.sent_tokenize(raw_lincoln_address, language="english")
lb_train_1_words = nltk.word_tokenize(raw_lincoln_address, language='english')
'''
'''
gettysburg_address_file = open('files/Gettysburg.txt')
raw_gettysburg_address = gettysburg_address_file.read().lower()
# lb_train_2 = raw_gettysburg_address.lower().split()
lb_train_2_sents = nltk.sent_tokenize(raw_gettysburg_address, language="english")
lb_train_2_words = nltk.word_tokenize(raw_gettysburg_address, language='english')
'''
lb_train_file = open('files/LB-Train.txt')
raw_lb_train_file = lb_train_file.read().lower()
lb_train_words = nltk.word_tokenize(raw_lb_train_file, language='english')
lb_vocab = build_vocabulary(2, lb_train_words)
# lb_vocab = build_vocabulary(1, lb_train_1_words, lb_train_2_words)
# print(lb_vocab)
lb_train = []
lb_train.append(lb_train_words)
'''
lb_train.append(lb_train_1_words)
lb_train.append(lb_train_2_words)
'''
# print(lb_train)
lb_bigram_counts = count_ngrams(2, lb_vocab, lb_train)
# print(lb_bigram_counts.ngrams[2])
# print(sorted(lb_bigram_counts.ngrams[2].conditions()))
lb = LidstoneNgramModel(0.2, lb_bigram_counts)
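# Lidstone smoothing adds a constant gamma (0.2 here) to every bigram count before
# normalizing, so bigrams unseen in training still get a small nonzero probability
# and the perplexity on held-out text stays finite.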
# print("lincoln score ", lb.score("never", ["had"]))
lincoln_address_file_2 = open('files/SecondInauguralAddress.txt')
lb_test = lincoln_address_file_2.read().lower()
lb_test_words = nltk.word_tokenize(lb_test)
print("Perplexity of LB on LB-Test = ", lb.perplexity(lb_test_words))
'''
for ngram in lb_bigram_counts.to_ngrams(lb_test_words):
print(ngram)
'''
'''
nelson_address_file = open('files/IamPreparedToDie.txt')
raw_nelson_address = nelson_address_file.read().lower()
# mb_train_1 = raw_nelson_address.lower().split()
mb_train_1_sents = nltk.sent_tokenize(raw_nelson_address, language="english")
mb_train_1_words = nltk.word_tokenize(raw_nelson_address, language="english")
freedom_award_file = open('files/InternationalFreedomAward.txt')
raw_freedom_award = freedom_award_file.read().lower()
# mb_train_2 = raw_freedom_award.lower().split()
mb_train_2_sents = nltk.sent_tokenize(raw_freedom_award, language='english')
mb_train_2_words = nltk.word_tokenize(raw_freedom_award, language='english')
'''
mb_train_file = open('files/MB-Train.txt')
raw_mb_train_file = mb_train_file.read().lower()
mb_train_words = nltk.word_tokenize(raw_mb_train_file, language='english')
mb_vocab = build_vocabulary(2, mb_train_words)
# mb_vocab = build_vocabulary(1, mb_train_1_words, mb_train_2_words)
mb_train = []
mb_train.append(mb_train_words)
'''
mb_train.append(lb_train_1_words)
mb_train.append(lb_train_2_words)
'''
mb_bigram_counts = count_ngrams(2, mb_vocab, mb_train)
mb = LidstoneNgramModel(0.2, mb_bigram_counts)
# print("mandela score ", mb.score("the", ["and"]))
nelson_address_file_2 = open('files/AfricanNationalCongress.txt')
mb_test = nelson_address_file_2.read().lower()
mb_test_words = nltk.word_tokenize(mb_test)
print("Perplexity of MB on MB-Test = ", mb.perplexity(mb_test_words))
# print("Perplexity of MB on LB-Test = ", mb.perplexity(lb_test_words))
# print("Perplexity of LB on MB-Test = ", lb.perplexity(mb_test_words))
print("Perplexity of LB on LB-Train = ", lb.perplexity(lb_train_words))
print("Perplexity of MB on MB-Train = ", mb.perplexity(mb_train_words))
print("Perplexity of MB on LB-Train = ", mb.perplexity(lb_train_words))
print("Perplexity of LB on MB-Train = ", lb.perplexity(mb_train_words))
|
py | 1a3225d125a515b335048d96dfc150bd832acc24 | default_app_config = "pinaxcon.registrasion.apps.RegistrasionConfig"
|
py | 1a322680575d33e13361bd7bb1f6b59276354367 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
autodoc_mock_imports = ['numpy', 'tifffile']
# -- Project information -----------------------------------------------------
project = 'pyCUDAdecon'
copyright = '2019, Talley Lambert'
author = 'Talley Lambert'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyCUDAdecondoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyCUDAdecon.tex', 'pyCUDAdecon Documentation',
'Talley Lambert', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycudadecon', 'pyCUDAdecon Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyCUDAdecon', 'pyCUDAdecon Documentation',
author, 'pyCUDAdecon', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
py | 1a32287135e625222e685eb11654d8d3243832ad | # Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import io
import os
import debug # pyflakes:ignore
from pyquery import PyQuery
from io import StringIO
from textwrap import wrap
from django.conf import settings
from django.urls import reverse as urlreverse
from ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory
from ietf.doc.models import ( Document, DocAlias, State, DocEvent,
BallotPositionDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent )
from ietf.doc.utils import create_ballot_if_not_open
from ietf.doc.views_status_change import default_approval_text
from ietf.group.models import Person
from ietf.iesg.models import TelechatDate
from ietf.utils.test_utils import TestCase
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from ietf.utils.test_utils import login_testing_unauthorized
class StatusChangeTests(TestCase):
def test_start_review(self):
url = urlreverse('ietf.doc.views_status_change.start_rfc_status_change')
login_testing_unauthorized(self, "secretary", url)
# normal get should succeed and get a reasonable form
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form select[name=create_in_state]')),1)
ad_strpk = str(Person.objects.get(name='Areað Irector').pk)
state_strpk = str(State.objects.get(slug='adrev',type__slug='statchg').pk)
# faulty posts
## Must set a responsible AD
r = self.client.post(url,dict(document_name="bogus",title="Bogus Title",ad="",create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must set a name
r = self.client.post(url,dict(document_name="",title="Bogus Title",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must not choose a document name that already exists
r = self.client.post(url,dict(document_name="imaginary-mid-review",title="Bogus Title",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
## Must set a title
r = self.client.post(url,dict(document_name="bogus",title="",ad=ad_strpk,create_in_state=state_strpk,notify='[email protected]'))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
# successful status change start
r = self.client.post(url,dict(document_name="imaginary-new",title="A new imaginary status change",ad=ad_strpk,
create_in_state=state_strpk,notify='[email protected]',new_relation_row_blah="rfc9999",
statchg_relation_row_blah="tois"))
self.assertEqual(r.status_code, 302)
status_change = Document.objects.get(name='status-change-imaginary-new')
self.assertEqual(status_change.get_state('statchg').slug,'adrev')
self.assertEqual(status_change.rev,'00')
self.assertEqual(status_change.ad.name,'Areað Irector')
self.assertEqual(status_change.notify,'[email protected]')
self.assertTrue(status_change.relateddocument_set.filter(relationship__slug='tois',target__docs__name='draft-ietf-random-thing'))
def test_change_state(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form select[name=new_state]')),1)
# faulty post
r = self.client.post(url,dict(new_state=""))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form .is-invalid')) > 0)
# successful change to AD Review
adrev_pk = str(State.objects.get(slug='adrev',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=adrev_pk,comment='RDNK84ZD'))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'adrev')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('RDNK84ZD'))
self.assertFalse(doc.active_ballot())
# successful change to Last Call Requested
messages_before = len(outbox)
doc.ad = Person.objects.get(user__username='ad')
doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type="changed_document", by=Person.objects.get(user__username="secretary"), desc="Test")])
lc_req_pk = str(State.objects.get(slug='lc-req',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=lc_req_pk))
self.assertEqual(r.status_code, 200)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'lc-req')
self.assertEqual(len(outbox), messages_before + 1)
self.assertTrue('Last Call:' in outbox[-1]['Subject'])
# successful change to IESG Evaluation
iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=iesgeval_pk,comment='TGmZtEjt'))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state('statchg').slug,'iesgeval')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('TGmZtEjt'))
self.assertTrue(doc.active_ballot())
self.assertEqual(doc.latest_event(BallotPositionDocEvent, type="changed_ballot_position").pos_id,'yes')
def test_edit_notices(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_doc.edit_notify;status-change',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form input[name=notify]')),1)
self.assertEqual(doc.notify,q('form input[name=notify]')[0].value)
# change notice list
newlist = '"Foo Bar" <[email protected]>'
r = self.client.post(url,dict(notify=newlist,save_addresses="1"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.notify,newlist)
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Notification list changed'))
# Some additional setup so there's something to put in a generated notify list
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# Ask the form to regenerate the list
r = self.client.post(url,dict(regenerate_addresses="1"))
self.assertEqual(r.status_code,200)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
# Regenerate does not save!
self.assertEqual(doc.notify,newlist)
q = PyQuery(r.content)
formlist = q('form input[name=notify]')[0].value
self.assertEqual(None,formlist)
def test_edit_title(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_title',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('input[name=title]')),1)
# change title
r = self.client.post(url,dict(title='New title'))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.title,'New title')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Title changed'))
def test_edit_ad(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_ad',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('select[name=ad]')),1)
# change ads
ad2 = Person.objects.get(name='Ad No2')
r = self.client.post(url,dict(ad=str(ad2.pk)))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.ad,ad2)
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Shepherding AD changed'))
def test_edit_telechat_date(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_doc.telechat_date;status-change',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('select[name=telechat_date]')),1)
# set a date
self.assertFalse(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat"))
telechat_date = TelechatDate.objects.active().order_by('date')[0].date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,telechat_date)
# move it forward a telechat (this should NOT set the returning item bit)
telechat_date = TelechatDate.objects.active().order_by('date')[1].date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertFalse(doc.returning_item())
# set the returning item bit without changing the date
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat(),returning_item="on"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertTrue(doc.returning_item())
# clear the returning item bit
r = self.client.post(url,dict(telechat_date=telechat_date.isoformat()))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertFalse(doc.returning_item())
# Take the doc back off any telechat
r = self.client.post(url,dict(telechat_date=""))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.latest_event(TelechatDocEvent, "scheduled_for_telechat").telechat_date,None)
def test_edit_lc(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.last_call',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.ad = Person.objects.get(name='Ad No2')
doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type="changed_document", by=Person.objects.get(user__username="secretary"), desc="Test")])
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('form.edit-last-call-text')),1)
self.assertContains(r, 'RFC9999 from Proposed Standard to Internet Standard')
self.assertContains(r, 'RFC9998 from Informational to Historic')
# save
r = self.client.post(url,dict(last_call_text="Bogus last call text",save_last_call_text="1"))
self.assertEqual(r.status_code, 200)
last_call_event = doc.latest_event(WriteupDocEvent, type="changed_last_call_text")
self.assertEqual(last_call_event.text,"Bogus last call text")
# reset
r = self.client.post(url,dict(regenerate_last_call_text="1"))
self.assertEqual(r.status_code,200)
self.assertContains(r, 'RFC9999 from Proposed Standard to Internet Standard')
self.assertContains(r, 'RFC9998 from Informational to Historic')
# request last call
messages_before = len(outbox)
r = self.client.post(url,dict(last_call_text='stuff',send_last_call_request='Save+and+Request+Last+Call'))
self.assertEqual(r.status_code,200)
self.assertContains(r, 'Last call requested')
self.assertEqual(len(outbox), messages_before + 1)
self.assertTrue('Last Call:' in outbox[-1]['Subject'])
self.assertTrue('Last Call Request has been submitted' in ''.join(wrap(outbox[-1].as_string(), width=2**16)))
def test_approve(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.approve',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
create_ballot_if_not_open(None, doc, Person.objects.get(user__username="secretary"), "statchg")
doc.set_state(State.objects.get(slug='appr-pend',type='statchg'))
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('[type=submit]:contains("Send announcement")')), 1)
# There should be two messages to edit
self.assertEqual(q('input#id_form-TOTAL_FORMS').val(),'2')
self.assertContains(r, '(rfc9999) to Internet Standard')
self.assertContains(r, '(rfc9998) to Historic')
# submit
messages_before = len(outbox)
msg0=default_approval_text(doc,doc.relateddocument_set.all()[0])
msg1=default_approval_text(doc,doc.relateddocument_set.all()[1])
r = self.client.post(url,{'form-0-announcement_text':msg0,'form-1-announcement_text':msg1,'form-TOTAL_FORMS':'2','form-INITIAL_FORMS':'2','form-MAX_NUM_FORMS':''})
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.get_state_slug(),'appr-sent')
self.assertFalse(doc.ballot_open("statchg"))
self.assertEqual(len(outbox), messages_before + 2)
self.assertTrue('Action:' in outbox[-1]['Subject'])
self.assertTrue('ietf-announce' in outbox[-1]['To'])
self.assertTrue('rfc-editor' in outbox[-1]['Cc'])
self.assertTrue('(rfc9998) to Historic' in ''.join(wrap(outbox[-1].as_string()+outbox[-2].as_string(), 2**16)))
self.assertTrue('(rfc9999) to Internet Standard' in ''.join(wrap(outbox[-1].as_string()+outbox[-2].as_string(),2**16)))
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('The following approval message was sent'))
def approval_pend_notice_test_helper(self, role):
"""Test notification email when review state changed to the appr-pend state"""
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
# Add some status change related documents
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# And a non-status change related document
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc14'),relationship_id='updates')
login_testing_unauthorized(self, role, url)
empty_outbox()
# Issue the request
appr_pend_pk = str(State.objects.get(used=True,
slug='appr-pend',
type__slug='statchg').pk)
r = self.client.post(url,dict(new_state=appr_pend_pk,comment='some comment or other'))
# Check the results
self.assertEqual(r.status_code, 302)
if role == 'ad':
self.assertEqual(len(outbox), 1)
notification = outbox[0]
self.assertIn(doc.title, notification['Subject'])
self.assertIn('[email protected]', notification['To'])
self.assertTrue(notification['Subject'].startswith('Approved:'))
notification_text = get_payload_text(notification)
self.assertIn('The AD has approved changing the status', notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9999').document.canonical_name(), notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9998').document.canonical_name(), notification_text)
self.assertNotIn(DocAlias.objects.get(name='rfc14').document.canonical_name(), notification_text)
self.assertNotIn('No value found for', notification_text) # make sure all interpolation values were set
else:
self.assertEqual(len(outbox), 0)
def test_approval_pend_notice_ad(self):
"""Test that an approval notice is sent to secretariat when AD approves status change"""
self.approval_pend_notice_test_helper('ad')
def test_no_approval_pend_notice_secr(self):
"""Test that no approval notice is sent when secretariat approves status change"""
self.approval_pend_notice_test_helper('secretariat')
def test_edit_relations(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.edit_relations',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
# get
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q('#content [type=submit]:contains("Save")')),1)
# There should be three rows on the form
self.assertEqual(len(q('#content .input-group')),3)
# Try to add a relation to an RFC that doesn't exist
r = self.client.post(url,dict(new_relation_row_blah="rfc9997",
statchg_relation_row_blah="tois"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Try to add a relation leaving the relation type blank
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah=""))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Try to add a relation with an unknown relationship type
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah="badslug"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(len(q('form ul.errorlist')) > 0)
# Successful change of relations
r = self.client.post(url,dict(new_relation_row_blah="rfc9999",
statchg_relation_row_blah="toexp",
new_relation_row_foo="rfc9998",
statchg_relation_row_foo="tobcp",
new_relation_row_nob="rfc14",
statchg_relation_row_nob="tohist"))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.relateddocument_set.count(),3)
def verify_relations(doc,target_name,status):
target_doc=doc.relateddocument_set.filter(target__name=target_name)
self.assertTrue(target_doc)
self.assertEqual(target_doc.count(),1)
self.assertEqual(target_doc[0].relationship.slug,status)
verify_relations(doc,'rfc9999','toexp' )
verify_relations(doc,'rfc9998','tobcp' )
verify_relations(doc,'rfc14' ,'tohist')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Affected RFC list changed.'))
def setUp(self):
super().setUp()
IndividualRfcFactory(alias2__name='rfc14',name='draft-was-never-issued',std_level_id='unkn')
WgRfcFactory(alias2__name='rfc9999',name='draft-ietf-random-thing',std_level_id='ps')
WgRfcFactory(alias2__name='rfc9998',name='draft-ietf-random-other-thing',std_level_id='inf')
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='[email protected]')
class StatusChangeSubmitTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['STATUS_CHANGE_PATH']
def test_initial_submission(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code,200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("Provide a description"))
# Faulty posts using textbox
# Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox
# sane post using textbox
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
self.assertEqual(doc.rev,'00')
self.assertFalse(os.path.exists(path))
r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1"))
self.assertEqual(r.status_code,302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.rev,'00')
with io.open(path) as f:
self.assertEqual(f.read(),"Some initial review text\n")
self.assertTrue( "mid-review-00" in doc.latest_event(NewRevisionDocEvent).desc)
def test_subsequent_submission(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_status_change.submit',kwargs=dict(name=doc.name))
login_testing_unauthorized(self, "ad", url)
# A little additional setup
# doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp
self.assertEqual(doc.rev,'00')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
        with io.open(path,'w') as f:
            f.write('This is the old proposal.')
# Put the old proposal into IESG review (exercises ballot tab when looking at an older revision below)
state_change_url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk)
r = self.client.post(state_change_url,dict(new_state=iesgeval_pk))
self.assertEqual(r.status_code, 302)
# normal get
r = self.client.get(url)
self.assertEqual(r.status_code,200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("This is the old proposal."))
# faulty posts trying to use file upload
# Copied from wgtracker tests - is this really testing the server code, or is it testing
# how client.post populates Content-Type?
test_file = StringIO("\x10\x11\x12") # post binary file
test_file.name = "unnamed"
r = self.client.post(url, dict(txt=test_file,submit_response="1"))
self.assertEqual(r.status_code, 200)
self.assertContains(r, "does not appear to be a text file")
# sane post uploading a file
test_file = StringIO("This is a new proposal.")
test_file.name = "unnamed"
r = self.client.post(url,dict(txt=test_file,submit_response="1"))
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.rev,'01')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
        with io.open(path) as f:
            self.assertEqual(f.read(),"This is a new proposal.")
self.assertTrue( "mid-review-01" in doc.latest_event(NewRevisionDocEvent).desc)
# verify reset text button works
r = self.client.post(url,dict(reset_text="1"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertTrue(q('textarea')[0].text.strip().startswith("Provide a description"))
# make sure we can see the old revision
url = urlreverse('ietf.doc.views_doc.document_main',kwargs=dict(name=doc.name,rev='00'))
r = self.client.get(url)
self.assertEqual(r.status_code,200)
self.assertContains(r, "This is the old proposal.")
def setUp(self):
super().setUp()
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='[email protected]') |
py | 1a3229f536b45d2817991346387a64d2c522192e | from __future__ import absolute_import, division, print_function
class FrozenInstanceError(AttributeError):
"""
A frozen/immutable instance has been attempted to be modified.
It mirrors the behavior of ``namedtuples`` by using the same error message
and subclassing :exc:`AttributeError`.
.. versionadded:: 16.1.0
"""
msg = "can't set attribute"
args = [msg]
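# A minimal usage sketch (an illustrative assumption, kept as a comment to avoid importing
# the package from inside its own exceptions module): with the public ``attr`` API, a
# frozen class raises FrozenInstanceError on mutation, e.g.:
#
#     import attr
#
#     @attr.s(frozen=True)
#     class Point(object):
#         x = attr.ib()
#
#     p = Point(1)
#     p.x = 2  # raises attr.exceptions.FrozenInstanceError ("can't set attribute")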
class AttrsAttributeNotFoundError(ValueError):
"""
An ``attrs`` function couldn't find an attribute that the user asked for.
.. versionadded:: 16.2.0
"""
class NotAnAttrsClassError(ValueError):
"""
A non-``attrs`` class has been passed into an ``attrs`` function.
.. versionadded:: 16.2.0
"""
class DefaultAlreadySetError(RuntimeError):
"""
A default has been set using ``attr.ib()`` and is attempted to be reset
using the decorator.
.. versionadded:: 17.1.0
"""
class UnannotatedAttributeError(RuntimeError):
"""
A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
annotation.
.. versionadded:: 17.3.0
"""
|
py | 1a322a989dc65aed128b1338e090c124904a0e0b | import logging
from celery import Celery
celery = Celery('tasks', broker='amqp://')
celery.conf.update(
CELERY_RESULT_BACKEND = "amqp",
CELERY_RESULT_SERIALIZER='json',
)
@celery.task
def add(x, y):
return x + y
@celery.task
def sleep(x):
    import time as _time  # local alias: the module-level name `time` is shadowed by the `time` task below
    _time.sleep(x)
    return x
@celery.task
def time():
import time
return time.time()
@celery.task
def error(msg):
raise Exception(msg)
@celery.task
def echo(msg):
return msg
@celery.task
def send_email(to='[email protected]', title='hi'):
logging.info("Sending email to '%s' with title '%s'" % (to, title))
if __name__ == "__main__":
celery.start()
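# A minimal usage sketch (an illustrative assumption, not part of the original file): with a
# broker reachable at amqp:// and a worker running (e.g. ``celery -A tasks worker``, assuming
# this module is named tasks.py), tasks are normally invoked asynchronously:
#
#     from tasks import add, send_email
#     result = add.delay(2, 3)       # returns an AsyncResult
#     print(result.get(timeout=10))  # -> 5, using the amqp result backend configured above
#     send_email.delay(to='[email protected]', title='hello')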
|
py | 1a322d615c3cd7f6f3af4dbc454604a044e97664 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Author : Viacheslav Zamaraev
# email : [email protected]
# Script Name : 02_csv2xlsx.py
# Created : 25th September 2019
# Last Modified : 25th September 2019
# Version : 1.0
# PIP : pip install pandas openpyxl
# RESULT : Excel File
# Modifications : 1.1 -
# : 1.2 -
#
# Description   : This script will convert a csv file to an Excel file
import os.path
from datetime import datetime
from sys import platform as _platform
try:
    import pandas as pd
except ImportError:
    print("we need pandas. try: pip install pandas")
#some global configurations
import cfg
def get_output_directory():
dir_out = str(os.getcwd())
# Linux platform
if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
dir_out = cfg.folder_out_linux
if os.path.exists(dir_out) and os.path.isdir(dir_out):
print('Using Output directory: ' + dir_out)
return dir_out
if _platform == "win32" or _platform == "win64": # Windows or Windows 64-bit
dir_out = cfg.folder_out_win
if os.path.exists(dir_out) and os.path.isdir(dir_out):
print('Using Output directory: ' + dir_out)
return dir_out
    # fall back to the current working directory when no configured directory exists
    dir_out = str(os.getcwd())
    print('Output directories from config wrong: ' + cfg.folder_out_win + ' or ' + cfg.folder_out_linux + ' Using current directory: ' + dir_out)
    print('Using Output directory: ' + dir_out)
    return dir_out
def csv2xls(filename=''):
if (os.path.exists(filename) and os.path.isfile(filename)):
file_excel = filename.split('.')[0] + '.xlsx'
df_new = pd.read_csv(filename, sep=cfg.csv_delimiter)
writer = pd.ExcelWriter(file_excel)
df_new.to_excel(writer, index=False)
writer.save()
else:
print('ERROR! can\'t read a file OR file does not exist. File: ' + filename)
# ---------------- do main --------------------------------
def main():
time1 = datetime.now()
print('Starting at :' + str(time1))
file_csv = str(os.path.join(get_output_directory(), cfg.file_csv))
csv2xls(file_csv)
time2 = datetime.now()
print('Finishing at :' + str(time2))
print('Total time : ' + str(time2 - time1))
print('DONE !!!!')
if __name__ == '__main__':
main() |
py | 1a322d68c4d2902d2b1904e66af4c33a7b4f82f8 | # %% [markdown]
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dense, GRU, Embedding
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
import re
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models
from tensorflow.keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data()
print(X_train[0])
print(y_train[0])
# %% [markdown]
word_to_index = imdb.get_word_index()
index_to_word = {}
for key, value in word_to_index.items():
index_to_word[value + 3] = key
# %%
vocab_size = 10000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocab_size)
max_len = 500
X_train = pad_sequences(sequences=X_train, maxlen=max_len)
X_test = pad_sequences(sequences=X_test, maxlen=max_len)
# %% [markdown]
model = Sequential()
model.add(Embedding(input_dim=vocab_size, output_dim=100))
model.add(GRU(units=128))
model.add(Dense(units=1, activation="sigmoid"))
es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=4)
mc = ModelCheckpoint(
"GRU_model.h5", monitor="val_acc", mode="max", verbose=1, save_best_only=True
)
model.compile(optimizer="rmsprop", loss="binary_crossentropy", metrics=["acc"])
history = model.fit(
X_train, y_train, epochs=15, callbacks=[es, mc], batch_size=60, validation_split=0.2
)
# %%
loaded_model = load_model("GRU_model.h5")
print("\nTest accuracy: %.4f" % (loaded_model.evaluate(X_test, y_test)[1]))
# %%
def sentiment_predict(new_sentence):
    # Remove everything except letters and digits, then lowercase
    new_sentence = re.sub("[^0-9a-zA-Z ]", "", new_sentence).lower()
    # Integer-encode the words
    encoded = []
    for word in new_sentence.split():
        # Restrict the vocabulary size to 10,000.
        try:
            if word_to_index[word] <= 10000:
                encoded.append(word_to_index[word] + 3)
            else:
                # Indices above 10,000 are treated as the <unk> token.
                encoded.append(2)
        # Words that are not in the vocabulary are treated as the <unk> token.
        except KeyError:
            encoded.append(2)
    pad_new = pad_sequences([encoded], maxlen=max_len)  # padding
    score = float(loaded_model.predict(pad_new))  # prediction
    if score > 0.5:
        print("This is a positive review with {:.2f}% probability.".format(score * 100))
    else:
        print("This is a negative review with {:.2f}% probability.".format((1 - score) * 100))
# %%
a = "This movie was just way too overrated. The fighting was not professional and in slow motion. I was expecting more from a 200 million budget movie. The little sister of T.Challa was just trying too hard to be funny. The story was really dumb as well. Don't watch this movie if you are going because others say its great unless you are a Black Panther fan or Marvels fan."
sentiment_predict(a)
|
py | 1a322e249514327b383d17696750417de3e363bb | #!C:\xampp\htdocs\map\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
py | 1a322f2f69cf146e70175351a37c9dafe1ff2964 | from pocketsphinx import *
import pyaudio
from time import sleep
import requests
import json
from configobj import ConfigObj
import os.path
import wave
# pyaudio is needed to open the audio stream that feeds the decoder
pyAudio = pyaudio.PyAudio()
headers = {'Content-Type':'application/json'}
config = None
commandAudioFile = None
class CommandListener:
def __init__(self):
#config
self.hmm = config['pocketsphinx']['hmm']
self.lm = config['pocketsphinx']['lm']
self.dict = config['pocketsphinx']['dict']
self.log = config['pocketsphinx']['log']
self.bitsize = int(config['audio']['bitsize'])
self.bufferSize = int(config['audio']['buffersize'])
self.channels = int(config['audio']['channels'])
self.sampleRate = int(config['audio']['samplerate'])
#set decoder configuration
self.config = Decoder.default_config()
self.config.set_string('-hmm',self.hmm)
self.config.set_string('-lm',self.lm)
self.config.set_string('-dict',self.dict)
self.config.set_string('-logfn', self.log)
self.config.set_boolean("-allphone_ci", True)
self.decoder = Decoder(self.config)
def fromAudio(self):
config = {
'hmm':self.hmm,
'lm': self.lm,
'dict':self.dict
}
ps = Pocketsphinx(**config)
ps.decode(
audio_file= commandAudioFile,
buffer_size= self.bufferSize,
no_search= False,
full_utt= False,
)
return ps.hypothesis()
def listen(self):
# get stream from pyAudio
# open stream
self.stream = pyAudio.open(format=pyaudio.paInt16, channels=self.channels, rate=self.sampleRate, input=True, frames_per_buffer=self.bitsize)
utterance = False
#start utterance
self.decoder.start_utt()
print("Listening...")
# now we are starting to listen
while True:
#check if an external command is used by the user
if os.path.isfile(commandAudioFile):
#stop the utterance
self.decoder.end_utt()
print("external command detected - Processing...")
#get the command from the audio file
commandFromAudio = self.fromAudio()
#check if a command is detected
if not commandFromAudio:
commandFromAudio = ""
print("No command found in file!")
#audio file not needed anymore
os.remove(commandAudioFile)
print("external command processed - Deleting...")
self.stream.stop_stream()
self.stream.close()
#return the command from the audio
return commandFromAudio
            soundBite = None
            try:
                soundBite = self.stream.read(self.bitsize)
            except Exception:
                # ignore transient read errors (e.g. buffer overruns) and keep listening
                pass
if soundBite:
self.decoder.process_raw(soundBite, False, False)
inSpeech = self.decoder.get_in_speech()
if inSpeech and not utterance:
utterance = True
                if utterance and not inSpeech:
                    #end utterance once speech has stopped
                    self.decoder.end_utt()
                    utterance = False
                    #get hypothesis from the decoder
hypothesis = self.decoder.hyp()
if hypothesis is not None:
bestGuess = hypothesis.hypstr
#check for empty command
if not bestGuess.strip():
#restart utterance
sleep(0.5)
self.decoder.start_utt()
else:
#stop the stream
self.stream.stop_stream()
self.stream.close()
#return the bestGuess of the decoder
return bestGuess
if __name__ == "__main__":
config = ConfigObj('pihome.conf')
#get path to audio file
commandAudioFile = config['audio']['audiofile']
#get backend url
backendUrl = config['backend']['url']
#Listener for the commands the user is speaking out
listener = CommandListener()
while True:
#listen for the next command of the user
command = listener.listen()
print("command:" + command)
#let the backend know what the user said
try:
res = requests.post(backendUrl, data=json.dumps({'command':command}), headers=headers);
except Exception as ex:
print(ex)
pass
|
py | 1a3230ea375a78639fcc80e97097ab0f4a8f8a6f | """Reader is module to read the url list and return shards"""
import pandas as pd
import math
import fsspec
class Reader:
"""
The reader class reads an url list and returns shards
It provides an iter method
It provides attributes:
- column_list: the list of columns to read
- input_format: the format of the input file
- url_col: the column name of the url
- caption_col: the column name of the caption
- save_additional_columns: the list of additional columns to save
- number_sample_per_shard: the number of samples per shard
- start_shard_id: the id of the first shard
"""
def __init__(
self,
url_list,
input_format,
url_col,
caption_col,
save_additional_columns,
number_sample_per_shard,
start_shard_id,
tmp_path,
) -> None:
self.input_format = input_format
self.url_col = url_col
self.caption_col = caption_col
self.save_additional_columns = save_additional_columns
self.number_sample_per_shard = number_sample_per_shard
self.start_shard_id = start_shard_id
fs, url_path = fsspec.core.url_to_fs(url_list)
self.fs = fs
self.tmp_path = tmp_path
if fs.isdir(url_path):
self.input_files = sorted(fs.glob(url_path + "/*." + input_format))
else:
self.input_files = [url_path]
if self.input_format == "txt":
self.column_list = ["url"]
elif self.input_format in ["json", "csv", "tsv", "tsv.gz", "parquet"]:
self.column_list = self.save_additional_columns if self.save_additional_columns is not None else []
if self.caption_col is not None:
self.column_list = self.column_list + ["caption", "url"]
else:
self.column_list = self.column_list + ["url"]
def _save_to_arrow(self, input_file):
"""Read the input file and save to arrow files in a temporary directory"""
if self.input_format in ["txt", "json", "csv", "tsv"]:
with self.fs.open(input_file, encoding="utf-8", mode="r") as file:
if self.input_format == "txt":
df = pd.DataFrame([(url.rstrip(),) for url in file.readlines()], columns=self.column_list)
elif self.input_format == "json":
df = pd.read_json(file)
elif self.input_format == "csv":
df = pd.read_csv(file)
elif self.input_format == "tsv":
df = pd.read_table(file)
elif self.input_format in ["tsv", "tsv.gz", "parquet"]:
with self.fs.open(input_file, mode="rb") as file:
if self.input_format == "tsv.gz":
df = pd.read_table(file, compression="gzip")
elif self.input_format == "parquet":
columns_to_read = [self.url_col]
if self.caption_col is not None:
columns_to_read += [self.caption_col]
if self.save_additional_columns is not None:
columns_to_read += self.save_additional_columns
df = pd.read_parquet(file, columns=columns_to_read)
else:
assert False, f"Unexpected input format ({self.input_format})."
df = df.rename(columns={self.caption_col: "caption", self.url_col: "url"})
df = df.where(pd.notnull(df), None)
number_samples = len(df)
number_shards = math.ceil(len(df) / self.number_sample_per_shard)
shards = []
for shard_id in range(number_shards):
begin_shard = shard_id * self.number_sample_per_shard
end_shard = min(number_samples, (1 + shard_id) * self.number_sample_per_shard)
df_shard = df[begin_shard:end_shard][self.column_list]
df_shard = df_shard.reset_index(drop=True)
tmp_file = self.tmp_path + f"/{shard_id + self.start_shard_id}.feather"
fs, tmp_path = fsspec.core.url_to_fs(tmp_file)
with fs.open(tmp_path, "wb") as file:
df_shard.to_feather(file)
shards.append((shard_id, tmp_file))
del df
return shards
def __iter__(self):
"""
        Iterate over shards, yielding shards of size number_sample_per_shard or less for the last one.
        Each yielded item is a tuple (shard_id, arrow_file), where arrow_file is the path of a
        feather file holding one row per sample with the selected columns.
"""
for i, input_file in enumerate(self.input_files):
print(
"Downloading file number " + str(i + 1) + " of " + str(len(self.input_files)) + " called " + input_file
)
shards = self._save_to_arrow(input_file)
num_shard = 0
for num_shard, arrow_file in shards:
yield (
num_shard + self.start_shard_id,
arrow_file,
)
num_shard += 1
self.start_shard_id += num_shard
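# A minimal usage sketch (an illustrative assumption, not part of the original module); the
# file names and parameter values below are hypothetical:
#
#     reader = Reader(
#         url_list="urls.parquet",          # hypothetical input file
#         input_format="parquet",
#         url_col="url",
#         caption_col="caption",
#         save_additional_columns=None,
#         number_sample_per_shard=10000,
#         start_shard_id=0,
#         tmp_path="/tmp/shards",           # hypothetical temporary directory
#     )
#     for shard_id, feather_path in reader:
#         ...  # each feather file holds up to number_sample_per_shard samples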
|
py | 1a3231287e94bb7c05181dbeb1bc6d711fa7e812 | # -*- coding: utf-8 -*-
# This file was generated
from enum import Enum
class BitOrder(Enum):
MSB = 2500
r'''
The most significant bit is first. The first bit is in the 2^n place, where n is the number of bits.
'''
LSB = 2501
r'''
The least significant bit is first. The first bit is in the 2^0 place.
'''
class DigitalEdge(Enum):
RISING = 1800
r'''
Asserts the trigger when the signal transitions from low level to high level.
'''
FALLING = 1801
r'''
Asserts the trigger when the signal transitions from high level to low level.
'''
class DriveFormat(Enum):
NR = 1500
r'''
Drive format remains at logic level after each bit.
'''
RL = 1501
r'''
Drive format returns to a logic level low after each bit.
'''
RH = 1502
r'''
Drive format returns to a logic level high after each bit.
'''
SBC = 1503
r'''
Drive format returns to the complement logic level of the bit after each bit.
'''
class FrequencyMeasurementMode(Enum):
BANKED = 3700
r'''
Frequency measurements are made serially for groups of channels associated with a single frequency counter for each group.
Maximum frequency measured: 200 MHz.
'''
PARALLEL = 3701
r'''
Frequency measurements are made by multiple frequency counters in parallel.
Maximum frequency measured: 100 MHz.
'''
class HistoryRAMCyclesToAcquire(Enum):
FAILED = 2303
r'''
Acquires failed cycles.
'''
ALL = 2304
r'''
Acquires all cycles.
'''
class HistoryRAMTriggerType(Enum):
FIRST_FAILURE = 2200
r'''
First Failure History RAM trigger
'''
CYCLE_NUMBER = 2201
r'''
Cycle Number History RAM trigger.
'''
PATTERN_LABEL = 2202
r'''
Pattern Label History RAM trigger
'''
class PPMUApertureTimeUnits(Enum):
SECONDS = 2100
r'''
Unit in seconds.
'''
class PPMUCurrentLimitBehavior(Enum):
REGULATE = 3100
r'''
Controls output current so that it does not exceed the current limit. Power continues to generate even if the current limit is reached.
'''
class PPMUMeasurementType(Enum):
CURRENT = 2400
r'''
The PPMU measures current.
'''
VOLTAGE = 2401
r'''
The PPMU measures voltage.
'''
class PPMUOutputFunction(Enum):
VOLTAGE = 1300
r'''
The PPMU forces voltage to the DUT.
'''
CURRENT = 1301
r'''
The PPMU forces current to the DUT.
'''
class PinState(Enum):
ZERO = 0
r'''
A digital state of 0.
'''
ONE = 1
r'''
A digital state of 1.
'''
L = 3
r'''
A digital state of L (low).
'''
H = 4
r'''
A digital state of H (high).
'''
X = 5
r'''
A digital state of X (non-drive state).
'''
M = 6
r'''
A digital state of M (midband).
'''
V = 7
r'''
A digital state of V (compare high or low, not midband; store results from capture functionality if configured).
'''
D = 8
r'''
A digital state of D (drive data from source functionality if configured).
'''
E = 9
r'''
A digital state of E (compare data from source functionality if configured).
'''
NOT_A_PIN_STATE = 254
r'''
Not a pin state is used for non-existent DUT cycles.
'''
PIN_STATE_NOT_ACQUIRED = 255
r'''
Pin state could not be acquired because none of the pins mapped to the instrument in a multi-instrument session had any failures.
'''
def __str__(self):
return {
'ZERO': '0',
'ONE': '1',
'NOT_A_PIN_STATE': 'Not a Pin State',
'PIN_STATE_NOT_ACQUIRED': 'Pin State Not Acquired',
}.get(self.name, self.name)
class SelectedFunction(Enum):
DIGITAL = 1100
r'''
The pattern sequencer controls the specified pin(s). If a pattern is currently bursting, the pin immediately switches to bursting the pattern. This option disconnects the PPMU.
'''
PPMU = 1101
r'''
The PPMU controls the specified pin(s) and connects the PPMU. The pin driver is in a non-drive state, and the active load is disabled. The PPMU does not start sourcing or measuring until Source or Measure(PpmuMeasurementType) is called.
'''
OFF = 1102
r'''
Puts the digital driver in a non-drive state, disables the active load, disconnects the PPMU, and closes the I/O switch connecting the instrument channel.
'''
DISCONNECT = 1103
r'''
The I/O switch connecting the instrument channel is open to the I/O connector. If the PPMU is sourcing, it is stopped prior to opening the I/O switch.
'''
RIO = 1104
r'''
Yields control of the specified pin(s) to LabVIEW FPGA.
'''
class SequencerFlag(Enum):
FLAG0 = 'seqflag0'
FLAG1 = 'seqflag1'
FLAG2 = 'seqflag2'
FLAG3 = 'seqflag3'
class SequencerRegister(Enum):
REGISTER0 = 'reg0'
REGISTER1 = 'reg1'
REGISTER2 = 'reg2'
REGISTER3 = 'reg3'
REGISTER4 = 'reg4'
REGISTER5 = 'reg5'
REGISTER6 = 'reg6'
REGISTER7 = 'reg7'
REGISTER8 = 'reg8'
REGISTER9 = 'reg9'
REGISTER10 = 'reg10'
REGISTER11 = 'reg11'
REGISTER12 = 'reg12'
REGISTER13 = 'reg13'
REGISTER14 = 'reg14'
REGISTER15 = 'reg15'
class _SiteResultType(Enum):
PASS_FAIL = 3300
r'''
Pass/fail site result.
'''
CAPTURE_WAVEFORM = 3301
r'''
Capture waveform site result.
'''
class SoftwareTrigger(Enum):
START = 2000
r'''
Overrides the start trigger.
'''
CONDITIONAL_JUMP = 2001
r'''
Specifies to route a pattern opcode event signal.
'''
class SourceDataMapping(Enum):
BROADCAST = 2600
r'''
Broadcasts the waveform you specify to all sites.
'''
SITE_UNIQUE = 2601
r'''
Sources unique waveform data to each site.
'''
class TDREndpointTermination(Enum):
OPEN = 3600
r'''
TDR channels are connected to an open circuit.
'''
SHORT_TO_GROUND = 3601
r'''
TDR channels are connected to a short to ground.
'''
class TerminationMode(Enum):
ACTIVE_LOAD = 1200
r'''
The active load provides a constant current to a commutating voltage (Vcom).
'''
VTERM = 1201
r'''
The pin driver drives Vterm.
'''
HIGH_Z = 1202
r'''
The pin driver is in a non-drive state (in a high-impedance state) and the active load is disabled.
'''
class TimeSetEdgeType(Enum):
DRIVE_ON = 2800
r'''
Specifies the drive on edge of the time set.
'''
DRIVE_DATA = 2801
r'''
Specifies the drive data edge of the time set.
'''
DRIVE_RETURN = 2802
r'''
Specifies the drive return edge of the time set.
'''
DRIVE_OFF = 2803
r'''
Specifies the drive off edge of the time set.
'''
COMPARE_STROBE = 2804
r'''
Specifies the compare strobe of the time set.
'''
DRIVE_DATA2 = 2805
r'''
Specifies the drive data 2 edge of the time set.
'''
DRIVE_RETURN2 = 2806
r'''
Specifies the drive return 2 edge of the time set.
'''
COMPARE_STROBE2 = 2807
r'''
Specifies the compare strobe 2 of the time set.
'''
class TriggerType(Enum):
NONE = 1700
r'''
Disables the start trigger.
'''
DIGITAL_EDGE = 1701
r'''
Digital edge trigger.
'''
SOFTWARE = 1702
r'''
Software start trigger.
'''
class WriteStaticPinState(Enum):
ZERO = 0
r'''
Specifies to drive low.
'''
ONE = 1
r'''
Specifies to drive high.
'''
X = 5
r'''
Specifies to not drive.
'''
def __str__(self):
return {
'ZERO': '0',
'ONE': '1',
}.get(self.name, self.name)
|
py | 1a3231713a8e7d901835cddb80a801eb9d7a5aae | """
Demonstrates the use of the ProgressMeter class.
Author: Tucker Beck
Last Tested: 3/2/2009
Verified with: Python 2.6, Tkinter 8.4
"""
from __future__ import division
from Tkinter import *
from random import randint
from time import sleep
class ProgressMeter( Frame ):
"""
    The ProgressMeter is a Frame widget that provides a progress bar and
    accompanying information to a user regarding a long, computationally
    intensive process.  A ProgressMeter can control any generator function
    that returns a string message or None after each iteration.  Furthermore,
the ProgressMeter can interrupt the process at any time.
"""
def __init__( self, parent, height=30 ):
"""
Initializes this ProgressMeter
Arguments:
parent: The master widget for this ProgressMeter
height: The desired height of the progress bar
"""
self.parent = parent
Frame.__init__( self, parent )
self.columnconfigure( 0, weight=1 ) # Forces the canv object to resize any time this widget is resized
self.rowconfigure( 0, weight=1 )
self.statusMessage = 'Normal'
self.w = 0
self.h = 0
self.canv = Canvas( self, height=height) # This canvas will display the progress bar and accompanying percentage text
self.canv.grid( row=1, column=0, sticky=N+S+E+W )
self.canv.bind( '<Configure>', lambda e:
self.resize( e.width, e.height ) ) # When the canvas is resized the progress bar should be redrawn.
self.killVar = IntVar() # The killBtn can cancel execution
self.killVar.set( 0 )
self.killBtn = Button( self, text='Cancel',
command=lambda: self.killVar.set(1) )
self.killBtn.configure( state=DISABLED )
self.killBtn.grid( row=1, column=1 )
        self.targetGen = None # Placeholder for the generator function that will be metered
        self.targetArgs = [] # Argument list for the generator function
        self.targetKwds = {} # Keyword dictionary for the generator function
        self.targetIdx = 0 # Keeps track of which step in iteration is currently being executed
        self.targetLen = 0 # Total number of steps in execution
def resize( self, w, h ):
"""
Handles resize events for the canv widget. Adjusts the height and width
of the canvas for the progress bar calculations.
Arguments:
w: The new width
h: The new height
"""
self.w = w
self.h = h
self.canv.delete( 'frame' )
self.canv.create_rectangle( 1, 1, self.w, self.h, outline='black',
fill='gray75', tag='frame' )
def reset( self ):
"""
        Resets the control values for the generator function and also clears the
progress bar
"""
self.canv.delete( 'bar' )
self.canv.delete( 'text' )
self.killBtn.configure( state=DISABLED )
self.targetGen = None
self.targetArgs = []
        self.targetKwds = {}
self.killVar.set( 0 )
self.targetIdx = 0
self.targetLen = 0
    def clearStatus( self ):
        """
Clears the statusMessage member. Might be used by parent GUI that
reports child status.
"""
self.statusMessage = 'Normal'
def drawBar( self ):
"""
Updates the status bar for the percentage of completion.
"""
pct = self.targetIdx / self.targetLen # The percentage of completion
x0 = 2 # The bar is inset by 2 pixels
x1 = pct * ( self.w - 3 ) + 2
y0 = 2
y1 = self.h
self.canv.delete( 'bar' )
self.canv.create_rectangle( x0, y0, x1, y1, fill='SteelBlue3',
outline='', tag='bar' )
self.canv.delete( 'text' )
pctTxt = '%02.2f%%' % ( pct*100, )
self.canv.create_text( self.w/2, self.h/2, text=pctTxt,
anchor=CENTER, tag='text' )
def startGen( self, targetGen, targetLen, targetArgs=[], targetKwds={} ):
"""
        Initializes the target generator function with the supplied arguments and
        keyword arguments. Requests Tk to call iterGen after all idle events have been
handled.
Arguments:
targetGen: The target generator function
targetLen: The number of iterations in the target generator
targetArgs: The arguments for the generator function
            targetKwds: The keyword arguments of the generator function
Note:
Having iterGen called by Tk ensures that redraws and other sorts of
normal Tkinter events can be processed. Results in the status bar
updating real-time with execution while allowing the GUI to function
normally.
"""
self.targetGen = targetGen( *targetArgs, **targetKwds )
self.targetLen = targetLen
self.killBtn.configure( state=NORMAL )
self.after_idle( self.iterGen )
def iterGen( self ):
"""
        Iterates through the target generator using delayed self-referencing
        function calls to allow GUI updates between iterations
"""
try:
            msg = self.targetGen.next() # Execute the next iteration of the generator
except StopIteration:
self.reset() # When the generator is finished, a StopIteration exception is raised. This signals a normal finish in the generator
self.statusMessage = 'Completed'
self.event_generate( '<<Finished>>' ) # A <<Finished>> virtual event signals the GUI that the progress meter is finished
return
self.targetIdx += 1
self.drawBar()
if msg == None:
pass
        elif msg.startswith( 'AbortIteration' ): # The target generator can signal that something irrevocable has happened by yielding a value of 'AbortIteration'
self.reset()
self.statusMessage = msg
self.event_generate( '<<Finished>>' )
return
else:
self.statusMessage = msg # If the generator yields a value other than None or 'AbortIteration', this message will be sent out to the controlling gui
self.event_generate( '<<StatusRequest>>' )
if self.killVar.get() == 1: # Occurs if the user clicks the killBtn
self.reset()
self.statusMessage = 'Canceled'
self.event_generate( '<<Finished>>' )
return
self.update_idletasks()
self.after_idle( self.iterGen )
def dummy_gen( alices, bobs ):
"""
A simple, stupid example of a ProgressMeter iterable generator function
"""
for alice in alices:
for bob in bobs:
if bob==alice:
yield 'Match: %s==%s' % ( str(alice), str(bob) )
else:
yield 'No Match: %s!=%s' % ( str(alice), str(bob) )
def main():
root = Tk()
root.title( 'ProgressMeter Demo' )
pgress = ProgressMeter( root ) # Initialize the ProgressMeter with default arguments
pgress.grid( row=1 )
alices = range( 53 )
bobs = [ randint( 0,53 ) for i in range( 53 ) ]
btn = Button( root, text="Go!", command=lambda:
pgress.startGen( dummy_gen, len(alices) * len(bobs), [alices, bobs] ) )# Starts the ProgressMeter going when the button is clicked
btn.grid( row=0 )
statusVar = StringVar( root, 'None' )
status = Label( root, textvariable=statusVar )
status.grid( row=2 ) # This label will be used to display status messages from the ProgressMeter
root.bind( '<<StatusRequest>>', lambda event:
statusVar.set(pgress.statusMessage) )
root.bind( '<<Finished>>', lambda event:
statusVar.set( pgress.statusMessage ) )
root.mainloop()
if __name__=='__main__':
main()
|
py | 1a32321551c7c44d4e570a2d10fc2af68987e01a | from bs4 import BeautifulSoup
import requests
def get_articles_from_page_data(page, depth = 1):
base_url = page.url.replace("http://","").replace("www.","")
print(base_url)
soup = BeautifulSoup(page.content(), 'html.parser')
    url_strings = [link.get('href') for link in soup.find_all('a') if link.get('href')]  # skip anchors without an href
internal_url_strings = [link for link in url_strings if possible_article_link(link,base_url)]
return internal_url_strings
def possible_article_link(url, base_url):
is_part_of_site = base_url in url or './' in url
ends_as_webpage = ".htm" in url
not_an_index_page = "index.html" not in url
return is_part_of_site and ends_as_webpage and not_an_index_page
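# A minimal usage sketch (an illustrative assumption, not part of the original file): the
# `page` argument is assumed to expose a `.url` attribute and a `.content()` method, so a
# thin wrapper around the already-imported `requests` library could look like this:
#
#     class FetchedPage:
#         def __init__(self, url):
#             self.url = url
#             self._response = requests.get(url)
#         def content(self):
#             return self._response.text
#
#     links = get_articles_from_page_data(FetchedPage("http://www.example.com/index.html"))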
|
py | 1a3232564d25075d72503ae7b7228bc64417883f | """
Test for management command generating exchange rates
"""
from unittest.mock import patch
from django.test import TestCase
from django.test.utils import override_settings
from financialaid.constants import get_currency_exchange_rate_api_request_url
from financialaid.management.commands import update_exchange_rates
from financialaid.models import CurrencyExchangeRate
@patch('financialaid.tasks.requests.get')
class GenerateExchangeRatesTest(TestCase):
"""
Tests for generate_exchange_rates management command
"""
@classmethod
def setUpTestData(cls):
cls.command = update_exchange_rates.Command()
def setUp(self):
super(GenerateExchangeRatesTest, self).setUp()
self.data = {
"extraneous information": "blah blah blah",
"rates": {
"CBA": "3.5",
"FED": "1.9",
"RQP": "0.5"
}
}
@override_settings(OPEN_EXCHANGE_RATES_APP_ID='foo_id', OPEN_EXCHANGE_RATES_URL='http://foo.bar.com')
def test_currency_exchange_rate_command(self, mocked_request):
"""
Assert currency exchange rates are created using management command
"""
mocked_request.return_value.json.return_value = self.data
mocked_request.return_value.status_code = 200
assert CurrencyExchangeRate.objects.count() == 0
self.command.handle("generate_exchange_rates")
called_args, _ = mocked_request.call_args
assert called_args[0] == get_currency_exchange_rate_api_request_url()
assert CurrencyExchangeRate.objects.count() == 3
currency_cba = CurrencyExchangeRate.objects.get(currency_code="CBA")
assert currency_cba.exchange_rate == 3.5
currency_fed = CurrencyExchangeRate.objects.get(currency_code="FED")
assert currency_fed.exchange_rate == 1.9
currency_rqp = CurrencyExchangeRate.objects.get(currency_code="RQP")
assert currency_rqp.exchange_rate == 0.5
|
py | 1a3232723908fc855045a3607bc9137c6b489fe5 | import torch
from torch import nn
import torch.optim as optim
import torch.multiprocessing as mp
import numpy as np
import time
class MPManager(object):
def __init__(self, num_workers):
"""
manage a single-instruction-multiple-data (SIMD) scheme
:param int num_workers: The number of processors to run.
"""
mp.set_start_method('spawn')
# Counting the current batch size
self.num_workers = num_workers
# A pool of processes
self.pool = mp.Pool(processes=num_workers)
def run(self, function, arguments):
"""
:param function : the instruction
:param arguments : list of things processors loop over
can be anything the function works on, e.g. model + data
"""
output_and_grads = self.pool.map(function, arguments)
return output_and_grads
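# A minimal usage sketch (an illustrative assumption, not part of the original module):
# `square` is a hypothetical worker; any picklable top-level function works with pool.map.
def square(x):
    return x * x
if __name__ == "__main__":
    manager = MPManager(num_workers=2)
    # run() maps the argument list across the worker pool and gathers the results
    print(manager.run(square, [1, 2, 3, 4]))  # expected: [1, 4, 9, 16]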
|
py | 1a323362be31c5055b2901813379e974351f0ad9 | import os
from setuptools import setup, find_packages
from relationships import VERSION
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
readme = f.read()
f.close()
setup(
name='django-relationships',
version=".".join(map(str, VERSION)),
description='descriptive relationships between auth.User',
long_description=readme,
author='Charles Leifer',
author_email='[email protected]',
url='http://github.com/coleifer/django-relationships/tree/master',
packages=find_packages(),
package_data={
'relationships': [
'fixtures/*.json',
'templates/*.html',
'templates/*/*.html',
'locale/*/LC_MESSAGES/*',
'relationships_tests/fixtures/*.json',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
test_suite='runtests.runtests',
)
|
py | 1a323430c58c43206ad10debea0db7d063b08bbf | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "praxis.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
praxisd and praxis-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run praxisd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "praxisd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "praxis-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in praxis.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a praxisd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "praxisd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "praxis-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple praxisds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using it's output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
py | 1a3234aad66c3a980406340d04e02d1ba60ad69b | # Generated by Django 3.2.4 on 2021-07-07 18:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('leads', '0003_auto_20210707_1055'),
]
operations = [
migrations.AddField(
model_name='lead',
name='organisation',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='leads.userprofile'),
preserve_default=False,
),
migrations.AlterField(
model_name='lead',
name='agent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='leads.agent'),
),
]
|
py | 1a3234fece70455b233c6602760821d425ddb3ed | if __name__ == "__main__":
from tests.block_tools import BlockTools, test_constants
from cannabis.util.default_root import DEFAULT_ROOT_PATH
# TODO: mariano: fix this with new consensus
bt = BlockTools(root_path=DEFAULT_ROOT_PATH)
new_genesis_block = bt.create_genesis_block(test_constants, b"0")
print(bytes(new_genesis_block))
|
py | 1a3235d2aed1fa6068665f5bce4ca621fe6f713b | import json
from kubernetes import client
from django.conf import settings
from libs.paths.data_paths import get_data_paths
from libs.paths.jobs import get_job_logs_path, get_job_outputs_path
from libs.utils import get_list
from polyaxon_k8s import constants as k8s_constants
from polyaxon_schemas.exceptions import PolyaxonConfigurationError
from polyaxon_schemas.utils import to_list
from scheduler.spawners.templates import constants
from scheduler.spawners.templates.env_vars import (
get_env_var,
get_job_env_vars,
get_resources_env_vars
)
from scheduler.spawners.templates.gpu_volumes import get_gpu_volumes_def
from scheduler.spawners.templates.init_containers import InitCommands, get_output_args
from scheduler.spawners.templates.node_selectors import get_node_selector
from scheduler.spawners.templates.resources import get_resources
from scheduler.spawners.templates.sidecars import get_sidecar_args, get_sidecar_container
from scheduler.spawners.templates.volumes import get_pod_outputs_volume
class PodManager(object):
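"""Builds the Kubernetes pod spec for a Polyaxon job: an init container that
prepares the outputs path, the main job container, and an optional logging sidecar."""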
def __init__(self,
namespace,
name,
project_name,
project_uuid,
job_name,
job_uuid,
job_docker_image,
job_container_name=None,
sidecar_container_name=None,
sidecar_docker_image=None,
init_container_name=None,
init_docker_image=None,
role_label=None,
type_label=None,
ports=None,
use_sidecar=False,
sidecar_config=None,
log_level=None):
self.namespace = namespace
self.name = name
self.project_name = project_name
self.project_uuid = project_uuid
self.job_name = job_name
self.job_uuid = job_uuid
self.job_container_name = job_container_name or settings.CONTAINER_NAME_JOB
self.job_docker_image = job_docker_image
self.sidecar_container_name = sidecar_container_name or settings.CONTAINER_NAME_SIDECAR
self.sidecar_docker_image = sidecar_docker_image or settings.JOB_SIDECAR_DOCKER_IMAGE
self.init_container_name = init_container_name or settings.CONTAINER_NAME_INIT
self.init_docker_image = init_docker_image or settings.JOB_INIT_DOCKER_IMAGE
self.role_label = role_label or settings.ROLE_LABELS_WORKER
self.type_label = type_label or settings.TYPE_LABELS_EXPERIMENT
self.app_label = settings.APP_LABELS_JOB
self.labels = self.get_labels()
self.k8s_job_name = self.get_k8s_job_name()
self.ports = to_list(ports) if ports else []
self.use_sidecar = use_sidecar
if use_sidecar and not sidecar_config:
raise PolyaxonConfigurationError(
'In order to use a sidecar, a `sidecar_config` is required. '
'The `sidecar_config` must correspond to the sidecar docker image used.')
self.sidecar_config = sidecar_config
self.log_level = log_level
def get_k8s_job_name(self):
return constants.JOB_NAME.format(name=self.name, job_uuid=self.job_uuid)
def get_labels(self):
labels = {
'project_name': self.project_name,
'project_uuid': self.project_uuid,
'job_name': self.job_name,
'job_uuid': self.job_uuid,
'role': self.role_label,
'type': self.type_label,
'app': self.app_label
}
return labels
def get_pod_container(self,
volume_mounts,
persistence_outputs,
persistence_data,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None):
"""Pod job container for task."""
env_vars = get_list(env_vars)
env_vars += get_job_env_vars(
log_level=self.log_level,
outputs_path=get_job_outputs_path(persistence_outputs=persistence_outputs,
job_name=self.job_name),
data_paths=get_data_paths(persistence_data),
logs_path=get_job_logs_path(job_name=self.job_name),
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments
)
env_vars += [
get_env_var(name=constants.CONFIG_MAP_JOB_INFO_KEY_NAME, value=json.dumps(self.labels)),
]
env_vars += get_resources_env_vars(resources=resources)
ports = [client.V1ContainerPort(container_port=port) for port in self.ports]
return client.V1Container(name=self.job_container_name,
image=self.job_docker_image,
command=command,
args=args,
ports=ports or None,
env=env_vars,
resources=get_resources(resources),
volume_mounts=volume_mounts)
def get_sidecar_container(self):
"""Pod sidecar container for job logs."""
return get_sidecar_container(
job_name=self.k8s_job_name,
job_container_name=self.job_container_name,
sidecar_container_name=self.sidecar_container_name,
sidecar_docker_image=self.sidecar_docker_image,
namespace=self.namespace,
app_label=self.app_label,
sidecar_config=self.sidecar_config,
sidecar_args=get_sidecar_args(pod_id=self.k8s_job_name))
def get_init_container(self, persistence_outputs):
"""Pod init container for setting outputs path."""
outputs_path = get_job_outputs_path(persistence_outputs=persistence_outputs,
job_name=self.job_name)
_, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
return client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
command=["/bin/sh", "-c"],
args=to_list(get_output_args(command=InitCommands.CREATE,
outputs_path=outputs_path)),
volume_mounts=outputs_volume_mount)
def get_task_pod_spec(self,
volume_mounts,
volumes,
persistence_outputs=None,
persistence_data=None,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None,
node_selector=None,
restart_policy='OnFailure'):
"""Pod spec to be used to create pods for tasks: master, worker, ps."""
volume_mounts = get_list(volume_mounts)
volumes = get_list(volumes)
gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources)
volume_mounts += gpu_volume_mounts
volumes += gpu_volumes
pod_container = self.get_pod_container(volume_mounts=volume_mounts,
persistence_outputs=persistence_outputs,
persistence_data=persistence_data,
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments,
env_vars=env_vars,
command=command,
args=args,
resources=resources)
containers = [pod_container]
if self.use_sidecar:
sidecar_container = self.get_sidecar_container()
containers.append(sidecar_container)
node_selector = get_node_selector(
node_selector=node_selector,
default_node_selector=settings.NODE_SELECTORS_JOBS)
service_account_name = None
if settings.K8S_RBAC_ENABLED:
service_account_name = settings.K8S_SERVICE_ACCOUNT_NAME
return client.V1PodSpec(
restart_policy=restart_policy,
service_account_name=service_account_name,
init_containers=to_list(self.get_init_container(persistence_outputs)),
containers=containers,
volumes=volumes,
node_selector=node_selector)
def get_pod(self,
volume_mounts,
volumes,
persistence_outputs=None,
persistence_data=None,
outputs_refs_jobs=None,
outputs_refs_experiments=None,
env_vars=None,
command=None,
args=None,
resources=None,
node_selector=None,
restart_policy=None):
metadata = client.V1ObjectMeta(name=self.k8s_job_name,
labels=self.labels,
namespace=self.namespace)
pod_spec = self.get_task_pod_spec(
volume_mounts=volume_mounts,
volumes=volumes,
persistence_outputs=persistence_outputs,
persistence_data=persistence_data,
outputs_refs_jobs=outputs_refs_jobs,
outputs_refs_experiments=outputs_refs_experiments,
env_vars=env_vars,
command=command,
args=args,
resources=resources,
node_selector=node_selector,
restart_policy=restart_policy)
return client.V1Pod(api_version=k8s_constants.K8S_API_VERSION_V1,
kind=k8s_constants.K8S_POD_KIND,
metadata=metadata,
spec=pod_spec)
|
py | 1a32360fb1b2323fa18b8e05e667b96147c57ede | from django.contrib import admin
from .album import Album
from .description import Description # noqa
from .photo import Photo
from .place import Place
from .user import User
from .userprofile import UserProfile
admin.site.register(Album)
admin.site.register(Photo)
admin.site.register(Place)
admin.site.register(UserProfile)
admin.site.register(User)
|
py | 1a32365f5a12e799dfc91a282c08fc9efe186e27 | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.1, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_1 import models
class AdminApiTokenGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[AdminApiToken]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.AdminApiToken]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[AdminApiToken]): A list of administrator API tokens.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminApiTokenGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminApiTokenGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminApiTokenGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a32368d8cd781a33777b3291861d0e9f1a8f6c6 | # -*- coding: utf-8 -*-
"""
This package contains the elements for interpreting product data.
"""
def open(*args, **kwargs):
from .converter import open_product
return open_product(*args, **kwargs)
|
py | 1a3236db254ad363772bbaf74f14ac0fcf175e7a | """
This file offers the methods to automatically retrieve the graph Sodalis glossinidius.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SodalisGlossinidius(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sodalis glossinidius graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Sodalis glossinidius graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SodalisGlossinidius",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
py | 1a32384810de2ba162e636a636c818183abdda16 | # (c) 2019-2020 Mikhail Paulyshka
# SPDX-License-Identifier: MIT
import logging
import json
import os
import random
import string
import sys
import pprint
import threading
from urllib.parse import parse_qs
from typing import Dict, List
import common.mglx_http
from .gw2_constants import GW2AuthorizationResult
class GW2API(object):
API_DOMAIN = 'https://api.guildwars2.com'
API_URL_ACHIEVEMENTS = '/v2/achievements'
API_URL_ACCOUNT = '/v2/account'
API_URL_ACCOUNT_ACHIVEMENTS = '/v2/account/achievements'
LOCALSERVER_HOST = '127.0.0.1'
LOCALSERVER_PORT = 13338
RETRIES_COUNT = 5
def __init__(self, plugin_version):
self.__http = common.mglx_http.MglxHttp(user_agent='gog_gw2/%s' % plugin_version, verify_ssl=False)
self.__logger = logging.getLogger('gw2_api')
self._api_key = None
self._account_info = None
async def shutdown(self):
await self.__http.shutdown()
#
# Getters
#
def get_api_key(self) -> str:
return self._api_key
def get_account_id(self) -> str:
if self._account_info is None:
self.__logger.error('get_account_id: account info is None', exc_info=True)
return None
return self._account_info['id']
def get_account_name(self) -> str:
if self._account_info is None:
self.__logger.error('get_account_name: account info is None', exc_info=True)
return None
return self._account_info['name']
def get_owned_games(self) -> List[str]:
if self._account_info is None:
self.__logger.error('get_owned_games: account info is None', exc_info=True)
return list()
return self._account_info['access']
def get_account_age(self) -> int:
if self._account_info is None:
self.__logger.error('get_account_age: account info is None', exc_info=True)
return None
if 'age' not in self._account_info:
return 0
return self._account_info['age']
async def get_account_achievements(self) -> List[int]:
result = list()
if not self._api_key:
self.__logger.error('get_account_achievements: api_key is None', exc_info=True)
return result
(status, achievements_account) = await self.__api_get_response(self._api_key, self.API_URL_ACCOUNT_ACHIVEMENTS)
if status != 200:
self.__logger.warn('get_account_achievements: failed to get achievements %s' % status)
return result
for achievement in achievements_account:
if achievement['done'] == True:
result.append(achievement['id'])
return result
#
# Authorization server
#
async def do_auth_apikey(self, api_key : str) -> GW2AuthorizationResult:
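# Validate the key against /v2/account; on success cache the key and account info for the getters above.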
self._api_key = None
self._account_info = None
if not api_key:
self.__logger.warn('do_auth_apikey: api_key is is None')
return GW2AuthorizationResult.FAILED
(status_code, account_info) = await self.__api_get_response(api_key, self.API_URL_ACCOUNT)
if status_code != 200:
if (account_info is not None) and ('text' in account_info):
if account_info['text'] == 'Invalid access token':
return GW2AuthorizationResult.FAILED_INVALID_TOKEN
elif account_info['text'] == 'invalid key':
return GW2AuthorizationResult.FAILED_INVALID_KEY
elif account_info['text'] == 'no game account':
return GW2AuthorizationResult.FAILED_NO_ACCOUNT
elif account_info['text'] == 'ErrBadData':
return GW2AuthorizationResult.FAILED_BAD_DATA
elif account_info['text'] == 'ErrTimeout':
return GW2AuthorizationResult.FAILED_TIMEOUT
else:
self.__logger.error('do_auth_apikey: unknown error description %s, %s' % (status_code, account_info))
self.__logger.warn('do_auth_apikey: %s, %s' % (status_code, account_info))
return GW2AuthorizationResult.FAILED
if account_info is None:
self.__logger.warn('do_auth_apikey: account info is None')
return GW2AuthorizationResult.FAILED
self._api_key = api_key
self._account_info = account_info
return GW2AuthorizationResult.FINISHED
async def __api_get_response(self, api_key, url, parameters = None):
result = None
#update authorization cookie
self.__http.update_headers({'Authorization': 'Bearer ' + api_key})
#make request
retries = self.RETRIES_COUNT
while retries > 0:
#decrement remaining retries counter
retries = retries - 1
#send request
resp = None
try:
resp = await self.__http.request_get(self.API_DOMAIN+url, params=parameters)
except Exception:
self.__logger.exception('__api_get_response: failed to perform GET request for url %s' % url)
return (0, None)
#log response status
if resp.status == 400:
self.__logger.warning('__api_get_response: TIMEOUT for url %s' % url)
elif resp.status == 404:
self.__logger.error('__api_get_response: NOT FOUND for url %s' % url)
elif resp.status == 502:
self.__logger.warning('__api_get_response: BAD GATEWAY for url %s' % url)
elif resp.status == 504:
self.__logger.warning('__api_get_response: GATEWAY TIMEOUT for url %s' % url)
elif (resp.status == 200) and (resp.text is not None):
try:
result = json.loads(resp.text)
except Exception:
self.__logger.exception('__api_get_response: failed to parse response, url=%s, status=%s, text=%s' % (url, resp.status, resp.text))
else:
self.__logger.error('__api_get_response: unknown error, url=%s, status=%s, text=%s' % (url, resp.status, resp.text))
return (resp.status, result)
|
gyp | 1a32388775886c969940efb67e54979d99bc2824 | {
'variables': {
'conditions': [
['OS == "win"', {
'asm_format': 'peobj',
'lj_vm': '<(INTERMEDIATE_DIR)/luajit/src/lj_vm.obj',
}],
['OS == "mac"', {
'asm_format': 'machasm',
'lj_vm': '<(INTERMEDIATE_DIR)/luajit/src/lj_vm.s',
}],
['OS == "linux" or OS == "freebsd"', {
'asm_format': 'elfasm',
'lj_vm': '<(INTERMEDIATE_DIR)/luajit/src/lj_vm.s',
}]
]
},
'target_defaults': {
'defines': [
'LUAJIT_ENABLE_LUA52COMPAT',
'LUA_USE_APICHECK',
],
'conditions': [
['target_arch=="x64"', {
'defines': [
'LUAJIT_TARGET=LUAJIT_ARCH_x64',
],
}],
['target_arch=="ia32"', {
'defines': [
'LUAJIT_TARGET=LUAJIT_ARCH_x86',
],
}],
['OS != "win"', {
'defines': [
'_LARGEFILE_SOURCE',
'_FILE_OFFSET_BITS=64',
'_GNU_SOURCE',
],
}],
['OS == "win"', {
'defines': [
'LUA_BUILD_AS_DLL',
],
}],
['OS=="solaris"', {
'cflags': ['-pthreads'],
'ldflags': ['-pthreads'],
}],
['OS=="freebsd"', {
'cflags': ['-pthread'],
'ldflags': ['-pthread'],
}],
],
},
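# Build order: 'buildvm' is compiled first, then invoked (see the actions below) to
# generate the interpreter object (lj_vm.<asm_format>) and the lj_*def.h headers that
# 'libluajit' compiles against.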
'targets': [
{
'target_name': 'luajit',
'type': 'executable',
'dependencies': [
'buildvm',
'libluajit',
'luajit-datafiles',
],
'conditions': [
['OS == "linux"', { 'libraries': ['-ldl'] }, ],
],
'sources': [
'luajit/src/luajit.c',
]
},
{
'target_name': 'luajit-datafiles',
'type': 'none',
'copies': [
{
'destination': '<(PRODUCT_DIR)/lua/jit',
'files': [
'../deps/luajit/lib/bc.lua',
'../deps/luajit/lib/bcsave.lua',
'../deps/luajit/lib/dis_arm.lua',
'../deps/luajit/lib/dis_ppc.lua',
'../deps/luajit/lib/dis_x86.lua',
'../deps/luajit/lib/dis_x64.lua',
'../deps/luajit/lib/dump.lua',
'../deps/luajit/lib/v.lua',
]
}],
},
{
'target_name': 'libluajit',
'conditions': [
['OS == "win"', { 'type': 'shared_library' },
{ 'type': 'static_library' } ],
],
'dependencies': [
'buildvm',
],
'variables': {
'lj_sources': [
'luajit/src/lib_base.c',
'luajit/src/lib_math.c',
'luajit/src/lib_bit.c',
'luajit/src/lib_string.c',
'luajit/src/lib_table.c',
'luajit/src/lib_io.c',
'luajit/src/lib_os.c',
'luajit/src/lib_package.c',
'luajit/src/lib_debug.c',
'luajit/src/lib_jit.c',
'luajit/src/lib_ffi.c',
]
},
'include_dirs': [
'<(INTERMEDIATE_DIR)',
'luajit/src',
],
'direct_dependent_settings': {
'include_dirs': [
'<(INTERMEDIATE_DIR)',
'luajit/src',
]
},
'sources': [
'<(lj_vm)',
'luajit/src/lib_aux.c',
'luajit/src/lib_init.c',
'luajit/src/lib_base.c',
'luajit/src/lib_math.c',
'luajit/src/lib_string.c',
'luajit/src/lib_table.c',
'luajit/src/lib_io.c',
'luajit/src/lib_os.c',
'luajit/src/lib_package.c',
'luajit/src/lib_debug.c',
'luajit/src/lib_bit.c',
'luajit/src/lib_jit.c',
'luajit/src/lib_ffi.c',
'luajit/src/lj_gc.c',
'luajit/src/lj_alloc.c',
'luajit/src/lj_api.c',
'luajit/src/lj_asm.c',
'luajit/src/lj_bc.c',
'luajit/src/lj_bcread.c',
'luajit/src/lj_bcwrite.c',
'luajit/src/lj_carith.c',
'luajit/src/lj_ccall.c',
'luajit/src/lj_ccallback.c',
'luajit/src/lj_cconv.c',
'luajit/src/lj_cdata.c',
'luajit/src/lj_char.c',
'luajit/src/lj_clib.c',
'luajit/src/lj_cparse.c',
'luajit/src/lj_crecord.c',
'luajit/src/lj_ctype.c',
'luajit/src/lj_debug.c',
'luajit/src/lj_dispatch.c',
'luajit/src/lj_err.c',
'luajit/src/lj_ffrecord.c',
'luajit/src/lj_func.c',
'luajit/src/lj_gdbjit.c',
'luajit/src/lj_ir.c',
'luajit/src/lj_lex.c',
'luajit/src/lj_lib.c',
'luajit/src/lj_mcode.c',
'luajit/src/lj_meta.c',
'luajit/src/lj_obj.c',
'luajit/src/lj_opt_dce.c',
'luajit/src/lj_opt_fold.c',
'luajit/src/lj_opt_loop.c',
'luajit/src/lj_opt_mem.c',
'luajit/src/lj_opt_narrow.c',
'luajit/src/lj_opt_split.c',
'luajit/src/lj_parse.c',
'luajit/src/lj_record.c',
'luajit/src/lj_snap.c',
'luajit/src/lj_state.c',
'luajit/src/lj_str.c',
'luajit/src/lj_tab.c',
'luajit/src/lj_trace.c',
'luajit/src/lj_udata.c',
'luajit/src/lj_vmevent.c',
'luajit/src/lj_vmmath.c',
'<(INTERMEDIATE_DIR)/lj_libdef.h',
'<(INTERMEDIATE_DIR)/lj_recdef.h',
'<(INTERMEDIATE_DIR)/lj_folddef.h',
'<(INTERMEDIATE_DIR)/lj_vmdef.h',
'<(INTERMEDIATE_DIR)/lj_ffdef.h',
'<(INTERMEDIATE_DIR)/lj_bcdef.h',
],
'actions': [
{
'action_name': 'generate_lj_libdef',
'outputs': ['<(INTERMEDIATE_DIR)/lj_libdef.h'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'libdef', '-o', '<(INTERMEDIATE_DIR)/lj_libdef.h', '<@(lj_sources)'
]
},
{
'action_name': 'generate_lj_recdef',
'outputs': ['<(INTERMEDIATE_DIR)/lj_recdef.h'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'recdef', '-o', '<(INTERMEDIATE_DIR)/lj_recdef.h', '<@(lj_sources)'
]
},
{
'action_name': 'generate_lj_folddef',
'outputs': ['<(INTERMEDIATE_DIR)/lj_folddef.h'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'folddef', '-o', '<(INTERMEDIATE_DIR)/lj_folddef.h', 'luajit/src/lj_opt_fold.c'
]
},
{
'action_name': 'generate_lj_vmdef',
'outputs': ['<(INTERMEDIATE_DIR)/vmdef.lua'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'vmdef', '-o', '<(INTERMEDIATE_DIR)/vmdef.lua', '<@(lj_sources)'
]
},
{
'action_name': 'generate_lj_ffdef',
'outputs': ['<(INTERMEDIATE_DIR)/lj_ffdef.h'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'ffdef', '-o', '<(INTERMEDIATE_DIR)/lj_ffdef.h', '<@(lj_sources)'
]
},
{
'action_name': 'generate_lj_bcdef',
'outputs': ['<(INTERMEDIATE_DIR)/lj_bcdef.h'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', 'bcdef', '-o', '<(INTERMEDIATE_DIR)/lj_bcdef.h', '<@(lj_sources)'
]
},
{
'action_name': 'generate_lj_vm',
'outputs': ['<(lj_vm)'],
'inputs': [ '<(PRODUCT_DIR)/buildvm' ],
'action': [
'<(PRODUCT_DIR)/buildvm', '-m', '<(asm_format)', '-o', '<(lj_vm)'
]
}
],
},
{
'target_name': 'buildvm',
'type': 'executable',
'sources': [
'luajit/src/buildvm.c',
'luajit/src/buildvm_asm.c',
'luajit/src/buildvm_peobj.c',
'luajit/src/buildvm_lib.c',
'luajit/src/buildvm_fold.c',
],
'rules': [
{
'rule_name': 'generate_header_from_dasc',
'extension': 'dasc',
'outputs': [
'luajit/src/<(RULE_INPUT_ROOT).h'
],
'action': [
'<(PRODUCT_DIR)/lua',
'luajit/dynasm/dynasm.lua',
'-LN',
'-o', 'luajit/src/<(RULE_INPUT_ROOT).h',
'<(RULE_INPUT_PATH)'
],
'process_outputs_as_sources': 0,
'message': 'dynasm <(RULE_INPUT_PATH)'
}
],
},
],
}
|
py | 1a32389d3fda23bd18c2e4a7745efe62d872ce45 | # """Django Endless Pagination settings file."""
from __future__ import unicode_literals
from django.conf import settings
# How many objects are normally displayed in a page
# (overwriteable by templatetag).
PER_PAGE = getattr(settings, 'EL_PAGINATION_PER_PAGE', 10)
# The querystring key of the page number.
PAGE_LABEL = getattr(settings, 'EL_PAGINATION_PAGE_LABEL', 'page')
# See django *Paginator* definition of orphans.
ORPHANS = getattr(settings, 'EL_PAGINATION_ORPHANS', 0)
# If you use the default *show_more* template, here you can customize
# the content of the loader hidden element.
# Html is safe here, e.g. you can show your pretty animated gif:
# EL_PAGINATION_LOADING = """
# <img src="/static/img/loader.gif" alt="loading" />
# """
LOADING = getattr(
settings, 'EL_PAGINATION_LOADING', 'loading')
# Labels for previous and next page links.
PREVIOUS_LABEL = getattr(
settings, 'EL_PAGINATION_PREVIOUS_LABEL', '<')
NEXT_LABEL = getattr(settings, 'EL_PAGINATION_NEXT_LABEL', '>')
# Labels for first and last page links.
FIRST_LABEL = getattr(
settings, 'EL_PAGINATION_FIRST_LABEL', '<<')
LAST_LABEL = getattr(settings, 'EL_PAGINATION_LAST_LABEL', '>>')
# Set to True if your SEO alchemist wants all the links in Digg-style
# pagination to be ``nofollow``.
ADD_NOFOLLOW = getattr(settings, 'EL_PAGINATION_ADD_NOFOLLOW', False)
# Callable (or dotted path to a callable) returning pages to be displayed.
# If None, a default callable is used (which produces Digg-style pagination).
PAGE_LIST_CALLABLE = getattr(
settings, 'EL_PAGINATION_PAGE_LIST_CALLABLE', None)
# The default callable returns a sequence of pages producing Digg-style
# pagination, and depending on the settings below.
DEFAULT_CALLABLE_EXTREMES = getattr(
settings, 'EL_PAGINATION_DEFAULT_CALLABLE_EXTREMES', 3)
DEFAULT_CALLABLE_AROUNDS = getattr(
settings, 'EL_PAGINATION_DEFAULT_CALLABLE_AROUNDS', 2)
# Whether or not the first and last pages arrows are displayed.
DEFAULT_CALLABLE_ARROWS = getattr(
settings, 'EL_PAGINATION_DEFAULT_CALLABLE_ARROWS', False)
# Template variable name for *page_template* decorator.
TEMPLATE_VARNAME = getattr(
settings, 'EL_PAGINATION_TEMPLATE_VARNAME', 'template')
# If page out of range, throw a 404 exception
PAGE_OUT_OF_RANGE_404 = getattr(
settings, 'EL_PAGINATION_PAGE_OUT_OF_RANGE_404', False)
|
py | 1a323a2e1a9cb1e9bfe8a806c3fb166d47547a55 | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='eval-EXP-20190627-220154/weights.pt', help='path of pretrained model')
#parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
CIFAR_CLASSES = 10
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
genotype = eval("genotypes.%s" % args.arch)
# Model architecture
model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
# Keep consistent with the model built in the training script: model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
model = model.cuda()
# Load the pretrained model parameters
utils.load(model, args.model_path)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
# Load the test set
_, test_transform = utils._data_transforms_cifar10(args)
test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)  # transform=test_transform: the transform applied to the test images
test_queue = torch.utils.data.DataLoader(
test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=1)
model.drop_path_prob = args.drop_path_prob
test_acc, test_obj = infer(test_queue, model, criterion)
logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
for step, (input, target) in enumerate(test_queue):
input = Variable(input, volatile=True).cuda()
target = Variable(target, volatile=True).cuda(non_blocking=True)
logits, _ = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data[0], n)
top1.update(prec1.data[0], n)
top5.update(prec5.data[0], n)
if step % args.report_freq == 0:
logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
if __name__ == '__main__':
main()
|
py | 1a323b2686372ed10fbad87101a1672b2a04cfc3 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Learner architectures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import py_utils
import sonnet as snt
import tensorflow as tf
import tf_utils
nest = tf.contrib.framework.nest
RollingFeaturesState = collections.namedtuple("RollingFeaturesState",
["ms", "rms"])
@gin.configurable
class RollingFeatures(snt.AbstractModule):
"""Helper to construct different decay, momentum, and rms rolling averages.
These are used as features in a learned optimizer and are the exact same as
done in SGD+Momentum and RMSProp.
Unlike Adam / RMSProp, we accumulate values at multiple decay rates.
"""
def __init__(self,
name="RollingFeatures",
include_rms=False,
decays=None,
num_features=30,
min=0.0001, # pylint: disable=redefined-builtin
max=0.5, # pylint: disable=redefined-builtin
**kwargs):
self.include_rms = include_rms
if decays is None:
self.decays = tf.constant(
1 - np.logspace(np.log10(min), np.log10(max), num_features),
dtype=tf.float32)
else:
self.decays = tf.constant(decays)
super(RollingFeatures, self).__init__(name=name, **kwargs)
self()
self.var_shapes = []
def _build(self):
pass
@snt.reuse_variables
def initial_state(self, shapes):
ms = []
rms = []
self.var_shapes = shapes
for s in shapes:
# the optimizer works on batches of data, thus 1 batch size.
n_dims = int(np.prod(s))
ms.append(tf.zeros([n_dims, self.decays.shape.as_list()[0]]))
rms.append(tf.zeros([n_dims, self.decays.shape.as_list()[0]]))
return RollingFeaturesState(ms=ms, rms=rms)
@snt.reuse_variables
def current_state(self, shapes):
init_state = self.initial_state(shapes)
return tf_utils.make_variables_matching(init_state)
@snt.reuse_variables
def next_state(self, state, grads):
pad_decay = tf.expand_dims(self.decays, 0)
new_ms_list, new_rms_list = [], []
for ms, rms, g, var_shape in py_utils.eqzip(state.ms, state.rms, grads,
self.var_shapes):
def single_update(grad, ms, rms):
grad = tf.reshape(grad, [-1, 1])
new_ms = ms * pad_decay + grad * (1 - pad_decay)
if self.include_rms:
new_rms = rms * pad_decay + tf.square(grad) * (1 - pad_decay)
return new_ms, new_rms
else:
return new_ms, rms
if isinstance(g, tf.IndexedSlices):
# pylint: disable=unbalanced-tuple-unpacking
new_ms, new_rms = indexed_slices_apply_dense2(single_update, var_shape,
g, [ms, rms], 2)
else:
new_ms, new_rms = single_update(g, ms, rms)
new_ms_list.append(new_ms)
new_rms_list.append(new_rms)
return RollingFeaturesState(ms=new_ms_list, rms=new_rms_list)
def indexed_slices_apply_dense2(fn, var_shape, g_inp, dense_var_inp, n_outs):
"""Helper function to work with sparse tensors.
dense_var_inp has the leading 2 dimensions collapsed forming shape [n_words *
n_words_feat, n_feat]
g_inp on the otherhand is [n_words, n_words_feat]
var_shape is static and is [n_words, n_words_feat]
Arguments:
fn: (gradient: tf.Tensor, *var_args: tf.Tensor) -> [tf.Tensor]
var_shape: list
g_inp: tf.IndexedSlices
dense_var_inp: tf.Tensor list.
n_outs: int
Returns:
dense outputs
"""
grad_idx, grad_value = accumulate_sparse_gradients(g_inp)
n_words, n_word_feat = var_shape
args = []
for a_possibly_nest in dense_var_inp:
def do_on_tensor(a):
n_feat = a.shape.as_list()[1]
n_active = tf.size(grad_idx)
reshaped = tf.reshape(a, [n_words, n_word_feat, n_feat])
sub_reshaped = tf.gather(reshaped, grad_idx)
return tf.reshape(sub_reshaped, [n_active * n_word_feat, n_feat])
args.append(nest.map_structure(do_on_tensor, a_possibly_nest))
returns = fn(grad_value, *args)
def undo(val_pair):
"""Undo the slices."""
full_val, sub_val = val_pair
if tf.shape(full_val).shape.as_list()[0] != 2:
raise NotImplementedError(
"TODO(lmetz) other than this is not implemented.")
n_words, n_word_feat = var_shape
_, n_feat = sub_val.shape.as_list()
n_active = tf.size(grad_idx)
shape = [n_active, n_word_feat * n_feat]
in_shape_form = tf.reshape(sub_val, shape)
new_shape = [n_words, n_word_feat * n_feat]
mask_shape = [n_words, n_word_feat * n_feat]
scattered = tf.scatter_nd(
tf.reshape(tf.to_int32(grad_idx), [-1, 1]),
in_shape_form,
shape=new_shape)
mask = tf.scatter_nd(
tf.reshape(tf.to_int32(grad_idx), [-1, 1]),
tf.ones_like(in_shape_form),
shape=mask_shape)
# put back into the flat format
scattered = tf.reshape(scattered, [n_words * n_word_feat, n_feat])
mask = tf.reshape(mask, [n_words * n_word_feat, n_feat])
# this is the update part / fake scatter_update but with gradients.
return full_val * (1 - mask) + scattered * mask
dense_outs = []
for ret, dense_v in list(py_utils.eqzip(returns, dense_var_inp[0:n_outs])):
flat_out = list(map(undo,
py_utils.eqzip(nest.flatten(dense_v), nest.flatten(ret))))
dense_outs.append(nest.pack_sequence_as(dense_v, flat_out))
return dense_outs
def accumulate_sparse_gradients(grad):
"""Accumulates repeated indices of a sparse gradient update.
Args:
grad: a tf.IndexedSlices gradient
Returns:
grad_indices: unique indices
grad_values: gradient values corresponding to the indices
"""
grad_indices, grad_segments = tf.unique(grad.indices)
grad_values = tf.unsorted_segment_sum(grad.values, grad_segments,
tf.size(grad_indices))
return grad_indices, grad_values
def tanh_embedding(x):
"""Embed time in a format usable by a neural network.
This embedding involves dividing x by different timescales and running through
a squashing function.
Args:
x: tf.Tensor
Returns:
tf.Tensor
"""
mix_proj = []
for i in [3, 10, 30, 100, 300, 1000, 3000, 10000, 300000]:
mix_proj.append(tf.tanh(tf.to_float(tf.to_float(x) / float(i)) - 1.))
return tf.stack(mix_proj)
class SecondMomentNormalizer(snt.AbstractModule):
def _build(self, x, is_training=True):
normed = x * tf.rsqrt(1e-5 +
tf.reduce_mean(tf.square(x), axis=0, keep_dims=True))
return normed
|
py | 1a323b4f761fda5d138d455f90a12c56b2e1224a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 09:56:48 2019
@author: adamreidsmith
"""
'''
Create datafiles of 1D solution to the Van der Pol equation: x'' - a*(1 - x^2)*x' + b*y = f(t).
Datafiles include the computed solution, its fast Fourier transform, a histogram
of x(t) vs t mod T where T is the first period of f, the phase(s) of f, and the
parameters a and b.
These datafiles are created for use in 'nn_hist.py', 'nn_ft.py', and 'nn_wavelet.py'.
'''
import numpy as np
from scipy.integrate import odeint
from os import path, mkdir
###############################################################################
'''
Inputs:
tmax: The upper bound of the interval [0,tmax] on which to solve
the Van der Pol equation.
initial_cond: Initial condition. Can be set to 'random' or a list of length 2.
n_points: The number of time steps to include in each solution.
num_ab_pairs: The number of times to solve the equation, i.e. the number
of data points.
n_periods: Number of periodic terms in the forcing function.
include_phase: Include or exclude a random phase in the forcing terms.
C: Coefficients of the terms in the forcing function. Must be
a list of length 'n_periods'.
T: Periods of the terms in the forcing function. Must be a list
of length 'n_periods'.
file_name: Name of the datafile.
'''
###############################################################################
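# Usage sketch (hypothetical arguments): generate_data(num_ab_pairs=100, file_name='vdp_demo')
# writes ./datafiles/vdp_demo.npy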
def generate_data(tmax=500,
initial_cond='random',
n_points=2**10,
num_ab_pairs=800,
n_periods=3,
include_phase=True,
C=[1, np.sqrt(2), np.pi/3],
T=[5, 10*np.sqrt(2)-2, 30*np.sqrt(3)],
file_name=None):
twopi = 2*np.pi
#Create a directory to store datafiles if it doesn't aready exist
if not path.exists('./datafiles'):
mkdir('./datafiles')
assert type(C) == type(T) == list and n_periods == len(C) == len(T), \
'C and T must be lists of length \'n_periods\'.'
#RHS
def f(t, phi, C, T):
val = 0
if include_phase:
for i in range(n_periods):
val += C[i] * np.cos(twopi/T[i]*t + phi[i])
return val
else:
for i in range(n_periods):
val += C[i] * np.cos(twopi/T[i]*t)
return val
data = []
for i in range(num_ab_pairs):
a = np.random.rand() #Random number in [0,1)
b = np.random.rand() #Random number in [0,1)
if initial_cond == 'random':
ic = [2*np.random.rand() - 1, 2*np.random.rand() - 1]
else:
ic = initial_cond
phi = []
if include_phase:
for _ in range(n_periods):
phi.append(twopi*np.random.rand())
#Van der Pol oscillator equation
def vanderpol(ic,t):
x = ic[0]
y = ic[1]
yd = f(t, phi, C, T) + a*(1 - x**2)*y - b*x
xd = y
return [xd,yd]
#Solve the ivp numerically
npoints = 10*n_points
tfull = np.linspace(0,tmax,npoints)
sol = odeint(vanderpol, ic, tfull)
#Keep every tenth data point
indices = [i for i in range(npoints) if i % 10 == 0]
t = np.array([tfull[i] for i in indices])
tmodT1 = t % T[0]
x = [sol[i][0] for i in indices]
n_bins = 100
soln = np.array([[t[i],x[i]] for i in range(len(t))])
fftdata = np.fft.fft(x)
FT = np.array([[t[i],fftdata[i]] for i in range(len(t))])
data.append(soln)
data.append(FT)
data.append(np.histogram2d(tmodT1, x, bins=n_bins)[0])
data.append(phi)
data.append([a,b])
if i % 10 == 0 and __name__ == '__main__':
print('Iteration:', i, 'of', num_ab_pairs)
if file_name is None:
file_name = 'vdp_data_' + str(num_ab_pairs) + 'pts_[soln,FT,hist,phase(' + \
str(include_phase) + '),param]'
file_path = './datafiles/' + file_name
print('Writing datafile to', file_path + '.npy')
np.save(file_path, data)
print('Done')
if __name__ == '__main__':
generate_data() |
py | 1a323c2aa3d87f57e9f3e93aa9acc0c8040444f7 | from collections import deque
import numpy as np
import time
import os
import json
from isaac.utilities import *
# set the current working directory to the deployed package folder. This is required by isaac.
os.chdir("/home/davis/deploy/davis/rm_isaac_bridge-pkg")
from engine.pyalice import Codelet
from engine.pyalice.gui.composite_widget import CompositeWidget
class IsaacEffector:
def __init__(self, config):
self.config = config
self._timeout = config['timeout']
self._widgets = self.config['widgets']
self._stream_articulations = self.config['stream_articulations']
# Since isaac does not support closed kinematics (4-bar linkage), there are 4 dof, where
# left_finger == left_finger_upper and right_finger == right_finger_upper
self.joint_names = self._load_kinematics(config['effector_type'])
self.joints = CompositeArray(self.joint_names, 'position', [0]*len(self.joint_names))
if self._widgets:
self.finger_widget = CompositeWidget(self.joint_names, 'position', [[-np.pi/2, np.pi/2]]*6)
self._command_queue = deque()
def _load_kinematics(self, effector_type):
valid_kinematics = ['smarthand']
if effector_type not in valid_kinematics:
raise ValueError('No valid kinematic file found for '+effector_type+'. Valid kinematics exist for '+', '.join(valid_kinematics))
self._kinematic_file = "apps/assets/kinematic_trees/{}.kinematic.json".format(effector_type)
joint_names = []
with open(self._kinematic_file,'r') as fd:
kt = json.load(fd)
for link in kt['links']:
if 'motor' in link and link['motor']['type'] != 'constant':
joint_names.append(link['name'])
return joint_names
def command(self, action, payload=None):
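# Queue the request for the Isaac codelets and poll (every 10 ms) until a response
# arrives or self._timeout elapses; returns the response, or None on timeout.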
if action not in ['get_articulation_angles', 'set_articulation_angles']:
raise ValueError(action+' is not a valid action type')
if type(payload) in [list, np.ndarray]:
if len(list(payload)) == 2:
payload = [payload[0], payload[0], payload[0], payload[1], payload[1], payload[1]]
payload = CompositeArray(self.joint_names, 'position', payload)
command = Command(action, payload)
self._command_queue.append(command)
elapsed = 0
while command.response is None and elapsed < self._timeout:
elapsed += 0.01
time.sleep(0.01)
return command.response
def enable_articulation_stream(self):
self._stream_articulations = True
def disable_articulation_stream(self):
self._stream_articulations = False
def enable_all_streams(self):
self._stream_articulations = True
def disable_all_streams(self):
self._stream_articulations = False
def _JointReciever(self):
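# Codelet factory: the returned class ticks on incoming joint-state messages, answers
# queued 'get_articulation_angles' commands, and optionally streams state to the widget.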
parent = self
class JointReciever(Codelet):
def start(self):
self.rx = self.isaac_proto_rx("CompositeProto", "state")
self.tick_on_message(self.rx)
def tick(self):
if len(parent._command_queue) > 0 and parent._command_queue[0].action == 'get_articulation_angles':
msg = self.rx.message
parent.joints.composite = msg
command = parent._command_queue.popleft()
values = parent.joints.values
command.response = values
elif parent._stream_articulations:
msg = self.rx.message
parent.joints.composite = msg
else:
return
if parent._widgets:
parent.finger_widget.composite = msg
return JointReciever
def _JointTransmitter(self):
parent = self
class JointTransmitter(Codelet):
def start(self):
self.tx = self.isaac_proto_tx("CompositeProto", "command")
self.tick_periodically(0.03)
def tick(self):
if len(parent._command_queue) > 0 and parent._command_queue[0].action == 'set_articulation_angles':
self.tx._msg = parent._command_queue[0].payload.composite
self.tx.publish()
command = parent._command_queue.popleft()
command.response = True
elif parent._widgets and parent._stream_articulations:
self.tx._msg = parent.finger_widget.composite
self.tx.publish()
return JointTransmitter
def connect_app(self, app):
# load dependency subgraphs
app.load(filename="packages/planner/apps/multi_joint_lqr_control.subgraph.json", prefix="lqr_gripper")
simulation_interface = app.nodes["simulation.interface"]
lqr_interface = app.nodes["lqr_gripper.subgraph"]["interface"]
# configs
app.nodes["lqr_gripper.kinematic_tree"]["KinematicTree"].config.kinematic_file = self._kinematic_file
lqr_planner = app.nodes["lqr_gripper.local_plan"]["MultiJointLqrPlanner"]
lqr_planner.config.speed_min = [-self.config['joint_speed']] * len(self.joint_names)
lqr_planner.config.speed_max = [self.config['joint_speed']] * len(self.joint_names)
lqr_planner.config.acceleration_min = [-self.config['joint_accel']] * len(self.joint_names)
lqr_planner.config.acceleration_max = [self.config['joint_accel']] * len(self.joint_names)
# create nodes
joints_in_node = app.add("joints_input")
joints_in_node.add(self._JointReciever(), 'articulation_reciever')
joints_out_node = app.add("joints_output")
joints_out_node.add(self._JointTransmitter(), 'articulation_transmitter')
# connect edges
app.connect(simulation_interface["output"], "joint_state", lqr_interface, "joint_state")
app.connect(simulation_interface["output"], "joint_state", joints_in_node['articulation_reciever'], "state")
app.connect(joints_out_node['articulation_transmitter'], "command", lqr_interface, "joint_target")
app.connect(lqr_interface, "joint_command", simulation_interface["input"], "joint_position")
return app |
py | 1a323c3ae012acfe155d41950d7719c3111b2a2e | import representation
from representation import Segment
############# morphologies
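# Each morphology below is (presumably) a list of branches; a branch is a list of Segment
# chains, and a Segment is (proximal, distal, tag) with points given as (x, y, radius)
# and SWC-style tags (1 = soma, 2 = axon, 3 = dendrite).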
tmp = [
[[Segment((0.0, 0.0, 2.0), (4.0, 0.0, 2.0), 1), Segment((4.0, 0.0, 0.8), (8.0, 0.0, 0.8), 3), Segment((8.0, 0.0, 0.8), (12.0, -0.5, 0.8), 3)]],
[[Segment((12.0, -0.5, 0.8), (20.0, 4.0, 0.4), 3), Segment((20.0, 4.0, 0.4), (26.0, 6.0, 0.2), 3)]],
[[Segment((12.0, -0.5, 0.5), (19.0, -3.0, 0.5), 3)]],
[[Segment((19.0, -3.0, 0.5), (24.0, -7.0, 0.2), 3)]],
[[Segment((19.0, -3.0, 0.5), (23.0, -1.0, 0.2), 3), Segment((23.0, -1.0, 0.2), (26.0, -2.0, 0.2), 3)]],
[[Segment((0.0, 0.0, 2.0), (-7.0, 0.0, 0.4), 2), Segment((-7.0, 0.0, 0.4), (-10.0, 0.0, 0.4), 2)]],]
label_morph = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 2.0), (4.0, 0.0, 2.0), 1)], [Segment((5.0, 0.0, 0.8), (8.0, 0.0, 0.8), 3), Segment((8.0, 0.0, 0.8), (12.0, -0.5, 0.8), 3)]],
[[Segment((12.0, -0.5, 0.8), (20.0, 4.0, 0.4), 3), Segment((20.0, 4.0, 0.4), (26.0, 6.0, 0.2), 3)]],
[[Segment((12.0, -0.5, 0.5), (19.0, -3.0, 0.5), 3)]],
[[Segment((19.0, -3.0, 0.5), (24.0, -7.0, 0.2), 3)]],
[[Segment((19.0, -3.0, 0.5), (23.0, -1.0, 0.2), 3), Segment((23.0, -1.0, 0.2), (26.0, -2.0, 0.2), 3)]],
[[Segment((-2.0, 0.0, 0.4), (-10.0, 0.0, 0.4), 2)]],]
detached_morph = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 0.5), (1.0, 0.0, 1.5), 1), Segment((1.0, 0.0, 1.5), (2.0, 0.0, 2.5), 1), Segment((2.0, 0.0, 2.5), (3.0, 0.0, 2.5), 1), Segment((3.0, 0.0, 2.5), (4.0, 0.0, 1.2), 1), Segment((4.0, 0.0, 0.8), (8.0, 0.0, 0.8), 3), Segment((8.0, 0.0, 0.8), (12.0, -0.5, 0.8), 3)]],
[[Segment((12.0, -0.5, 0.8), (20.0, 4.0, 0.4), 3), Segment((20.0, 4.0, 0.4), (26.0, 6.0, 0.2), 3)]],
[[Segment((12.0, -0.5, 0.5), (19.0, -3.0, 0.5), 3)]],
[[Segment((19.0, -3.0, 0.5), (24.0, -7.0, 0.2), 3)]],
[[Segment((19.0, -3.0, 0.5), (23.0, -1.0, 0.2), 3), Segment((23.0, -1.0, 0.2), (26.0, -2.0, 0.2), 3)]],
[[Segment((0.0, 0.0, 0.4), (-7.0, 0.0, 0.4), 2), Segment((-7.0, 0.0, 0.4), (-10.0, 0.0, 0.4), 2)]],]
stacked_morph = representation.make_morph(tmp)
tmp = [
[[Segment((-2.0, 0.0, 2.0), (2.0, 0.0, 2.0), 1)]],]
sphere_morph = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 1.0), (10.0, 0.0, 0.5), 3)]],]
branch_morph1 = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 1.0), (3.0, 0.2, 0.8), 1), Segment((3.0, 0.2, 0.8), (5.0, -0.1, 0.7), 2), Segment((5.0, -0.1, 0.7), (8.0, 0.0, 0.6), 2), Segment((8.0, 0.0, 0.6), (10.0, 0.0, 0.5), 3)]],]
branch_morph2 = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 1.0), (3.0, 0.2, 0.8), 1), Segment((3.0, 0.2, 0.8), (5.0, -0.1, 0.7), 2)], [Segment((6.0, -0.1, 0.7), (9.0, 0.0, 0.6), 2), Segment((9.0, 0.0, 0.6), (11.0, 0.0, 0.5), 3)]],]
branch_morph3 = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 1.0), (3.0, 0.2, 0.8), 1), Segment((3.0, 0.2, 0.8), (5.0, -0.1, 0.7), 2), Segment((5.0, -0.1, 0.7), (8.0, 0.0, 0.5), 2), Segment((8.0, 0.0, 0.3), (10.0, 0.0, 0.5), 3)]],]
branch_morph4 = representation.make_morph(tmp)
tmp = [
[[Segment((0.0, 0.0, 1.0), (10.0, 0.0, 0.5), 3)]],
[[Segment((10.0, 0.0, 0.5), (15.0, 3.0, 0.2), 3)]],
[[Segment((10.0, 0.0, 0.5), (15.0, -3.0, 0.2), 3)]],]
yshaped_morph = representation.make_morph(tmp)
tmp = [
[[Segment((-3.0, 0.0, 3.0), (3.0, 0.0, 3.0), 1)], [Segment((4.0, -1.0, 0.6), (10.0, -2.0, 0.5), 3), Segment((10.0, -2.0, 0.5), (15.0, -1.0, 0.5), 3)]],
[[Segment((15.0, -1.0, 0.5), (18.0, -5.0, 0.3), 3)]],
[[Segment((15.0, -1.0, 0.5), (20.0, 2.0, 0.3), 3)]],]
ysoma_morph1 = representation.make_morph(tmp)
tmp = [
[[Segment((-3.0, 0.0, 3.0), (3.0, 0.0, 3.0), 1)]],
[[Segment((4.0, -1.0, 0.6), (10.0, -2.0, 0.5), 3), Segment((10.0, -2.0, 0.5), (15.0, -1.0, 0.5), 3)]],
[[Segment((15.0, -1.0, 0.5), (18.0, -5.0, 0.3), 3)]],
[[Segment((15.0, -1.0, 0.5), (20.0, 2.0, 0.3), 3)]],
[[Segment((2.0, 1.0, 0.6), (12.0, 4.0, 0.5), 3)]],
[[Segment((12.0, 4.0, 0.5), (18.0, 4.0, 0.3), 3)]],
[[Segment((12.0, 4.0, 0.5), (16.0, 9.0, 0.1), 3)]],
[[Segment((-3.5, 0.0, 1.5), (-6.0, -0.2, 0.5), 2), Segment((-6.0, -0.2, 0.5), (-15.0, -0.1, 0.5), 2)]],]
ysoma_morph2 = representation.make_morph(tmp)
tmp = [
[[Segment((-3.0, 0.0, 3.0), (3.0, 0.0, 3.0), 1)]],
[[Segment((3.0, 0.0, 0.6), (9.0, -1.0, 0.5), 3), Segment((9.0, -1.0, 0.5), (14.0, 0.0, 0.5), 3)]],
[[Segment((14.0, 0.0, 0.5), (17.0, -4.0, 0.3), 3)]],
[[Segment((14.0, 0.0, 0.5), (19.0, 3.0, 0.3), 3)]],
[[Segment((3.0, 0.0, 0.6), (13.0, 3.0, 0.5), 3)]],
[[Segment((13.0, 3.0, 0.5), (19.0, 3.0, 0.3), 3)]],
[[Segment((13.0, 3.0, 0.5), (17.0, 8.0, 0.1), 3)]],
[[Segment((-3.0, 0.0, 1.5), (-5.5, -0.2, 0.5), 2), Segment((-5.5, -0.2, 0.5), (-14.5, -0.1, 0.5), 2)]],]
ysoma_morph3 = representation.make_morph(tmp)
tmp = [
[[Segment((-3.0, 0.0, 0.7), (0.0, 0.0, 1.0), 2)], [Segment((0.0, 0.0, 1.0), (2.0, 0.0, 1.0), 1)], [Segment((2.0, 0.0, 1.0), (20.0, 0.0, 1.0), 3)]],
]
swc_morph = representation.make_morph(tmp)
############# locsets
ls_root = {'type': 'locset', 'value': [(0, 0.0)]}
ls_term = {'type': 'locset', 'value': [(1, 1.0), (3, 1.0), (4, 1.0), (5, 1.0)]}
ls_rand_dend = {'type': 'locset', 'value': [(0, 0.5547193370156588), (0, 0.5841758202819731), (0, 0.607192003545501), (0, 0.6181091003428546), (0, 0.6190845627201184), (0, 0.7027325639263277), (0, 0.7616129092226993), (0, 0.9645150497869694), (1, 0.15382287505908834), (1, 0.2594719824047551), (1, 0.28087652335178354), (1, 0.3729681478609085), (1, 0.3959560134241004), (1, 0.4629424550242548), (1, 0.47346867377446744), (1, 0.5493486883630476), (1, 0.6227685370674116), (1, 0.6362196581003494), (1, 0.6646511214508091), (1, 0.7157318936458146), (1, 0.7464198558822775), (1, 0.77074507802833), (1, 0.7860238136304932), (1, 0.8988928261704698), (1, 0.9581259332943499), (2, 0.12773985425987294), (2, 0.3365926476076694), (2, 0.44454300804769703), (2, 0.5409466695719178), (2, 0.5767511435223905), (2, 0.6340206909931745), (2, 0.6354772583375223), (2, 0.6807941995943213), (2, 0.774655947503608), (3, 0.05020708596877571), (3, 0.25581431877212274), (3, 0.2958305460715556), (3, 0.296698184761692), (3, 0.509669134988683), (3, 0.7662305637426007), (3, 0.8565839889923518), (3, 0.8889077221517746), (4, 0.24311286693286885), (4, 0.4354361205546333), (4, 0.4467752481260171), (4, 0.5308169153994543), (4, 0.5701465671464049), (4, 0.670081739879954), (4, 0.6995486862583797), (4, 0.8186709628604206), (4, 0.9141224600171143)]}
ls_loc15 = {'type': 'locset', 'value': [(1, 0.5)]}
ls_uniform0 = {'type': 'locset', 'value': [(0, 0.5841758202819731), (1, 0.6362196581003494), (1, 0.7157318936458146), (1, 0.7464198558822775), (2, 0.6340206909931745), (2, 0.6807941995943213), (3, 0.296698184761692), (3, 0.509669134988683), (3, 0.7662305637426007), (4, 0.5701465671464049)]}
ls_uniform1 = {'type': 'locset', 'value': [(0, 0.9778060763285382), (1, 0.19973428495790843), (1, 0.8310607916260988), (2, 0.9210229159315735), (2, 0.9244292525837472), (2, 0.9899772550845479), (3, 0.9924233395972087), (4, 0.3641426305909531), (4, 0.4787812247064867), (4, 0.5138656268861914)]}
ls_branchmid = {'type': 'locset', 'value': [(0, 0.5), (1, 0.5), (2, 0.5), (3, 0.5), (4, 0.5), (5, 0.5)]}
ls_distal = {'type': 'locset', 'value': [(1, 0.796025976329944), (3, 0.6666666666666667), (4, 0.39052429175127), (5, 1.0)]}
ls_proximal = {'type': 'locset', 'value': [(1, 0.29602597632994393), (2, 0.0), (5, 0.6124999999999999)]}
ls_distint_in = {'type': 'locset', 'value': [(1, 0.5), (2, 0.7), (5, 0.1)]}
ls_proxint_in = {'type': 'locset', 'value': [(1, 0.8), (2, 0.3)]}
ls_loctest = {'type': 'locset', 'value': [(1, 1.0), (2, 0.0), (5, 0.0)]}
ls_restrict = {'type': 'locset', 'value': [(1, 1.0), (3, 1.0), (4, 1.0)]}
############# regions
reg_empty = {'type': 'region', 'value': []}
reg_all = {'type': 'region', 'value': [(0, 0.0, 1.0), (1, 0.0, 1.0), (2, 0.0, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 0.0, 1.0)]}
reg_tag1 = {'type': 'region', 'value': [(0, 0.0, 0.3324708796524168)]}
reg_tag2 = {'type': 'region', 'value': [(5, 0.0, 1.0)]}
reg_tag3 = {'type': 'region', 'value': [(0, 0.3324708796524168, 1.0), (1, 0.0, 1.0), (2, 0.0, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0)]}
reg_tag4 = {'type': 'region', 'value': []}
reg_soma = {'type': 'region', 'value': [(0, 0.0, 0.3324708796524168)]}
reg_axon = {'type': 'region', 'value': [(5, 0.0, 1.0)]}
reg_dend = {'type': 'region', 'value': [(0, 0.3324708796524168, 1.0), (1, 0.0, 1.0), (2, 0.0, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0)]}
reg_radlt5 = {'type': 'region', 'value': [(1, 0.44403896449491587, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 0.65625, 1.0)]}
reg_radle5 = {'type': 'region', 'value': [(1, 0.44403896449491587, 1.0), (2, 0.0, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 0.65625, 1.0)]}
reg_radgt5 = {'type': 'region', 'value': [(0, 0.0, 1.0), (1, 0.0, 0.44403896449491587), (5, 0.0, 0.65625)]}
reg_radge5 = {'type': 'region', 'value': [(0, 0.0, 1.0), (1, 0.0, 0.44403896449491587), (2, 0.0, 1.0), (3, 0.0, 0.0), (4, 0.0, 0.0), (5, 0.0, 0.65625)]}
reg_rad36 = {'type': 'region', 'value': [(1, 0.29602597632994393, 0.796025976329944), (2, 0.0, 1.0), (3, 0.0, 0.6666666666666667), (4, 0.0, 0.39052429175127), (5, 0.6124999999999999, 1.0)]}
reg_branch0 = {'type': 'region', 'value': [(0, 0.0, 1.0)]}
reg_branch3 = {'type': 'region', 'value': [(3, 0.0, 1.0)]}
reg_cable_1_01 = {'type': 'region', 'value': [(1, 0.0, 1.0)]}
reg_cable_1_31 = {'type': 'region', 'value': [(1, 0.3, 1.0)]}
reg_cable_1_37 = {'type': 'region', 'value': [(1, 0.3, 0.7)]}
reg_proxint = {'type': 'region', 'value': [(0, 0.7697564611867647, 1.0), (1, 0.4774887508467626, 0.8), (2, 0.0, 0.3)]}
reg_proxintinf = {'type': 'region', 'value': [(0, 0.0, 1.0), (1, 0.0, 0.8), (2, 0.0, 0.3)]}
reg_distint = {'type': 'region', 'value': [(1, 0.5, 0.8225112491532374), (2, 0.7, 1.0), (3, 0.0, 0.432615327328525), (4, 0.0, 0.3628424955125098), (5, 0.1, 0.6)]}
reg_distintinf = {'type': 'region', 'value': [(1, 0.5, 1.0), (2, 0.7, 1.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 0.1, 1.0)]}
reg_lhs = {'type': 'region', 'value': [(0, 0.5, 1.0), (1, 0.0, 0.5)]}
reg_rhs = {'type': 'region', 'value': [(1, 0.0, 1.0)]}
reg_and = {'type': 'region', 'value': [(1, 0.0, 0.5)]}
reg_or = {'type': 'region', 'value': [(0, 0.5, 1.0), (1, 0.0, 1.0)]}
|
py | 1a323c898e44cca1319eb7c2452115fe4606a385 | # created April 2017
# by TEASER Development Team
from teaser.logic.archetypebuildings.tabula.de.singlefamilyhouse import \
SingleFamilyHouse
class ApartmentBlock(SingleFamilyHouse):
"""Archetype for TABULA Apartment Block
Archetype according to TABULA building typology
(http://webtool.building-typology.eu/#bm).
Description of:
- estimation factors
- always 4 walls, 1 roof, 1 floor, 4 windows, one door (default
orientation?)
- how we calculate facade and window area
- calculate u-values
- zones (one zone)
    - differences between TABULA and our approach (net floor area, height
and number of storeys)
- how to proceed with rooftops (keep them as flat roofs or pitched
roofs? what orientation?)
Parameters
----------
parent: Project()
The parent class of this object, the Project the Building belongs to.
Allows for better control of hierarchical structures. If not None it
adds this Building instance to Project.buildings.
(default: None)
name : str
Individual name
year_of_construction : int
Year of first construction
height_of_floors : float [m]
Average height of the buildings' floors
number_of_floors : int
Number of building's floors above ground
net_leased_area : float [m2]
        Total net leased area of building. This area is NOT the footprint
of a building
with_ahu : Boolean
If set to True, an empty instance of BuildingAHU is instantiated and
assigned to attribute central_ahu. This instance holds information for
central Air Handling units. Default is False.
internal_gains_mode: int [1, 2, 3]
mode for the internal gains calculation by persons:
1: Temperature and activity degree dependent calculation. The
calculation is based on SIA 2024 (default)
2: Temperature and activity degree independent calculation, the max.
heatflowrate is prescribed by the parameter
fixed_heat_flow_rate_persons.
3: Temperature and activity degree dependent calculation with
consideration of moisture. The calculation is based on SIA 2024
construction_type : str
        Construction type of used wall constructions; default is "existing
state"
existing state:
construction of walls according to existing state in TABULA
usual refurbishment:
construction of walls according to usual refurbishment in
TABULA
advanced refurbishment:
construction of walls according to advanced refurbishment in
TABULA
"""
def __init__(
self,
parent,
name=None,
year_of_construction=None,
number_of_floors=None,
height_of_floors=None,
net_leased_area=None,
with_ahu=False,
internal_gains_mode=1,
construction_type=None):
super(ApartmentBlock, self).__init__(
parent,
name,
year_of_construction,
number_of_floors,
height_of_floors,
net_leased_area,
with_ahu,
internal_gains_mode,
construction_type)
self.construction_type = construction_type
self.number_of_floors = number_of_floors
self.height_of_floors = height_of_floors
self._construction_type_1 = self.construction_type + '_1_AB'
self._construction_type_2 = self.construction_type + '_2_AB'
self.zone_area_factors = {"SingleDwelling": [1, "Living"]}
self._outer_wall_names_1 = {
"ExteriorFacadeNorth_1": [90.0, 0.0],
"ExteriorFacadeEast_1": [90.0, 90.0],
"ExteriorFacadeSouth_1": [90.0, 180.0],
"ExteriorFacadeWest_1": [90.0, 270.0]}
self._outer_wall_names_2 = {
"ExteriorFacadeNorth_2": [90.0, 0.0],
"ExteriorFacadeEast_2": [90.0, 90.0],
"ExteriorFacadeSouth_2": [90.0, 180.0],
"ExteriorFacadeWest_2": [90.0, 270.0]}
self.roof_names_1 = {"Rooftop_1": [0, -1]} # [0, -1]
self.roof_names_2 = {"Rooftop_2": [0, -1]}
self.ground_floor_names_1 = {
"GroundFloor_1": [0, -2]} # [0, -2]
self.ground_floor_names_2 = {
"GroundFloor_2": [0, -2]}
self.door_names = {"Door": [90.0, 270]}
self.window_names_1 = {
"WindowFacadeNorth_1": [90.0, 0.0],
"WindowFacadeEast_1": [90.0, 90.0],
"WindowFacadeSouth_1": [90.0, 180.0],
"WindowFacadeWest_1": [90.0, 270.0]}
self.window_names_2 = {
"WindowFacadeNorth_2": [90.0, 0.0],
"WindowFacadeEast_2": [90.0, 90.0],
"WindowFacadeSouth_2": [90.0, 180.0],
"WindowFacadeWest_2": [90.0, 270.0]}
# [tilt, orientation]
self.inner_wall_names = {"InnerWall": [90.0, 0.0]}
self.ceiling_names = {"Ceiling": [0.0, -1]}
self.floor_names = {"Floor": [0.0, -2]}
# Rooftop1, Rooftop2, Wall1, Wall2, GroundFloor1, GroundFloor2,
# Window1, Window2, Door
# Area/ReferenceFloorArea
self.facade_estimation_factors = {
(1860, 1918): {
'rt1': 0.27961,
'rt2': 0.0,
'ow1': 0.36840,
'ow2': 0.0,
'gf1': 0.19747,
'gf2': 0.0,
'win1': 0.16429,
'win2': 0.0,
'door': 0.00241},
(1919, 1948): {
'rt1': 0.25889,
'rt2': 0.0,
'ow1': 0.83827,
'ow2': 0.0,
'gf1': 0.26658,
'gf2': 0.0,
'win1': 0.18767,
'win2': 0.0,
'door': 0.00135},
(1949, 1957): {
'rt1': 0.22052,
'rt2': 0.0,
'ow1': 0.85839,
'ow2': 0.0,
'gf1': 0.22052,
'gf2': 0.0,
'win1': 0.18397,
'win2': 0.0,
'door': 0.00125},
(1958, 1968): {
'rt1': 0.12339,
'rt2': 0.0,
'ow1': 0.83555,
'ow2': 0.0,
'gf1': 0.11814,
'gf2': 0.0,
'win1': 0.17674,
'win2': 0.0,
'door': 0.00051},
(1969, 1978): {
'rt1': 0.16255,
'rt2': 0.0,
'ow1': 0.64118,
'ow2': 0.0,
'gf1': 0.16255,
'gf2': 0.0,
'win1': 0.16406,
'win2': 0.0,
'door': 0.0006}}
self.building_age_group = None
if self.with_ahu is True:
self.central_ahu.temperature_profile = (
7 * [293.15] +
12 * [295.15] +
6 * [293.15])
self.central_ahu.min_relative_humidity_profile = (25 * [0.45])
self.central_ahu.max_relative_humidity_profile = (25 * [0.55])
self.central_ahu.v_flow_profile = (
7 * [0.0] + 12 * [1.0] + 6 * [0.0])
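# Illustrative usage sketch (added for clarity, not part of the TABULA data).
# The constructor arguments follow the signature defined above; the Project
# import path and the generate_archetype() call are assumptions based on how
# other TEASER archetypes are typically driven and may differ between versions.
#
#   from teaser.project import Project
#
#   prj = Project()
#   block = ApartmentBlock(
#       parent=prj,
#       name="ApartmentBlock1965",
#       year_of_construction=1965,
#       number_of_floors=4,
#       height_of_floors=2.8,
#       net_leased_area=800.0)
#   block.generate_archetype()  # assumed to build zones/elements as in other archetypes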
|
py | 1a323d7ccedddd5a7a9c0cf206999a6632213bfc | from setuptools import find_packages, setup
setup(
name="sds011-pm-sensor-python",
version="0.0.1",
description="A parser for the sds011 pm sensor serial interface",
author="Jesse Collis",
author_email="[email protected]",
url="https://github.com/jessedc/sds011-pm-sensor-python",
packages=find_packages(exclude=["*.tests"]),
test_suite="sds011.tests",
install_requires=[
"influxdb>=5.2",
"pyserial>=3.4"
],
setup_requires=[
],
tests_require=[
],
entry_points={
"console_scripts": [
"sds011 = sds011.__main__:main",
],
},
)
|
py | 1a323da8def30d678dd8aefe5bb63b0957abf66c | import logging
from unittest import TestCase
from libumccr.aws import liblambda
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LibLambdaUnitTests(TestCase):
def test_transpose_fn_url_event(self):
"""
python -m unittest tests.aws.test_liblambda.LibLambdaUnitTests.test_transpose_fn_url_event
"""
mock_event = {
"version": "2.0",
"routeKey": "$default",
"headers": {
"content-type": "application/json",
},
"queryStringParameters": {
"subject_id": "SBJ00001"
},
"requestContext": {
"http": {
"method": "POST",
}
},
"body": {"subject_id": "SBJ00001"},
"isBase64Encoded": False,
}
event = liblambda.transpose_fn_url_event(event=mock_event)
logger.info(event)
self.assertEqual(event['subject_id'], "SBJ00001")
def test_transpose_fn_url_event_merged(self):
"""
python -m unittest tests.aws.test_liblambda.LibLambdaUnitTests.test_transpose_fn_url_event_merged
"""
mock_event = {
"version": "2.0",
"routeKey": "$default",
"headers": {
"content-type": "application/json",
},
"queryStringParameters": {
"library_id": "L9900111"
},
"requestContext": {
"http": {
"method": "POST",
}
},
"body": {"subject_id": "SBJ00001"},
"isBase64Encoded": False,
}
event = liblambda.transpose_fn_url_event(event=mock_event)
logger.info(event)
self.assertEqual(event['subject_id'], "SBJ00001")
self.assertEqual(event['library_id'], "L9900111")
def test_transpose_fn_url_event_passthrough(self):
"""
python -m unittest tests.aws.test_liblambda.LibLambdaUnitTests.test_transpose_fn_url_event_passthrough
"""
mock_event = {
"subject_id": "SBJ00001",
"version": "2.0",
}
event = liblambda.transpose_fn_url_event(event=mock_event)
logger.info(event)
self.assertEqual(mock_event, event)
|
py | 1a323dcc0be95ce3ea9f4be4eb379fad5c97d6f1 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import os
from numpy.testing import assert_allclose
import pytest
from jax import jit, random
import jax.numpy as jnp
import numpyro
import numpyro.distributions as dist
from numpyro.distributions.transforms import AffineTransform
from numpyro.infer import MCMC, NUTS
from numpyro.infer.reparam import TransformReparam
def test_dist_pytree():
from tensorflow_probability.substrates.jax import distributions as tfd
from numpyro.contrib.tfp.distributions import TFPDistribution
@jit
def f(x):
with numpyro.handlers.seed(rng_seed=0), numpyro.handlers.trace() as tr:
numpyro.sample("x", tfd.Normal(x, 1))
return tr["x"]["fn"]
res = f(0.0)
assert isinstance(res, TFPDistribution)
assert res.loc == 0
assert res.scale == 1
@pytest.mark.filterwarnings("ignore:can't resolve package")
def test_transformed_distributions():
from tensorflow_probability.substrates.jax import (
bijectors as tfb,
distributions as tfd,
)
d = dist.TransformedDistribution(dist.Normal(0, 1), dist.transforms.ExpTransform())
d1 = tfd.TransformedDistribution(tfd.Normal(0, 1), tfb.Exp())
x = random.normal(random.PRNGKey(0), (1000,))
d_x = d.log_prob(x).sum()
d1_x = d1.log_prob(x).sum()
assert_allclose(d_x, d1_x)
@pytest.mark.filterwarnings("ignore:can't resolve package")
def test_logistic_regression():
from tensorflow_probability.substrates.jax import distributions as tfd
N, dim = 3000, 3
num_warmup, num_samples = (1000, 1000)
data = random.normal(random.PRNGKey(0), (N, dim))
true_coefs = jnp.arange(1.0, dim + 1.0)
logits = jnp.sum(true_coefs * data, axis=-1)
labels = tfd.Bernoulli(logits=logits).sample(seed=random.PRNGKey(1))
def model(labels):
coefs = numpyro.sample("coefs", tfd.Normal(jnp.zeros(dim), jnp.ones(dim)))
logits = numpyro.deterministic("logits", jnp.sum(coefs * data, axis=-1))
return numpyro.sample("obs", tfd.Bernoulli(logits=logits), obs=labels)
kernel = NUTS(model)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), labels)
mcmc.print_summary()
samples = mcmc.get_samples()
assert samples["logits"].shape == (num_samples, N)
expected_coefs = jnp.array([0.97, 2.05, 3.18])
assert_allclose(jnp.mean(samples["coefs"], 0), expected_coefs, atol=0.22)
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_beta_bernoulli():
from tensorflow_probability.substrates.jax import distributions as tfd
num_warmup, num_samples = (500, 2000)
def model(data):
alpha = jnp.array([1.1, 1.1])
beta = jnp.array([1.1, 1.1])
p_latent = numpyro.sample("p_latent", tfd.Beta(alpha, beta))
numpyro.sample("obs", tfd.Bernoulli(p_latent), obs=data)
return p_latent
true_probs = jnp.array([0.9, 0.1])
data = tfd.Bernoulli(true_probs).sample(
seed=random.PRNGKey(1), sample_shape=(1000, 2)
)
kernel = NUTS(model=model, trajectory_length=0.1)
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.run(random.PRNGKey(2), data)
mcmc.print_summary()
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["p_latent"], 0), true_probs, atol=0.05)
def make_kernel_fn(target_log_prob_fn):
import tensorflow_probability.substrates.jax as tfp
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.5 / jnp.sqrt(0.5 ** jnp.arange(4)[..., None]),
num_leapfrog_steps=5,
)
@pytest.mark.parametrize(
"kernel, kwargs",
[
("HamiltonianMonteCarlo", dict(step_size=0.05, num_leapfrog_steps=10)),
("NoUTurnSampler", dict(step_size=0.05)),
("RandomWalkMetropolis", dict()),
("SliceSampler", dict(step_size=1.0, max_doublings=5)),
(
"UncalibratedHamiltonianMonteCarlo",
dict(step_size=0.05, num_leapfrog_steps=10),
),
("UncalibratedRandomWalk", dict()),
],
)
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_mcmc_kernels(kernel, kwargs):
from numpyro.contrib.tfp import mcmc
kernel_class = getattr(mcmc, kernel)
true_coef = 0.9
num_warmup, num_samples = 1000, 1000
def model(data):
alpha = numpyro.sample("alpha", dist.Uniform(0, 1))
with numpyro.handlers.reparam(config={"loc": TransformReparam()}):
loc = numpyro.sample(
"loc",
dist.TransformedDistribution(
dist.Uniform(0, 1), AffineTransform(0, alpha)
),
)
numpyro.sample("obs", dist.Normal(loc, 0.1), obs=data)
data = true_coef + random.normal(random.PRNGKey(0), (1000,))
tfp_kernel = kernel_class(model=model, **kwargs)
mcmc = MCMC(tfp_kernel, num_warmup=num_warmup, num_samples=num_samples)
mcmc.warmup(random.PRNGKey(2), data, collect_warmup=True)
warmup_samples = mcmc.get_samples()
mcmc.run(random.PRNGKey(3), data)
samples = mcmc.get_samples()
assert len(warmup_samples["loc"]) == num_warmup
assert len(samples["loc"]) == num_samples
assert_allclose(jnp.mean(samples["loc"], 0), true_coef, atol=0.05)
@pytest.mark.parametrize(
"kernel, kwargs",
[
("MetropolisAdjustedLangevinAlgorithm", dict(step_size=1.0)),
("RandomWalkMetropolis", dict()),
("SliceSampler", dict(step_size=1.0, max_doublings=5)),
("UncalibratedLangevin", dict(step_size=0.1)),
(
"ReplicaExchangeMC",
dict(
inverse_temperatures=0.5 ** jnp.arange(4), make_kernel_fn=make_kernel_fn
),
),
],
)
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.skipif(
"XLA_FLAGS" not in os.environ,
reason="without this mark, we have duplicated tests in Travis",
)
@pytest.mark.filterwarnings("ignore:There are not enough devices:UserWarning")
@pytest.mark.filterwarnings("ignore:can't resolve package")
# TODO: remove after https://github.com/tensorflow/probability/issues/1072 is resolved
@pytest.mark.filterwarnings("ignore:Explicitly requested dtype")
def test_unnormalized_normal_chain(kernel, kwargs, num_chains):
from numpyro.contrib.tfp import mcmc
# TODO: remove when this issue is fixed upstream
# https://github.com/tensorflow/probability/pull/1087
if num_chains == 2 and kernel == "ReplicaExchangeMC":
pytest.xfail("ReplicaExchangeMC is not fully compatible with omnistaging yet.")
kernel_class = getattr(mcmc, kernel)
true_mean, true_std = 1.0, 0.5
num_warmup, num_samples = (1000, 8000)
def potential_fn(z):
return 0.5 * ((z - true_mean) / true_std) ** 2
init_params = jnp.array(0.0) if num_chains == 1 else jnp.array([0.0, 2.0])
tfp_kernel = kernel_class(potential_fn=potential_fn, **kwargs)
mcmc = MCMC(
tfp_kernel,
num_warmup=num_warmup,
num_samples=num_samples,
num_chains=num_chains,
progress_bar=False,
)
mcmc.run(random.PRNGKey(0), init_params=init_params)
mcmc.print_summary()
hmc_states = mcmc.get_samples()
assert_allclose(jnp.mean(hmc_states), true_mean, rtol=0.07)
assert_allclose(jnp.std(hmc_states), true_std, rtol=0.07)
# test if sampling from tfp distributions works as expected using
# numpyro sample function: numpyro.sample("name", dist) (bug)
@pytest.mark.filterwarnings("ignore:can't resolve package")
@pytest.mark.filterwarnings("ignore:Importing distributions")
def test_sample_tfp_distributions():
from tensorflow_probability.substrates.jax import distributions as tfd
from numpyro.contrib.tfp.distributions import TFPDistribution
# test no error raised
d = TFPDistribution[tfd.Normal](0, 1)
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
numpyro.sample("normal", d)
# test intermediates are []
value, intermediates = d(sample_intermediates=True, rng_key=random.PRNGKey(0))
assert intermediates == []
# test that sampling from unwrapped tensorflow_probability distributions works as
# expected using numpyro.sample primitive
@pytest.mark.parametrize(
"dist,args",
[
["Bernoulli", (0,)],
["Beta", (1, 1)],
["Binomial", (10, 0)],
["Categorical", ([0, 1, -1],)],
["Cauchy", (0, 1)],
["Dirichlet", ([1, 2, 0.5],)],
["Exponential", (1,)],
["InverseGamma", (1, 1)],
["Normal", (0, 1)],
["OrderedLogistic", ([0, 1], 0.5)],
["Pareto", (1,)],
],
)
def test_sample_unwrapped_tfp_distributions(dist, args):
from tensorflow_probability.substrates.jax import distributions as tfd
# test no error is raised
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
# since we import tfd inside the test, distributions have to be parametrized as
# strings, which is why we use getattr here
numpyro.sample("sample", getattr(tfd, dist)(*args))
# test mixture distributions
def test_sample_unwrapped_mixture_same_family():
from tensorflow_probability.substrates.jax import distributions as tfd
# test no error is raised
with numpyro.handlers.seed(rng_seed=random.PRNGKey(0)):
numpyro.sample(
"sample",
tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1.0, 1], scale=[0.1, 0.5] # One for each component.
),
),
)
# test that MCMC works with unwrapped tensorflow_probability distributions
def test_mcmc_unwrapped_tfp_distributions():
from tensorflow_probability.substrates.jax import distributions as tfd
def model(y):
theta = numpyro.sample("p", tfd.Beta(1, 1))
with numpyro.plate("plate", y.size):
numpyro.sample("y", tfd.Bernoulli(probs=theta), obs=y)
mcmc = MCMC(NUTS(model), num_warmup=1000, num_samples=1000)
mcmc.run(random.PRNGKey(0), jnp.array([0, 0, 1, 1, 1]))
samples = mcmc.get_samples()
assert_allclose(jnp.mean(samples["p"]), 4 / 7, atol=0.05)
|
py | 1a323f2c05c10205d16d4b6eb8f08ab76d394c10 | # ########################################################################
# Copyright 2016-2019 Advanced Micro Devices, Inc.
#
# ########################################################################
#This file contains a number of utilities function which could be independent of
#any specific domain concept
import signal
from subprocess import check_output
import errorHandler
from datetime import datetime
def currentUser():
try:
return check_output("who", shell = True).split()[0];
except:
print 'Unhandled Exception at performanceUtility::currentUser()'
raise
#Details: Generate sorted numbers in radices of 2,3 and 5 up to a given upper limit number
def generate235Radices(maxSize):
sizeList = list()
i = 0
j = 0
k = 0
SUM = int()
sumj = int()
sumk = int()
sumi = 1
while(True):
sumj = 1
j = 0
while(True):
sumk = 1
k = 0
while(True):
SUM = sumi*sumj*sumk
if ( SUM > maxSize ): break
sizeList.append(SUM)
k += 1
sumk *= 2
if (k == 0): break
j += 1
sumj *= 3
if ( j == 0 and k == 0): break
i += 1
sumi *= 5
sizeList.sort()
return sizeList
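#Example: generate235Radices(30) returns
#   [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30]
#i.e. every product 2^i * 3^j * 5^k not exceeding the limit, in ascending order.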
def timeout(timeout_time, default):
def timeout_function(f):
def f2(args):
def timeout_handler(signum, frame):
raise errorHandler.TimeoutException()
old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(timeout_time) # trigger alarm in timeout_time seconds
retval = ""
try:
retval = f(args)
except errorHandler.TimeoutException:
raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
except:
signal.alarm(0)
raise
finally:
#print 'executing finally'
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
return retval
return f2
return timeout_function
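#Illustrative usage of the decorator factory above (the wrapped callable must
#take a single argument; note the 'default' parameter is accepted but not used
#by this implementation -- a TimeoutException/ApplicationException is raised instead):
#
#   @timeout(10, None)
#   def runBenchmark(args):
#       ...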
def logTxtOutput(fileName, mode, txt):
todayFile = fileName+'-'+datetime.now().strftime('%Y-%b-%d')+'.txt'
with open(todayFile, mode) as f:
f.write('------\n'+txt+'\n')
def log(filename, txt):
with open(filename, 'a') as f:
f.write(datetime.now().ctime()+'# '+txt+'\n')
|
py | 1a323f69120d327e119ad57d0cc714d8a2dfac33 | from netpyne import specs
from netpyne.batch import Batch
def batchTauWeight():
# Create variable of type ordered dictionary (NetPyNE's customized version)
params = specs.ODict()
# fill in with parameters to explore and range of values (key has to coincide with a variable in simConfig)
params['synMechTau2'] = [3.0, 5.0, 7.0]
params['connWeight'] = [0.005, 0.01, 0.15]
params[('analysis', 'plotTraces', 'saveFig')] = [True, False]
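    # the grid method below expands these 3 x 3 x 2 values into 18 parameter
    # combinations, i.e. one simulation per combination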
    # create Batch object with the parameters to modify, specifying the files to use
b = Batch(params=params, cfgFile='tut8_cfg.py', netParamsFile='tut8_netParams.py',)
# Set output folder, grid method (all param combinations), and run configuration
b.batchLabel = 'tauWeight'
b.saveFolder = 'tut8_data'
b.method = 'grid'
b.runCfg = {'type': 'mpi_bulletin',
'script': 'tut8_init.py',
'skip': True}
# Run batch simulations
b.run()
# Main code
if __name__ == '__main__':
batchTauWeight() |
py | 1a323f7ca08aa90d463db07ad6382527c91b5584 | import sys
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QLabel, QSystemTrayIcon, QMenu
from ui.MainWindow import MainWindow
from PyQt5.QtCore import Qt, QFile, QTextStream
app = None
if __name__ == '__main__':
    # create the Qt application and the system tray icon
app = QtWidgets.QApplication(sys.argv)
tray_icon = QSystemTrayIcon(QIcon('media/icon.PNG'), parent=app)
tray_icon.setToolTip('RNApp')
tray_icon.show()
# splash screen
splash = QLabel()
pixmap = QPixmap('media/logo/RN.png')
# pixmap = pixmap.scaled(640, 640)
splash.setPixmap(pixmap)
splash.setWindowFlags(Qt.SplashScreen | Qt.FramelessWindowHint)
splash.show()
# main window init
window = MainWindow(app=app)
window.setWindowIcon(QIcon('media/logo/RN.png'))
# make tray menu
menu = QMenu()
exit_action = menu.addAction('Exit')
exit_action.triggered.connect(window.close)
# stylesheet init
stylesheet = QFile('ui/stylesheet/dark.qss')
stylesheet.open(QFile.ReadOnly | QFile.Text)
stream = QTextStream(stylesheet)
app.setStyleSheet(stream.readAll())
# splash screen destroy
splash.destroy()
window.show()
app.exec_()
print('Resuming Console Interaction.')
|
py | 1a323fd1463906a048a06aefa4d74abd393479e7 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-10-28 17:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('applications', '0020_application_ambassador'),
]
operations = [
migrations.RemoveField(
model_name='application',
name='under_age',
),
]
|
py | 1a32406a8aad3edf6047531e2077f754ab198a87 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains mention auto-encoder implementation."""
import flax.linen as nn
import jax.numpy as jnp
from language.mentionmemory.encoders import base_encoder
from language.mentionmemory.encoders import encoder_registry
from language.mentionmemory.modules import embedding
from language.mentionmemory.modules import retrieval_update_layers
from language.mentionmemory.modules import transformer
from language.mentionmemory.utils import default_values
from language.mentionmemory.utils import jax_utils as jut
from language.mentionmemory.utils.custom_types import Array, Dtype, InitType # pylint: disable=g-multiple-import
import ml_collections
@encoder_registry.register_encoder('mauto')
class MautoEncoder(base_encoder.BaseEncoder):
"""Mention autoencoder.
More precisely, this model is designed to evaluate information stored by a
mention encoder, as well as varying methods for incorporating information into
a language model. During pre-training the model has access to oracle memory
entries corresponding to a subset of linked mentions in the passage.
Attributes:
vocab_size: size of token vocabulary.
hidden_size: dimensionality of token representations.
intermediate_dim: dimensionality of intermediate representations in MLP.
retrieval_dim: dimensionality of memory values.
retrieval_update_type: means by which retrieved memory vectors are
incorporated into input representation, such as simple addition or
concatenation + MLP.
retrieval_update_config: hyperparameters for the update layer, beyond input
dimension and datatype.
num_attention_heads: number of attention heads in Transformer layers.
num_initial_layers: number of layers in first Transformer block.
num_final_layers: number of layers in second Transformer block.
dtype: data type of encoding (bfloat16 or float32). Parameters and certain
parts of computation (i.e. loss) are always in float32.
max_positions: number of positions (for positional embeddings).
max_length: maximal number of tokens for pre-training.
dropout_rate: dropout rate in Transformer layers.
no_retrieval: if true, do not incorporate retrieved mentions into model.
num_segments: number of possible token types (for token type embeddings).
kernel_init: initialization function for model kernels.
bias_init: initialization function for model biases.
layer_norm_epsilon: layer norm constant for numerical stability.
"""
vocab_size: int
hidden_size: int
intermediate_dim: int
retrieval_dim: int
retrieval_update_type: str
retrieval_update_config: ml_collections.FrozenConfigDict
num_attention_heads: int
num_initial_layers: int
num_final_layers: int
dtype: Dtype
max_positions: int
max_length: int
dropout_rate: float
num_segments: int = 2
no_retrieval: bool = False
kernel_init: InitType = default_values.kernel_init
bias_init: InitType = default_values.bias_init
layer_norm_epsilon: float = default_values.layer_norm_epsilon
def setup(self):
self.embedder = embedding.DictEmbed({
'token_ids':
embedding.Embed(
num_embeddings=self.vocab_size,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'position_ids':
embedding.Embed(
num_embeddings=self.max_positions,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
),
'segment_ids':
embedding.Embed(
num_embeddings=self.num_segments,
embedding_dim=self.hidden_size,
dtype=self.dtype,
embedding_init=self.kernel_init,
)
})
self.embeddings_layer_norm = nn.LayerNorm(epsilon=self.layer_norm_epsilon)
self.embeddings_dropout = nn.Dropout(rate=self.dropout_rate)
self.initial_encoder = transformer.TransformerBlock(
num_layers=self.num_initial_layers,
model_dim=self.hidden_size,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_attention_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
layer_norm_epsilon=self.layer_norm_epsilon,
)
self.retrieval_update_layer = retrieval_update_layers.RETRIEVAL_UPDATE_REGISTRY[
self.retrieval_update_type](
input_dim=self.hidden_size,
dtype=self.dtype,
layer_norm_epsilon=self.layer_norm_epsilon,
**self.retrieval_update_config,
)
self.final_encoder = transformer.TransformerBlock(
num_layers=self.num_final_layers,
model_dim=self.hidden_size,
intermediate_dim=self.intermediate_dim,
num_heads=self.num_attention_heads,
dropout_rate=self.dropout_rate,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
layer_norm_epsilon=self.layer_norm_epsilon,
)
self.mention_projector = nn.Dense(
features=self.retrieval_dim,
dtype=self.dtype,
)
def forward(self, batch, deterministic):
loss_helpers = {}
logging_helpers = {}
embedded_input = self.embedder({
'token_ids': batch['text_ids'],
'position_ids': batch['position_ids'],
'segment_ids': batch['segment_ids']
})
embedded_input = self.embeddings_layer_norm(embedded_input)
embedded_input = self.embeddings_dropout(embedded_input, deterministic)
loss_helpers['word_embeddings'] = self.embedder.variables['params'][
'embedders_token_ids']['embedding']
attention_mask = batch['text_mask']
encoding = self.initial_encoder(
encoding=embedded_input,
attention_mask=attention_mask,
deterministic=deterministic)
if not self.no_retrieval:
encoding = self.retrieval_update_layer(
encoded_input=encoding,
retrieval_values=jnp.expand_dims(
# [max_retrieval_indices, retrieval_dim]
batch['retrieval_mention_values'],
-2),
retrieval_scores=jnp.expand_dims(
# [max_retrieval_indices]
batch['retrieval_mention_scores'],
-1),
mention_batch_positions=batch['retrieval_mention_batch_positions'],
mention_start_positions=batch['retrieval_mention_start_positions'],
mention_end_positions=batch['retrieval_mention_end_positions'],
mention_mask=batch['retrieval_mention_mask'],
deterministic=deterministic)
encoding = self.final_encoder(
encoding=encoding,
attention_mask=attention_mask,
deterministic=deterministic)
mention_target_batch_positions = jut.matmul_slice(
batch['mention_batch_positions'], batch['mention_target_indices'])
mention_target_start_positions = jut.matmul_slice(
batch['mention_start_positions'], batch['mention_target_indices'])
mention_target_end_positions = jut.matmul_slice(
batch['mention_end_positions'], batch['mention_target_indices'])
mention_start_final_encodings = jut.matmul_2d_index_select(
encoding,
(mention_target_batch_positions, mention_target_start_positions))
mention_end_final_encodings = jut.matmul_2d_index_select(
encoding,
(mention_target_batch_positions, mention_target_end_positions))
loss_helpers['target_mention_encodings'] = self.mention_projector(
jnp.concatenate(
(mention_start_final_encodings, mention_end_final_encodings),
axis=-1))
return encoding, loss_helpers, logging_helpers
|
py | 1a3240d464cbdfaf61462c5b0e9530999e126f41 | import os
import numpy as np
import pandas as pd
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN, LSTM, Dropout, Embedding
from keras.optimizers import Adam, SGD
from keras.metrics import categorical_accuracy
from itertools import chain
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras import layers
import matplotlib.pyplot as plt
'''
Trains a basic RNN and LSTM on the first five tasks of Facebook bAbI.
Inspiration for this code is taken from the Keras team babi_rnn file.
Specifically: parse_stories and data_to_vector are taken from babi_rnn, credits
go to the Keras team
Original comes from "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks"
http://arxiv.org/abs/1502.05698
Task Number | FB LSTM Baseline | Keras QA
--- | --- | ---
QA1 - Single Supporting Fact | 50 | 100.0
QA2 - Two Supporting Facts | 20 | 50.0
QA3 - Three Supporting Facts | 20 | 20.5
QA4 - Two Arg. Relations | 61 | 62.9
QA5 - Three Arg. Relations | 70 | 61.9
QA6 - Yes/No Questions | 48 | 50.7
QA7 - Counting | 49 | 78.9
QA8 - Lists/Sets | 45 | 77.2
QA9 - Simple Negation | 64 | 64.0
QA10 - Indefinite Knowledge | 44 | 47.7
QA11 - Basic Coreference | 72 | 74.9
QA12 - Conjunction | 74 | 76.4
QA13 - Compound Coreference | 94 | 94.4
QA14 - Time Reasoning | 27 | 34.8
QA15 - Basic Deduction | 21 | 32.4
QA16 - Basic Induction | 23 | 50.6
QA17 - Positional Reasoning | 51 | 49.1
QA18 - Size Reasoning | 52 | 90.8
QA19 - Path Finding | 8 | 9.0
QA20 - Agent's Motivations | 91 | 90.7
bAbI Project Resources:
https://research.facebook.com/researchers/1543934539189348:
'''
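# For reference, raw bAbI task lines look like the following (the answer line is
# tab-separated: question, answer, supporting fact id):
#   1 Mary moved to the bathroom.
#   2 John went to the hallway.
#   3 Where is Mary?<TAB>bathroom<TAB>1
# parse_story() below turns such a block into (story_tokens, question_tokens,
# [answer]) tuples, e.g. (['mary', 'moved', 'to', 'the', 'bathroom', '.', ...],
# ['where', 'is', 'mary'], ['bathroom']).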
def setup_local_files():
'''get files from local machine and return all training / testing text files in sorted order'''
path = 'tasks'
files = os.listdir(path)
all_training_files = []
all_testing_files = []
for fn in files:
if 'train' in fn:
all_training_files.append(fn)
if 'test' in fn:
all_testing_files.append(fn)
all_training_files = np.asarray(sorted(all_training_files))
all_testing_files = np.asarray(sorted(all_testing_files))
print(all_training_files)
print(all_testing_files)
return (all_training_files,all_testing_files)
# Setup local files
all_training_files,all_testing_files = setup_local_files()
def setup_dictionaries(training_files,testing_files):
'''take in all training / testing files and return as dictionaries
corresponding to tasks'''
training_tasks_dict = dict((k+1,v) for k,v in enumerate(training_files))
testing_tasks_dict = dict((k+1,v) for k,v in enumerate(testing_files))
return (training_tasks_dict,testing_tasks_dict)
# Dictionary setup to grab tasks
training_tasks_dict,testing_tasks_dict = setup_dictionaries(all_training_files,all_testing_files)
def txt_to_raw(task_file):
'''
take in a specific task file and return a raw corpus
'''
with open(f'{os.getcwd()}/tasks/{task_file}', 'r') as file:
raw_corpus = file.readlines()
return raw_corpus
def parse_story(story):
'''
parse the passed in raw text corpus. This is modeled from the babi_rnn source from the Keras team.
GitHub URL: https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
'''
related_content = []
data = []
for line in story:
line_id,line = line.split(' ',1)
line_id = int(line_id)
if line_id == 1:
related_content = []
if '\t' in line:
question,answer,supporting_facts = line.split('\t')
question = text_to_word_sequence(question,filters='?\n')
answer = [answer]
substory = [ss for ss in related_content if ss]
data.append((substory,question,answer))
related_content.append('')
else:
line = text_to_word_sequence(line,filters='.\n') + ['.']
for word in line:
related_content.append(word)
return data
def get_unique_vocab(train_file,test_file):
'''opens up files and grabs unique vocabulary words from the text'''
with open(f'{os.getcwd()}/tasks/{train_file}','r') as train_file, open(f'{os.getcwd()}/tasks/{test_file}','r') as test_file:
raw_corpus_train = train_file.read()
raw_corpus_test = test_file.read()
train_tokenized = text_to_word_sequence(raw_corpus_train, filters='\n\t?123456789101112131415.')
test_tokenized = text_to_word_sequence(raw_corpus_test, filters='\n\t?123456789101112131415.')
return set(train_tokenized + test_tokenized + ['.'])
def data_to_vector(data,word_dictionary,vocab_size,sentence_limit,story_maxlen,question_maxlen):
'''
Stories and questions are represented as word embeddings and the answers are one-hot encoded.
Takes the stories, finds unique words, and then vectorizing them into pure numeric form.
Each word has a numeric index which it gets replaced by!
This is modeled from the babi_rnn source from the Keras team.
GitHub URL: https://github.com/keras-team/keras/blob/master/examples/babi_rnn.py
'''
STORY_VECTOR,QUESTION_VECTOR,ANSWER_VECTOR = [],[],[]
for story,question,answer in data:
# Encode the story representations
STORY_VECTOR.append([word_dictionary[word] for word in story])
# Encode the question representations
QUESTION_VECTOR.append([word_dictionary[word] for word in question])
ANSWER_VECTOR.append(word_dictionary[answer[0].lower()])
return pad_sequences(STORY_VECTOR,maxlen=story_maxlen),pad_sequences(QUESTION_VECTOR,maxlen=question_maxlen),np.array(ANSWER_VECTOR)
def zip_sq(story_training_input,question_training_input,story_testing_input,question_testing_input):
'''take story and question vectors and return a single
concatenated vector for both training and testing alongside combined max length'''
zipped_sq_training = list(zip(story_training_input,question_training_input))
zipped_sq_testing = list(zip(story_testing_input,question_testing_input))
sq_training_combined = []
sq_testing_combined = []
for sq in zipped_sq_training:
sq_training_combined.append(list(chain(sq[0],sq[1])))
for sq in zipped_sq_testing:
sq_testing_combined.append(list(chain(sq[0],sq[1])))
combined_maxlen = max(map(len,[sq for sq in sq_training_combined]))
return (sq_training_combined,sq_testing_combined,combined_maxlen)
def build_rnn(combined_maxlen,vocab_maxlen,embedding_size,dropout_rate,learning_rate,task_num):
'''build and return the model to be used'''
print(f'Building, training and evaluating RNN for {task_num}\n\n')
rnn_model = Sequential()
rnn_model.add(Embedding(input_shape=combined_maxlen,input_dim=vocab_maxlen,output_dim=embedding_size))
rnn_model.add(SimpleRNN(50,return_sequences=True))
rnn_model.add(SimpleRNN(50))
rnn_model.add(Dropout(dropout_rate))
rnn_model.add(Dense(vocab_maxlen,activation='softmax'))
rnn_model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
print('Build completed, returning RNN Model...')
return rnn_model
def run_rnn(rnn_model,x,y,testing_x,testing_y,epochs,task_num):
'''build and run the rnn model and return the history'''
print(f'Training and evaluating RNN for {task_num}\n\n')
train_history = rnn_model.fit(x=np.array(x),y=np.array(y),
epochs=epochs,validation_split=0.05)
loss, accuracy = rnn_model.evaluate(x=np.array(testing_x),
y=np.array(testing_y),
batch_size=32)
print(f'\n\nRNN Evaluation loss: {loss}, Evaluation accuracy: {accuracy} for task {task_num}\n\n')
return train_history, loss, accuracy
def build_lstm(combined_maxlen,vocab_maxlen,embedding_size,dropout_rate,learning_rate,task_num):
'''build and return the model to be used'''
lstm_model = Sequential()
lstm_model.add(Embedding(input_shape=combined_maxlen,input_dim=vocab_maxlen,output_dim=embedding_size))
lstm_model.add(LSTM(50,return_sequences=True))
lstm_model.add(LSTM(50))
lstm_model.add(Dropout(dropout_rate))
lstm_model.add(Dense(vocab_maxlen, activation='softmax'))
lstm_model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=learning_rate), metrics=['accuracy'])
print('Build completed, returning LSTM Model...')
return lstm_model
def run_lstm(lstm_model,x,y,testing_x,testing_y,epochs,task_num):
'''build and run the lstm model'''
print(f'Training and evaluating LSTM for {task_num}\n\n')
train_history = lstm_model.fit(np.array(x),np.array(y),
epochs=epochs,validation_split=0.05)
loss, accuracy = lstm_model.evaluate(x=np.array(testing_x),
y=np.array(testing_y),
batch_size=32)
print(f'\n\nLSTM Evaluation loss: {loss}, Evaluation accuracy: {accuracy} for task {task_num}\n\n')
return train_history, loss, accuracy
def predict_results(model,story_question_input,answer_testing_input):
'''predict and return results of prediction'''
def predictions_helper(expected,actuals):
'''given the expected answers and the actual answers compare and contrast '''
correct = 0
for i in range(len(expected)):
if expected[i] == actuals[i]:
correct += 1
print(f'\n\n----\nOut of 1000 possible answers the model correctly predicted: {correct}')
predictions = model.predict([np.array(story_question_input)])
idxs_of_preds = []
for preds in predictions:
for idx,ps in enumerate(preds):
if ps == max(preds):
idxs_of_preds.append(idx)
print(f'List of all the predictions made by our Model: \n\n{idxs_of_preds}')
print(f'\n\n---\n\n List of the expected values given by our testing: \n\n{answer_testing_input}')
predictions_helper(answer_testing_input,idxs_of_preds)
def plot_loss(training_history, model_type, task_num):
'''plot training vs validation loss'''
plt.plot(training_history.history['loss'], label='Training Loss')
plt.plot(training_history.history['val_loss'], label='Validation Loss')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title(f'{model_type} Training loss vs Evaluation loss for task {task_num}')
def plot_acc(training_history, model_type, task_num):
'''plot training vs validation accuracy'''
plt.plot(training_history.history['acc'], label='Training Accuracy')
plt.plot(training_history.history['val_acc'], label='Validation Accuracy')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title(f'{model_type} Training accuracy vs Evaluation accuracy for task {task_num}')
def plot_all_training_losses_rnn(rnn_hist):
'''plot rnn training losses'''
rnn_loss_epoch_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
rnn_loss_epoch_fig.plot(rnn_hist[i].history['loss'], label=f'Task {i+1} - {tasks[i]}')
rnn_loss_epoch_fig.legend()
rnn_loss_epoch_fig.legend(bbox_to_anchor=(1, 1))
rnn_loss_epoch_fig.set_xlabel('Epoch')
rnn_loss_epoch_fig.set_ylabel('Loss')
rnn_loss_epoch_fig.set_title(f'Loss rate for RNN for tasks 1 - 5 with Adam')
def plot_all_training_acc_rnn(rnn_hist):
rnn_acc_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
rnn_acc_fig.plot(rnn_hist[i].history['acc'], label=f'Task {i+1} - {tasks[i]}')
rnn_acc_fig.legend(bbox_to_anchor=(1, 1))
rnn_acc_fig.set_xlabel('Epoch')
rnn_acc_fig.set_ylabel('Accuracy')
rnn_acc_fig.set_title('Accuracy for RNN for tasks 1 - 5')
def plot_all_training_losses_lstm(lstm_hist):
'''plot all lstm training losses'''
lstm_loss_epoch_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
lstm_loss_epoch_fig.plot(lstm_hist[i].history['loss'], label=f'Task {i+1} - {tasks[i]}')
lstm_loss_epoch_fig.legend(bbox_to_anchor=(1, 1))
lstm_loss_epoch_fig.set_xlabel('Epoch')
lstm_loss_epoch_fig.set_ylabel('Loss')
lstm_loss_epoch_fig.set_title('Loss rate for LSTM for tasks 1 - 5 with Adam')
def plot_all_training_acc_lstm(lstm_hist):
lstm_acc_fig = plt.figure().add_subplot(1,1,1)
tasks = ['Single Supporting Fact', 'Two Supporting Facts', 'Three Supporting Facts',
'Two Arg. Relations', 'Three Arg. Relations']
for i in range(5):
lstm_acc_fig.plot(lstm_hist[i].history['acc'], label=f'Task {i+1} - {tasks[i]}')
lstm_acc_fig.legend(bbox_to_anchor=(1, 1))
lstm_acc_fig.set_xlabel('Epoch')
lstm_acc_fig.set_ylabel('Accuracy')
lstm_acc_fig.set_title('Accuracy for LSTM for tasks 1 - 5')
def run_all(embedding_size,dropout_rate,rnn_learning_rate,lstm_learning_rate,rnn_epochs,lstm_epochs):
'''run all tasks and return history along with evaluations'''
all_rnn_history = []
all_lstm_history = []
all_rnn_eval_loss = []
all_lstm_eval_loss = []
all_rnn_eval_acc = []
all_lstm_eval_acc = []
print('Running all tasks')
    print(f'Passed in parameters are the following EMBEDDING SIZE: {embedding_size}, DROPOUT RATE: {dropout_rate}, '
          f'LEARNING RATE FOR RNN: {rnn_learning_rate}, LEARNING RATE FOR LSTM: {lstm_learning_rate}, '
          f'RNN EPOCHS: {rnn_epochs}, LSTM EPOCHS: {lstm_epochs}\n\n')
print('Building models...')
for task_number in range(1,6):
print(f'Running RNN and LSTM for Task {task_number}\n\n')
# Text to raw
task_training_corpus = txt_to_raw(training_tasks_dict[task_number])
task_testing_corpus = txt_to_raw(training_tasks_dict[task_number])
# Set up parsed stories
training_data = parse_story(task_training_corpus)
testing_data = parse_story(task_testing_corpus)
# Get unique vocabulary
vocab = get_unique_vocab(training_tasks_dict[task_number],testing_tasks_dict[task_number])
# Get max lengths
vocab_maxlen = len(vocab) + 1
story_maxlen = max(map(len,[s for s,_,_ in training_data]))
question_maxlen = max(map(len,[q for _,q,_ in training_data]))
# Set up word indices
word_index = dict((c, i + 1) for i, c in enumerate(vocab))
index_words = [''] + list(vocab)
# Vectorize stories, questions and answers
vocab_maxlen = len(vocab) + 1
sentence_limit = story_maxlen
vocab_size = vocab_maxlen
story_training_input,question_training_input,answer_training_input = data_to_vector(training_data,word_index,
vocab_size,sentence_limit,
story_maxlen,
question_maxlen)
story_testing_input,question_testing_input,answer_testing_input = data_to_vector(testing_data,word_index,
vocab_size,sentence_limit,
story_maxlen,
question_maxlen)
# Zip up story, questions
sq_training_combined,sq_testing_combined,combined_maxlen = zip_sq(story_training_input,question_training_input,
story_testing_input,question_testing_input)
print('Building model, training and evaluating...\n\n')
# Run and plot RNN / LSTM
rnn_model = build_rnn(combined_maxlen=(combined_maxlen,),vocab_maxlen=vocab_maxlen,embedding_size=embedding_size,dropout_rate=dropout_rate,
learning_rate=rnn_learning_rate,task_num=task_number)
lstm_model = build_lstm(combined_maxlen=(combined_maxlen,),vocab_maxlen=vocab_maxlen,embedding_size=embedding_size,dropout_rate=dropout_rate,
learning_rate=lstm_learning_rate,task_num=task_number)
rnn_history, rnn_eval_loss, rnn_eval_acc = run_rnn(rnn_model=rnn_model,x=sq_training_combined,
y=answer_training_input,
testing_x=sq_testing_combined,
testing_y=answer_testing_input,
epochs=rnn_epochs,task_num=task_number)
lstm_history, lstm_eval_loss, lstm_eval_acc = run_lstm(lstm_model=lstm_model,x=sq_training_combined,
y=answer_training_input,testing_x=sq_testing_combined,
testing_y=answer_testing_input,
epochs=lstm_epochs,task_num=task_number)
# Make Predictions
print(f'\n\n RNN Model Predictions for task {task_number}\n')
rnn_predictions = predict_results(rnn_model, sq_testing_combined, answer_testing_input)
print(f'\n\n LSTM Model Predictions for task {task_number}\n')
lstm_predictions = predict_results(lstm_model, sq_testing_combined, answer_testing_input)
all_rnn_history.append(rnn_history)
all_lstm_history.append(lstm_history)
all_rnn_eval_loss.append(rnn_eval_loss)
all_rnn_eval_acc.append(rnn_eval_acc)
all_lstm_eval_loss.append(lstm_eval_loss)
all_lstm_eval_acc.append(lstm_eval_acc)
print(f'End build for task {task_number}')
return (all_rnn_history,all_lstm_history,
all_rnn_eval_loss,all_rnn_eval_acc,
all_lstm_eval_loss,all_lstm_eval_acc)
# All history for the model runs
all_history_evaluations = run_all(embedding_size=50,dropout_rate=0.10,rnn_learning_rate=0.0001,
lstm_learning_rate=0.001,rnn_epochs=20,lstm_epochs=30)
# Separated histories for RNN / LSTM and Evaluation Loss / Accuracy
rnn_hist,lstm_hist,rnn_eval_loss,rnn_eval_acc,lstm_eval_loss,lstm_eval_acc = all_history_evaluations
|
py | 1a32420a13a58c02a0d472c88bfcface4601208f | import pandas as pd
import python_bitbankcc
import datetime
import numpy as np
import os
bitbank_pub = python_bitbankcc.public()
PATH = os.path.dirname(__file__)
def make_data(pair, start_day, end_day=None, return_window=12):
""""
:param pair: 通貨ペア
:param start_day: データ取得開始日(yyyymmdd)
:param end_day: データ取得終了日(yyyymmdd)
:param return_window: returnの計算幅
"""
str_pattern = "%Y%m%d"
col_names = ["open", "high", "low", "close", "vol", "timestamp"]
output_col_names = [
"open",
"high",
"low",
"close",
"vol",
"timestamp",
"VWAP",
"log_return",
"upper_shadow",
"lower_shadow",
]
    # Normalize the execution date's time to 00:00:00
today_zero = datetime.datetime.today().strftime(str_pattern)
today_zero = datetime.datetime.strptime(today_zero, str_pattern)
    # If end_day is missing or outside the available range, fall back to the execution date
if end_day is None:
end_day = today_zero
else:
end_day = datetime.datetime.strptime(end_day, str_pattern)
if end_day >= today_zero:
end_day = today_zero
    # Convert to datetime for the while-loop condition
target_day = datetime.datetime.strptime(start_day, str_pattern)
    # Clamp return_window so it does not span more than one day (288 five-minute bars)
if return_window > 288:
return_window = 288
while target_day <= end_day:
        # Fetch candles for the day before the target day
target_yesterday = target_day - datetime.timedelta(days=1)
target_yesterday_str = target_yesterday.strftime(str_pattern)
pre_candles = bitbank_pub.get_candlestick(pair, "5min", target_yesterday_str)["candlestick"][0]["ohlcv"]
df_pre_candles = pd.DataFrame(np.array(pre_candles, dtype=float), columns=col_names)
        # Fetch candles for the target day
target_day_str = target_day.strftime(str_pattern)
candles = bitbank_pub.get_candlestick(pair, "5min", target_day_str)["candlestick"][0]["ohlcv"]
df_candles = pd.DataFrame(np.array(candles, dtype=float), columns=col_names)
        # Convert the timestamp from milliseconds to datetime
df_output = pd.concat([df_pre_candles, df_candles])
df_output["timestamp"] = df_output["timestamp"] / 1000
df_output["timestamp"] = pd.to_datetime(df_output["timestamp"], unit="s")
        # Compute the rolling VWAP over 288 five-minute bars (one day): sum(close * vol) / sum(vol)
df_output["multiple"] = df_output["close"].multiply(df_output["vol"]).rolling(288).sum().values
df_output["vol_sum"] = df_output["vol"].rolling(288).sum().values
df_output["VWAP"] = df_output["multiple"] / df_output["vol_sum"]
# log return
# log(P(t)/P(t-window)) ~ P(t) / P(t-window) - 1
df_output["log_return"] = (df_output["close"] / df_output["close"].shift(periods=return_window)) - 1
        # Drop the previous day's rows from the output DataFrame
df_output = df_output[df_output["timestamp"] >= target_day]
        # Compute candle shadows (wicks)
df_output["upper_shadow"] = upper_shadow(df_output)
df_output["lower_shadow"] = lower_shadow(df_output)
        # Keep only the required columns
df_output = df_output[output_col_names]
        # Write the data to CSV
df_output.to_csv(PATH + "/data/" + target_day_str + ".csv")
        # Advance to the next target day
target_day += datetime.timedelta(days=1)
def upper_shadow(df):
"""
上ヒゲを取得
:param df: OHLC
:return: 上ヒゲ
"""
return df["high"] - np.maximum(df["close"], df["open"])
def lower_shadow(df):
"""
下ヒゲを取得
:param df: OHLC
:return: 下ヒゲ
"""
return np.minimum(df["close"], df["open"]) - df["low"]
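# Example: for a candle with open=100, close=103, high=105, low=99 the
# upper shadow is 105 - max(103, 100) = 2 and the lower shadow is
# min(103, 100) - 99 = 1.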
if __name__ == "__main__":
    # Quick functional test
make_data("bat_jpy", "20220105", "20220106")
|
py | 1a32421d1e19bc49ff6994f8e0ca5419b20cddf2 | # copyright (c) 2019 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
"""
Test LightNAS.
"""
import sys
import unittest
import paddle.fluid as fluid
from paddle.fluid.contrib.slim.core import Compressor
sys.path.append("./light_nas")
from light_nas_space import LightNASSpace
class TestLightNAS(unittest.TestCase):
"""
Test LightNAS.
"""
def test_compression(self):
"""
Test LightNAS.
"""
# Update compress.yaml
lines = list()
fid = open('./light_nas/compress.yaml')
for line in fid:
if 'target_latency' in line:
lines.append(' target_latency: 0\n')
else:
lines.append(line)
fid.close()
fid = open('./light_nas/compress.yaml', 'w')
for line in lines:
fid.write(line)
fid.close()
# Begin test
if not fluid.core.is_compiled_with_cuda():
return
space = LightNASSpace()
startup_prog, train_prog, test_prog, train_metrics, test_metrics, train_reader, test_reader = space.create_net(
)
train_cost, train_acc1, train_acc5, global_lr = train_metrics
test_cost, test_acc1, test_acc5 = test_metrics
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_prog)
val_fetch_list = [('acc_top1', test_acc1.name),
('acc_top5', test_acc5.name)]
train_fetch_list = [('loss', train_cost.name)]
com_pass = Compressor(
place,
fluid.global_scope(),
train_prog,
train_reader=train_reader,
train_feed_list=None,
train_fetch_list=train_fetch_list,
eval_program=test_prog,
eval_reader=test_reader,
eval_feed_list=None,
eval_fetch_list=val_fetch_list,
train_optimizer=None,
search_space=space)
com_pass.config('./light_nas/compress.yaml')
eval_graph = com_pass.run()
def test_compression_with_target_latency(self):
"""
Test LightNAS with target_latency.
"""
# Update compress.yaml
lines = list()
fid = open('./light_nas/compress.yaml')
for line in fid:
if 'target_latency' in line:
lines.append(' target_latency: 1\n')
else:
lines.append(line)
fid.close()
fid = open('./light_nas/compress.yaml', 'w')
for line in lines:
fid.write(line)
fid.close()
# Begin test
if not fluid.core.is_compiled_with_cuda():
return
space = LightNASSpace()
startup_prog, train_prog, test_prog, train_metrics, test_metrics, train_reader, test_reader = space.create_net(
)
train_cost, train_acc1, train_acc5, global_lr = train_metrics
test_cost, test_acc1, test_acc5 = test_metrics
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(startup_prog)
val_fetch_list = [('acc_top1', test_acc1.name),
('acc_top5', test_acc5.name)]
train_fetch_list = [('loss', train_cost.name)]
com_pass = Compressor(
place,
fluid.global_scope(),
train_prog,
train_reader=train_reader,
train_feed_list=None,
train_fetch_list=train_fetch_list,
eval_program=test_prog,
eval_reader=test_reader,
eval_feed_list=None,
eval_fetch_list=val_fetch_list,
train_optimizer=None,
search_space=space)
com_pass.config('./light_nas/compress.yaml')
eval_graph = com_pass.run()
if __name__ == '__main__':
unittest.main()
|
py | 1a3242c3725328495c349611abdabe88204739af | from __future__ import division
import collections
from collections import OrderedDict
import copy
from datetime import datetime
import functools
import itertools
import json
import math
import threading
import time
import warnings
try:
from bson import json_util, SON
except ImportError:
json_util = SON = None
try:
import execjs
except ImportError:
execjs = None
try:
from pymongo import ReturnDocument
except ImportError:
class ReturnDocument(object):
BEFORE = False
AFTER = True
from sentinels import NOTHING
from six import iteritems
from six import iterkeys
from six import itervalues
from six import MAXSIZE
from six.moves import xrange
from six import string_types
from six import text_type
from mongomock.command_cursor import CommandCursor
from mongomock import DuplicateKeyError
from mongomock.filtering import filter_applies
from mongomock.filtering import iter_key_candidates
from mongomock import helpers
from mongomock import InvalidOperation
from mongomock import ObjectId
from mongomock import OperationFailure
from mongomock.results import BulkWriteResult
from mongomock.results import DeleteResult
from mongomock.results import InsertManyResult
from mongomock.results import InsertOneResult
from mongomock.results import UpdateResult
from mongomock.write_concern import WriteConcern
from mongomock import WriteError
lock = threading.RLock()
def validate_is_mapping(option, value):
if not isinstance(value, collections.Mapping):
raise TypeError('%s must be an instance of dict, bson.son.SON, or '
'other type that inherits from '
'collections.Mapping' % (option,))
def validate_is_mutable_mapping(option, value):
if not isinstance(value, collections.MutableMapping):
raise TypeError('%s must be an instance of dict, bson.son.SON, or '
'other type that inherits from '
'collections.MutableMapping' % (option,))
def validate_ok_for_replace(replacement):
validate_is_mapping('replacement', replacement)
if replacement:
first = next(iter(replacement))
if first.startswith('$'):
raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
validate_is_mapping('update', update)
if not update:
raise ValueError('update only works with $ operators')
first = next(iter(update))
if not first.startswith('$'):
raise ValueError('update only works with $ operators')
def validate_write_concern_params(**params):
if params:
WriteConcern(**params)
def get_value_by_dot(doc, key):
"""Get dictionary value using dotted key"""
result = doc
for i in key.split('.'):
result = result[i]
return result
def set_value_by_dot(doc, key, value):
"""Set dictionary value using dotted key"""
result = doc
keys = key.split('.')
for i in keys[:-1]:
if i not in result:
result[i] = {}
result = result[i]
result[keys[-1]] = value
return doc
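# Usage sketch for the dotted-key helpers (illustrative, not from the original
# module):
#   get_value_by_dot({'a': {'b': 1}}, 'a.b')  -> 1
#   set_value_by_dot({}, 'a.b', 2)            -> {'a': {'b': 2}}
# get_value_by_dot raises KeyError when a path segment is missing, while
# set_value_by_dot creates the intermediate dictionaries as needed.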
class BulkWriteOperation(object):
def __init__(self, builder, selector, is_upsert=False):
self.builder = builder
self.selector = selector
self.is_upsert = is_upsert
def upsert(self):
assert not self.is_upsert
return BulkWriteOperation(self.builder, self.selector, is_upsert=True)
def register_remove_op(self, multi):
collection = self.builder.collection
selector = self.selector
def exec_remove():
op_result = collection.remove(selector, multi=multi)
if op_result.get("ok"):
return {'nRemoved': op_result.get('n')}
err = op_result.get("err")
if err:
return {"writeErrors": [err]}
return {}
self.builder.executors.append(exec_remove)
def remove(self):
assert not self.is_upsert
self.register_remove_op(multi=True)
def remove_one(self,):
assert not self.is_upsert
self.register_remove_op(multi=False)
def register_update_op(self, document, multi, **extra_args):
if not extra_args.get("remove"):
validate_ok_for_update(document)
collection = self.builder.collection
selector = self.selector
def exec_update():
result = collection._update(spec=selector, document=document,
multi=multi, upsert=self.is_upsert,
**extra_args)
ret_val = {}
if result.get('upserted'):
ret_val["upserted"] = result.get('upserted')
ret_val["nUpserted"] = result.get('n')
modified = result.get('nModified')
if modified is not None:
ret_val['nModified'] = modified
ret_val['nMatched'] = modified
if result.get('err'):
ret_val['err'] = result.get('err')
return ret_val
self.builder.executors.append(exec_update)
def update(self, document):
self.register_update_op(document, multi=True)
def update_one(self, document):
self.register_update_op(document, multi=False)
def replace_one(self, document):
self.register_update_op(document, multi=False, remove=True)
class BulkOperationBuilder(object):
def __init__(self, collection, ordered=False):
self.collection = collection
self.ordered = ordered
self.results = {}
self.executors = []
self.done = False
self._insert_returns_nModified = True
self._update_returns_nModified = True
def find(self, selector):
return BulkWriteOperation(self, selector)
def insert(self, doc):
def exec_insert():
self.collection.insert(doc)
return {'nInserted': 1}
self.executors.append(exec_insert)
def __aggregate_operation_result(self, total_result, key, value):
agg_val = total_result.get(key)
assert agg_val is not None, "Unknown operation result %s=%s" \
" (unrecognized key)" % (key, value)
if isinstance(agg_val, int):
total_result[key] += value
elif isinstance(agg_val, list):
if key == "upserted":
new_element = {"index": len(agg_val), "_id": value}
agg_val.append(new_element)
else:
agg_val.append(value)
else:
assert False, "Fixme: missing aggregation rule for type: %s for" \
" key {%s=%s}" % (type(agg_val), key, agg_val)
def _set_nModified_policy(self, insert, update):
self._insert_returns_nModified = insert
self._update_returns_nModified = update
def execute(self, write_concern=None):
if not self.executors:
raise InvalidOperation("Bulk operation empty!")
if self.done:
raise InvalidOperation("Bulk operation already executed!")
self.done = True
result = {'nModified': 0, 'nUpserted': 0, 'nMatched': 0,
'writeErrors': [], 'upserted': [], 'writeConcernErrors': [],
'nRemoved': 0, 'nInserted': 0}
has_update = False
has_insert = False
broken_nModified_info = False
for execute_func in self.executors:
exec_name = execute_func.__name__
op_result = execute_func()
for (key, value) in op_result.items():
self.__aggregate_operation_result(result, key, value)
if exec_name == "exec_update":
has_update = True
if "nModified" not in op_result:
broken_nModified_info = True
has_insert |= exec_name == "exec_insert"
if broken_nModified_info:
result.pop('nModified')
elif has_insert and self._insert_returns_nModified:
pass
elif has_update and self._update_returns_nModified:
pass
elif self._update_returns_nModified and self._insert_returns_nModified:
pass
else:
result.pop('nModified')
return result
def add_insert(self, doc):
self.insert(doc)
def add_update(self, selector, doc, multi, upsert):
write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
write_operation.register_update_op(doc, multi)
def add_replace(self, selector, doc, upsert):
write_operation = BulkWriteOperation(self, selector, is_upsert=upsert)
write_operation.replace_one(doc)
def add_delete(self, selector, just_one):
write_operation = BulkWriteOperation(self, selector, is_upsert=False)
write_operation.register_remove_op(not just_one)
class Collection(object):
def __init__(self, db, name):
self.name = name
self.full_name = "{0}.{1}".format(db.name, name)
self.database = db
self._documents = OrderedDict()
self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self.database, self.name)
def __getitem__(self, name):
return self.database[self.name + '.' + name]
def __getattr__(self, name):
return self.__getitem__(name)
def initialize_unordered_bulk_op(self):
return BulkOperationBuilder(self, ordered=False)
def initialize_ordered_bulk_op(self):
return BulkOperationBuilder(self, ordered=True)
def insert(self, data, manipulate=True, check_keys=True,
continue_on_error=False, **kwargs):
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._insert(data)
def insert_one(self, document):
validate_is_mutable_mapping('document', document)
return InsertOneResult(self._insert(document), acknowledged=True)
def insert_many(self, documents, ordered=True):
if not isinstance(documents, collections.Iterable) or not documents:
raise TypeError('documents must be a non-empty list')
for document in documents:
validate_is_mutable_mapping('document', document)
return InsertManyResult(self._insert(documents), acknowledged=True)
def _insert(self, data):
if isinstance(data, list):
return [self._insert(item) for item in data]
if not all(isinstance(k, string_types) for k in data):
raise ValueError("Document keys must be strings")
if '_id' not in data:
data['_id'] = ObjectId()
object_id = data['_id']
if isinstance(object_id, dict):
object_id = helpers.hashdict(object_id)
if object_id in self._documents:
raise DuplicateKeyError("Duplicate Key Error", 11000)
for unique in self._uniques:
find_kwargs = {}
for key, direction in unique:
find_kwargs[key] = data.get(key)
answer = self.find(find_kwargs)
if answer.count() > 0:
raise DuplicateKeyError("Duplicate Key Error", 11000)
with lock:
self._documents[object_id] = self._internalize_dict(data)
return data['_id']
def _internalize_dict(self, d):
return {k: copy.deepcopy(v) for k, v in iteritems(d)}
def _has_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts:
if part not in sub_doc:
return False
sub_doc = sub_doc[part]
return True
def _remove_key(self, doc, key):
key_parts = key.split('.')
sub_doc = doc
for part in key_parts[:-1]:
sub_doc = sub_doc[part]
del sub_doc[key_parts[-1]]
def update_one(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert),
acknowledged=True)
def update_many(self, filter, update, upsert=False):
validate_ok_for_update(update)
return UpdateResult(self._update(filter, update, upsert=upsert,
multi=True),
acknowledged=True)
def replace_one(self, filter, replacement, upsert=False):
validate_ok_for_replace(replacement)
return UpdateResult(self._update(filter, replacement, upsert=upsert),
acknowledged=True)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=False, **kwargs):
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
return self._update(spec, document, upsert, manipulate, multi,
check_keys, **kwargs)
def _update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=False, **kwargs):
validate_is_mapping('spec', spec)
validate_is_mapping('document', document)
updated_existing = False
upserted_id = None
num_updated = 0
for existing_document in itertools.chain(self._iter_documents(spec), [None]):
# we need was_insert for the setOnInsert update operation
was_insert = False
# the sentinel document means we should do an upsert
if existing_document is None:
if not upsert or num_updated:
continue
_id = document.get('_id')
to_insert = dict(spec, _id=_id) if _id else spec
to_insert = self._expand_dots(to_insert)
upserted_id = self._insert(self._discard_operators(to_insert))
existing_document = self._documents[upserted_id]
was_insert = True
else:
updated_existing = True
num_updated += 1
first = True
subdocument = None
for k, v in iteritems(document):
if k in _updaters.keys():
updater = _updaters[k]
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, updater, subdocument)
elif k == '$setOnInsert':
if not was_insert:
continue
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, _set_updater, subdocument)
elif k == '$currentDate':
for value in itervalues(v):
if value == {'$type': 'timestamp'}:
raise NotImplementedError('timestamp is not supported so far')
subdocument = self._update_document_fields_with_positional_awareness(
existing_document, v, spec, _current_date_updater, subdocument)
elif k == '$addToSet':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list; append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += [
obj for obj in list(value['$each'])
if obj not in existing_document[field]]
continue
if value not in existing_document[field]:
existing_document[field].append(value)
continue
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[
nested_field_list[-1]]
if isinstance(value, dict) and '$each' in value:
push_results += [
obj for obj in list(value['$each'])
if obj not in push_results]
elif value not in push_results:
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
elif k == '$pull':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
# nested fields includes a positional element
# need to find that element
if '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(
existing_document, spec, nested_field_list)
# value should be a dictionary since we're pulling
pull_results = []
# and the last subdoc should be an array
for obj in subdocument[nested_field_list[-1]]:
if isinstance(obj, dict):
for pull_key, pull_value in iteritems(value):
if obj[pull_key] != pull_value:
pull_results.append(obj)
continue
if obj != value:
pull_results.append(obj)
# cannot write to doc directly as it doesn't save to
# existing_document
subdocument[nested_field_list[-1]] = pull_results
else:
arr = existing_document
for field in nested_field_list:
if field not in arr:
break
arr = arr[field]
if not isinstance(arr, list):
continue
if isinstance(value, dict):
for idx, obj in enumerate(arr):
if filter_applies(value, obj):
del arr[idx]
else:
for idx, obj in enumerate(arr):
if value == obj:
del arr[idx]
elif k == '$pullAll':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field in existing_document:
arr = existing_document[field]
existing_document[field] = [
obj for obj in arr if obj not in value]
continue
else:
subdocument = existing_document
for nested_field in nested_field_list[:-1]:
if nested_field not in subdocument:
break
subdocument = subdocument[nested_field]
if nested_field_list[-1] in subdocument:
arr = subdocument[nested_field_list[-1]]
subdocument[nested_field_list[-1]] = [
obj for obj in arr if obj not in value]
elif k == '$push':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list
# append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += list(value['$each'])
continue
existing_document[field].append(value)
continue
# nested fields includes a positional element
# need to find that element
elif '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(
existing_document, spec, nested_field_list)
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict):
# check to see if we have the format
# { '$each': [] }
if '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
else:
push_results.append(value)
# cannot write to doc directly as it doesn't save to
# existing_document
subdocument[nested_field_list[-1]] = push_results
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict) and '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
else:
if first:
# replace entire document
for key in document.keys():
if key.startswith('$'):
# can't mix modifiers with non-modifiers in
# update
raise ValueError('field names cannot start with $ [{}]'.format(k))
_id = spec.get('_id', existing_document.get('_id'))
existing_document.clear()
if _id:
existing_document['_id'] = _id
existing_document.update(self._internalize_dict(document))
if existing_document['_id'] != _id:
raise OperationFailure(
"The _id field cannot be changed from {0} to {1}"
.format(existing_document['_id'], _id))
break
else:
# can't mix modifiers with non-modifiers in update
raise ValueError(
'Invalid modifier specified: {}'.format(k))
first = False
# if empty document comes
if len(document) == 0:
_id = spec.get('_id', existing_document.get('_id'))
existing_document.clear()
if _id:
existing_document['_id'] = _id
if not multi:
break
return {
text_type("connectionId"): self.database.client._id,
text_type("err"): None,
text_type("n"): num_updated,
text_type("nModified"): num_updated if updated_existing else 0,
text_type("ok"): 1,
text_type("upserted"): upserted_id,
text_type("updatedExisting"): updated_existing,
}
def _get_subdocument(self, existing_document, spec, nested_field_list):
"""This method retrieves the subdocument of the existing_document.nested_field_list.
It uses the spec to filter through the items. It will continue to grab nested documents
until it can go no further. It will then return the subdocument that was last saved.
'$' is the positional operator, so we use the $elemMatch in the spec to find the right
subdocument in the array.
"""
# current document in view
doc = existing_document
# previous document in view
subdocument = existing_document
# current spec in view
subspec = spec
# walk down the dictionary
for subfield in nested_field_list:
if subfield == '$':
# positional element should have the equivalent elemMatch in the
# query
subspec = subspec['$elemMatch']
for item in doc:
# iterate through
if filter_applies(subspec, item):
# found the matching item save the parent
subdocument = doc
# save the item
doc = item
break
continue
subdocument = doc
doc = doc[subfield]
if subfield not in subspec:
break
subspec = subspec[subfield]
return subdocument
def _expand_dots(self, doc):
expanded = {}
paths = {}
for k, v in iteritems(doc):
key_parts = k.split('.')
sub_doc = v
for i in reversed(range(1, len(key_parts))):
key = key_parts[i]
sub_doc = {key: sub_doc}
key = key_parts[0]
if key in expanded:
raise WriteError("cannot infer query fields to set, "
"both paths '%s' and '%s' are matched"
% (k, paths[key]))
paths[key] = k
expanded[key] = sub_doc
return expanded
def _discard_operators(self, doc):
# TODO(this looks a little too naive...)
return {k: v for k, v in iteritems(doc) if not k.startswith("$")}
def find(self, filter=None, projection=None, skip=0, limit=0,
no_cursor_timeout=False, cursor_type=None, sort=None,
allow_partial_results=False, oplog_replay=False, modifiers=None,
batch_size=0, manipulate=True):
spec = filter
if spec is None:
spec = {}
validate_is_mapping('filter', spec)
return Cursor(self, spec, sort, projection, skip, limit)
def _get_dataset(self, spec, sort, fields, as_class, skip):
dataset = (self._copy_only_fields(document, fields, as_class)
for document in self._iter_documents(spec))
if sort:
for sortKey, sortDirection in reversed(sort):
dataset = iter(sorted(
dataset, key=lambda x: _resolve_sort_key(sortKey, x),
reverse=sortDirection < 0))
for i in xrange(skip):
try:
next(dataset)
except StopIteration:
pass
return dataset
def _copy_field(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_field(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_field(value, container)
return new
else:
return copy.copy(obj)
def _extract_projection_operators(self, fields):
"""Removes and returns fields with projection operators."""
result = {}
allowed_projection_operators = set(['$elemMatch'])
for key, value in iteritems(fields):
if isinstance(value, dict):
for op in value:
if op not in allowed_projection_operators:
raise ValueError('Unsupported projection option: {}'.format(op))
result[key] = value
for key in result:
del fields[key]
return result
def _apply_projection_operators(self, ops, doc, doc_copy):
"""Applies projection operators to copied document."""
for field, op in iteritems(ops):
if field not in doc_copy:
if field in doc:
# field was not copied yet (since we are in include mode)
doc_copy[field] = doc[field]
else:
# field doesn't exist in original document, no work to do
continue
if '$elemMatch' in op:
if isinstance(doc_copy[field], list):
# find the first item that matches
matched = False
for item in doc_copy[field]:
if filter_applies(op['$elemMatch'], item):
matched = True
doc_copy[field] = [item]
break
# nothing has matched
if not matched:
del doc_copy[field]
else:
# remove the field since there is nothing to iterate
del doc_copy[field]
def _copy_only_fields(self, doc, fields, container):
"""Copy only the specified fields."""
if fields is None:
return self._copy_field(doc, container)
else:
if not fields:
fields = {"_id": 1}
if not isinstance(fields, dict):
fields = helpers._fields_list_to_dict(fields)
# we can pass in something like {"_id":0, "field":1}, so pull the id
# value out and hang on to it until later
id_value = fields.pop('_id', 1)
# filter out fields with projection operators, we will take care of them later
projection_operators = self._extract_projection_operators(fields)
# other than the _id field, all fields must be either includes or
# excludes, this can evaluate to 0
if len(set(list(fields.values()))) > 1:
raise ValueError(
'You cannot currently mix including and excluding fields.')
# if we have no values passed in, make a doc_copy based on the
# id_value
if len(list(fields.values())) == 0:
if id_value == 1:
doc_copy = container()
else:
doc_copy = self._copy_field(doc, container)
# if 1 was passed in as the field values, include those fields
elif list(fields.values())[0] == 1:
doc_copy = container()
for key in fields:
key_parts = key.split('.')
subdocument = doc
subdocument_copy = doc_copy
full_key_path_found = True
for key_part in key_parts[:-1]:
if key_part not in subdocument:
full_key_path_found = False
break
subdocument = subdocument[key_part]
subdocument_copy = subdocument_copy.setdefault(key_part, {})
if not full_key_path_found or key_parts[-1] not in subdocument:
continue
subdocument_copy[key_parts[-1]] = subdocument[key_parts[-1]]
# otherwise, exclude the fields passed in
else:
doc_copy = self._copy_field(doc, container)
for key in fields:
key_parts = key.split('.')
subdocument_copy = doc_copy
full_key_path_found = True
for key_part in key_parts[:-1]:
if key_part not in subdocument_copy:
full_key_path_found = False
break
subdocument_copy = subdocument_copy[key_part]
if not full_key_path_found or key_parts[-1] not in subdocument_copy:
continue
del subdocument_copy[key_parts[-1]]
# set the _id value if we requested it, otherwise remove it
if id_value == 0:
doc_copy.pop('_id', None)
else:
if '_id' in doc:
doc_copy['_id'] = doc['_id']
fields['_id'] = id_value # put _id back in fields
# time to apply the projection operators and put back their fields
self._apply_projection_operators(projection_operators, doc, doc_copy)
for field, op in iteritems(projection_operators):
fields[field] = op
return doc_copy
def _update_document_fields(self, doc, fields, updater):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
self._update_document_single_field(doc, k, v, updater)
def _update_document_fields_positional(self, doc, fields, spec, updater,
subdocument=None):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
if '$' in k:
field_name_parts = k.split('.')
if not subdocument:
current_doc = doc
subspec = spec
for part in field_name_parts[:-1]:
if part == '$':
subspec = subspec.get('$elemMatch', subspec)
for item in current_doc:
if filter_applies(subspec, item):
current_doc = item
break
continue
new_spec = {}
for el in subspec:
if el.startswith(part):
if len(el.split(".")) > 1:
new_spec[".".join(
el.split(".")[1:])] = subspec[el]
else:
new_spec = subspec[el]
subspec = new_spec
current_doc = current_doc[part]
subdocument = current_doc
if (field_name_parts[-1] == '$' and
isinstance(subdocument, list)):
for i, doc in enumerate(subdocument):
if filter_applies(subspec, doc):
subdocument[i] = v
break
continue
updater(subdocument, field_name_parts[-1], v)
continue
# otherwise, we handle it the standard way
self._update_document_single_field(doc, k, v, updater)
return subdocument
def _update_document_fields_with_positional_awareness(self, existing_document, v, spec,
updater, subdocument):
positional = any('$' in key for key in iterkeys(v))
if positional:
return self._update_document_fields_positional(
existing_document, v, spec, updater, subdocument)
self._update_document_fields(existing_document, v, updater)
return subdocument
def _update_document_single_field(self, doc, field_name, field_value, updater):
field_name_parts = field_name.split(".")
for part in field_name_parts[:-1]:
if isinstance(doc, list):
try:
if part == '$':
doc = doc[0]
else:
doc = doc[int(part)]
continue
except ValueError:
pass
elif isinstance(doc, dict):
doc = doc.setdefault(part, {})
else:
return
field_name = field_name_parts[-1]
if isinstance(doc, list):
try:
doc[int(field_name)] = field_value
except IndexError:
pass
else:
updater(doc, field_name, field_value)
def _iter_documents(self, filter=None):
return (document for document in list(itervalues(self._documents))
if filter_applies(filter, document))
def find_one(self, filter=None, *args, **kwargs):
# Allow calling find_one with a non-dict argument that gets used as
# the id for the query.
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
try:
return next(self.find(filter, *args, **kwargs))
except StopIteration:
return None
def find_one_and_delete(self, filter, projection=None, sort=None, **kwargs):
kwargs['remove'] = True
validate_is_mapping('filter', filter)
return self._find_and_modify(filter, projection, sort=sort, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_replace(replacement)
return self._find_and_modify(filter, projection, replacement, upsert,
sort, return_document, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
validate_is_mapping('filter', filter)
validate_ok_for_update(update)
return self._find_and_modify(filter, projection, update, upsert,
sort, return_document, **kwargs)
def find_and_modify(self, query={}, update=None, upsert=False, sort=None,
full_response=False, manipulate=False, **kwargs):
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
return self._find_and_modify(query, update=update, upsert=upsert,
sort=sort, **kwargs)
def _find_and_modify(self, query, projection=None, update=None,
upsert=False, sort=None,
return_document=ReturnDocument.BEFORE, **kwargs):
remove = kwargs.get("remove", False)
if kwargs.get("new", False) and remove:
# message from mongodb
raise OperationFailure("remove and returnNew can't co-exist")
if not (remove or update):
raise ValueError("Must either update or remove")
if remove and update:
raise ValueError("Can't do both update and remove")
old = self.find_one(query, projection=projection, sort=sort)
if not old and not upsert:
return
if old and '_id' in old:
query = {'_id': old['_id']}
if remove:
self.delete_one(query)
else:
self._update(query, update, upsert)
if return_document is ReturnDocument.AFTER or kwargs.get('new'):
return self.find_one(query, projection)
return old
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
validate_is_mutable_mapping("to_save", to_save)
validate_write_concern_params(**kwargs)
if "_id" not in to_save:
return self.insert(to_save)
else:
self._update({"_id": to_save["_id"]}, to_save, True,
manipulate, check_keys=True, **kwargs)
return to_save.get("_id", None)
def delete_one(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter), True)
def delete_many(self, filter):
validate_is_mapping('filter', filter)
return DeleteResult(self._delete(filter, multi=True), True)
def _delete(self, filter, multi=False):
if filter is None:
filter = {}
if not isinstance(filter, collections.Mapping):
filter = {'_id': filter}
to_delete = list(self.find(filter))
deleted_count = 0
for doc in to_delete:
doc_id = doc['_id']
if isinstance(doc_id, dict):
doc_id = helpers.hashdict(doc_id)
del self._documents[doc_id]
deleted_count += 1
if not multi:
break
return {
"connectionId": self.database.client._id,
"n": deleted_count,
"ok": 1.0,
"err": None,
}
def remove(self, spec_or_id=None, multi=True, **kwargs):
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
validate_write_concern_params(**kwargs)
return self._delete(spec_or_id, multi=multi)
def count(self, filter=None, **kwargs):
if filter is None:
return len(self._documents)
else:
return self.find(filter).count()
def drop(self):
self.database.drop_collection(self.name)
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
self.create_index(key_or_list, cache_for, **kwargs)
def create_index(self, key_or_list, cache_for=300, **kwargs):
if 'unique' in kwargs and kwargs['unique']:
self._uniques.append(helpers._index_list(key_or_list))
def drop_index(self, index_or_name):
pass
def index_information(self):
return {}
def map_reduce(self, map_func, reduce_func, out, full_response=False,
query=None, limit=0):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to run Map-Reduce. "
"Use 'pip install pyexecjs pymongo' to support Map-Reduce mock."
)
if limit == 0:
limit = None
start_time = time.clock()
out_collection = None
reduced_rows = None
full_dict = {
'counts': {
'input': 0,
'reduce': 0,
'emit': 0,
'output': 0},
'timeMillis': 0,
'ok': 1.0,
'result': None}
map_ctx = execjs.compile("""
function doMap(fnc, docList) {
var mappedDict = {};
function emit(key, val) {
if (key['$oid']) {
mapped_key = '$oid' + key['$oid'];
}
else {
mapped_key = key;
}
if(!mappedDict[mapped_key]) {
mappedDict[mapped_key] = [];
}
mappedDict[mapped_key].push(val);
}
mapper = eval('('+fnc+')');
var mappedList = new Array();
for(var i=0; i<docList.length; i++) {
var thisDoc = eval('('+docList[i]+')');
var mappedVal = (mapper).call(thisDoc);
}
return mappedDict;
}
""")
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
var reducedList = new Array();
reducer = eval('('+fnc+')');
for(var key in docList) {
var reducedVal = {'_id': key,
'value': reducer(key, docList[key])};
reducedList.push(reducedVal);
}
return reducedList;
}
""")
doc_list = [json.dumps(doc, default=json_util.default)
for doc in self.find(query)]
mapped_rows = map_ctx.call('doMap', map_func, doc_list)
reduced_rows = reduce_ctx.call('doReduce', reduce_func, mapped_rows)[:limit]
for reduced_row in reduced_rows:
if reduced_row['_id'].startswith('$oid'):
reduced_row['_id'] = ObjectId(reduced_row['_id'][4:])
reduced_rows = sorted(reduced_rows, key=lambda x: x['_id'])
if full_response:
full_dict['counts']['input'] = len(doc_list)
for key in mapped_rows.keys():
emit_count = len(mapped_rows[key])
full_dict['counts']['emit'] += emit_count
if emit_count > 1:
full_dict['counts']['reduce'] += 1
full_dict['counts']['output'] = len(reduced_rows)
if isinstance(out, (str, bytes)):
out_collection = getattr(self.database, out)
out_collection.drop()
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = out
elif isinstance(out, SON) and out.get('replace') and out.get('db'):
# Must be of the format SON([('replace','results'),('db','outdb')])
out_db = getattr(self.database._client, out['db'])
out_collection = getattr(out_db, out['replace'])
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = {'db': out['db'], 'collection': out['replace']}
elif isinstance(out, dict) and out.get('inline'):
ret_val = reduced_rows
full_dict['result'] = reduced_rows
else:
raise TypeError("'out' must be an instance of string, dict or bson.SON")
full_dict['timeMillis'] = int(round((time.clock() - start_time) * 1000))
if full_response:
ret_val = full_dict
return ret_val
def inline_map_reduce(self, map_func, reduce_func, full_response=False,
query=None, limit=0):
return self.map_reduce(
map_func, reduce_func, {'inline': 1}, full_response, query, limit)
def distinct(self, key, filter=None):
return self.find(filter).distinct(key)
def group(self, key, condition, initial, reduce, finalize=None):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to use group. "
"Use 'pip install pyexecjs pymongo' to support group mock."
)
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
reducer = eval('('+fnc+')');
for(var i=0, l=docList.length; i<l; i++) {
try {
reducedVal = reducer(docList[i-1], docList[i]);
}
catch (err) {
continue;
}
}
return docList[docList.length - 1];
}
""")
ret_array = []
doc_list_copy = []
ret_array_copy = []
reduced_val = {}
doc_list = [doc for doc in self.find(condition)]
for doc in doc_list:
doc_copy = copy.deepcopy(doc)
for k in doc:
if isinstance(doc[k], ObjectId):
doc_copy[k] = str(doc[k])
if k not in key and k not in reduce:
del doc_copy[k]
for initial_key in initial:
if initial_key in doc.keys():
pass
else:
doc_copy[initial_key] = initial[initial_key]
doc_list_copy.append(doc_copy)
doc_list = doc_list_copy
for k in key:
doc_list = sorted(doc_list, key=lambda x: _resolve_key(k, x))
for k in key:
if not isinstance(k, helpers.basestring):
raise TypeError(
"Keys must be a list of key names, "
"each an instance of %s" % helpers.basestring.__name__)
for k2, group in itertools.groupby(doc_list, lambda item: item[k]):
group_list = ([x for x in group])
reduced_val = reduce_ctx.call('doReduce', reduce, group_list)
ret_array.append(reduced_val)
for doc in ret_array:
doc_copy = copy.deepcopy(doc)
for k in doc:
if k not in key and k not in initial.keys():
del doc_copy[k]
ret_array_copy.append(doc_copy)
ret_array = ret_array_copy
return ret_array
def aggregate(self, pipeline, **kwargs):
pipeline_operators = [
'$project',
'$match',
'$redact',
'$limit',
'$skip',
'$unwind',
'$group',
'$sample',
'$sort',
'$geoNear',
'$lookup',
'$out',
'$indexStats']
group_operators = [
'$addToSet',
'$first',
'$last',
'$max',
'$min',
'$avg',
'$push',
'$sum',
'$stdDevPop',
'$stdDevSamp']
project_operators = [
'$max',
'$min',
'$avg',
'$sum',
'$stdDevPop',
'$stdDevSamp',
'$arrayElemAt'
]
boolean_operators = ['$and', '$or', '$not'] # noqa
set_operators = [ # noqa
'$setEquals',
'$setIntersection',
'$setDifference',
'$setUnion',
'$setIsSubset',
'$anyElementTrue',
'$allElementsTrue']
comparison_operators = [ # noqa
'$cmp',
'$eq',
'$gt',
'$gte',
'$lt',
'$lte',
'$ne']
arithmetic_operators = [ # noqa
'$abs',
'$add',
'$ceil',
'$divide',
'$exp',
'$floor',
'$ln',
'$log',
'$log10',
'$mod',
'$multiply',
'$pow',
'$sqrt',
'$subtract',
'$trunc']
string_operators = [ # noqa
'$concat',
'$strcasecmp',
'$substr',
'$toLower',
'$toUpper']
text_search_operators = ['$meta'] # noqa
array_operators = [ # noqa
'$arrayElemAt',
'$concatArrays',
'$filter',
'$isArray',
'$size',
'$slice']
projection_operators = ['$map', '$let', '$literal'] # noqa
date_operators = [ # noqa
'$dayOfYear',
'$dayOfMonth',
'$dayOfWeek',
'$year',
'$month',
'$week',
'$hour',
'$minute',
'$second',
'$millisecond',
'$dateToString']
def _handle_arithmetic_operator(operator, values, doc_dict):
if operator == '$abs':
return abs(_parse_expression(values, doc_dict))
elif operator == '$ceil':
return math.ceil(_parse_expression(values, doc_dict))
elif operator == '$divide':
assert len(values) == 2, 'divide must have only 2 items'
return _parse_expression(values[0], doc_dict) / _parse_expression(values[1],
doc_dict)
elif operator == '$exp':
return math.exp(_parse_expression(values, doc_dict))
elif operator == '$floor':
return math.floor(_parse_expression(values, doc_dict))
elif operator == '$ln':
return math.log(_parse_expression(values, doc_dict))
elif operator == '$log':
assert len(values) == 2, 'log must have only 2 items'
return math.log(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$log10':
return math.log10(_parse_expression(values, doc_dict))
elif operator == '$mod':
assert len(values) == 2, 'mod must have only 2 items'
return math.fmod(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$pow':
assert len(values) == 2, 'pow must have only 2 items'
return math.pow(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$sqrt':
return math.sqrt(_parse_expression(values, doc_dict))
elif operator == '$subtract':
assert len(values) == 2, 'subtract must have only 2 items'
return _parse_expression(values[0], doc_dict) - _parse_expression(values[1],
doc_dict)
else:
raise NotImplementedError("Although '%s' is a valid aritmetic operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_comparison_operator(operator, values, doc_dict):
assert len(values) == 2, 'Comparison requires two expressions'
if operator == '$eq':
return _parse_expression(values[0], doc_dict) == \
_parse_expression(values[1], doc_dict)
elif operator == '$gt':
return _parse_expression(values[0], doc_dict) > \
_parse_expression(values[1], doc_dict)
elif operator == '$gte':
return _parse_expression(values[0], doc_dict) >= \
_parse_expression(values[1], doc_dict)
elif operator == '$lt':
return _parse_expression(values[0], doc_dict) < \
_parse_expression(values[1], doc_dict)
elif operator == '$lte':
return _parse_expression(values[0], doc_dict) <= \
_parse_expression(values[1], doc_dict)
elif operator == '$ne':
return _parse_expression(values[0], doc_dict) != \
_parse_expression(values[1], doc_dict)
else:
raise NotImplementedError(
"Although '%s' is a valid comparison operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_date_operator(operator, values, doc_dict):
out_value = _parse_expression(values, doc_dict)
if operator == '$dayOfYear':
return out_value.timetuple().tm_yday
elif operator == '$dayOfMonth':
return out_value.day
elif operator == '$dayOfWeek':
return out_value.isoweekday()
elif operator == '$year':
return out_value.year
elif operator == '$month':
return out_value.month
elif operator == '$week':
return out_value.isocalendar()[1]
elif operator == '$hour':
return out_value.hour
elif operator == '$minute':
return out_value.minute
elif operator == '$second':
return out_value.second
elif operator == '$millisecond':
return int(out_value.microsecond / 1000)
else:
raise NotImplementedError(
"Although '%s' is a valid date operator for the "
"aggregation pipeline, it is currently not implemented "
" in Mongomock." % operator)
def _handle_project_operator(operator, values, doc_dict):
if operator == '$min':
if len(values) > 2:
raise NotImplementedError("Although %d is a valid amount of elements in "
"aggregation pipeline, it is currently not "
" implemented in Mongomock" % len(values))
return min(_parse_expression(values[0], doc_dict),
_parse_expression(values[1], doc_dict))
elif operator == '$arrayElemAt':
key, index = values
array = _parse_basic_expression(key, doc_dict)
v = array[index]
return v
else:
raise NotImplementedError("Although '%s' is a valid project operator for the "
"aggregation pipeline, it is currently not implemented "
"in Mongomock." % operator)
def _parse_basic_expression(expression, doc_dict):
if isinstance(expression, str) and expression.startswith('$'):
get_value = helpers.embedded_item_getter(expression.replace('$', ''))
return get_value(doc_dict)
else:
return expression
def _parse_expression(expression, doc_dict):
if not isinstance(expression, dict):
return _parse_basic_expression(expression, doc_dict)
value_dict = {}
for k, v in iteritems(expression):
if k in arithmetic_operators:
return _handle_arithmetic_operator(k, v, doc_dict)
elif k in project_operators:
return _handle_project_operator(k, v, doc_dict)
elif k in comparison_operators:
return _handle_comparison_operator(k, v, doc_dict)
elif k in date_operators:
return _handle_date_operator(k, v, doc_dict)
else:
value_dict[k] = _parse_expression(v, doc_dict)
return value_dict
def _extend_collection(out_collection, field, expression):
field_exists = False
for doc in out_collection:
if field in doc:
field_exists = True
break
if not field_exists:
for doc in out_collection:
if isinstance(expression, str) and expression.startswith('$'):
try:
doc[field] = get_value_by_dot(doc, expression.lstrip('$'))
except KeyError:
pass
else:
# verify expression has operator as first
doc[field] = _parse_expression(expression.copy(), doc)
return out_collection
conditional_operators = ['$cond', '$ifNull'] # noqa
out_collection = [doc for doc in self.find()]
for stage in pipeline:
for k, v in iteritems(stage):
if k == '$match':
out_collection = [doc for doc in out_collection
if filter_applies(v, doc)]
elif k == '$group':
grouped_collection = []
_id = stage['$group']['_id']
if _id:
key_getter = functools.partial(_parse_expression, _id)
out_collection = sorted(out_collection, key=key_getter)
grouped = itertools.groupby(out_collection, key_getter)
else:
grouped = [(None, out_collection)]
for doc_id, group in grouped:
group_list = ([x for x in group])
doc_dict = {'_id': doc_id}
for field, value in iteritems(v):
if field == '_id':
continue
for operator, key in iteritems(value):
if operator in (
"$sum",
"$avg",
"$min",
"$max",
"$first",
"$last",
"$addToSet",
'$push'
):
key_getter = functools.partial(_parse_expression, key)
values = [key_getter(doc) for doc in group_list]
if operator == "$sum":
val_it = (val or 0 for val in values)
doc_dict[field] = sum(val_it)
elif operator == "$avg":
values = [val or 0 for val in values]
doc_dict[field] = sum(values) / max(len(values), 1)
elif operator == "$min":
val_it = (val or MAXSIZE for val in values)
doc_dict[field] = min(val_it)
elif operator == "$max":
val_it = (val or -MAXSIZE for val in values)
doc_dict[field] = max(val_it)
elif operator == "$first":
doc_dict[field] = values[0]
elif operator == "$last":
doc_dict[field] = values[-1]
elif operator == "$addToSet":
val_it = (val or None for val in values)
doc_dict[field] = set(val_it)
elif operator == '$push':
if field not in doc_dict:
doc_dict[field] = []
doc_dict[field].extend(values)
else:
if operator in group_operators:
raise NotImplementedError(
"Although %s is a valid group operator for the "
"aggregation pipeline, it is currently not implemented "
"in Mongomock." % operator)
else:
raise NotImplementedError(
"%s is not a valid group operator for the aggregation "
"pipeline. See http://docs.mongodb.org/manual/meta/"
"aggregation-quick-reference/ for a complete list of "
"valid operators." % operator)
grouped_collection.append(doc_dict)
out_collection = grouped_collection
elif k == '$sort':
sort_array = []
for x, y in v.items():
sort_array.append({x: y})
for sort_pair in reversed(sort_array):
for sortKey, sortDirection in sort_pair.items():
out_collection = sorted(
out_collection,
key=lambda x: _resolve_sort_key(sortKey, x),
reverse=sortDirection < 0)
elif k == '$skip':
out_collection = out_collection[v:]
elif k == '$limit':
out_collection = out_collection[:v]
elif k == '$unwind':
if not isinstance(v, helpers.basestring) or v[0] != '$':
raise ValueError(
"$unwind failed: exception: field path references must be prefixed "
"with a '$' '%s'" % v)
unwound_collection = []
for doc in out_collection:
array_value = get_value_by_dot(doc, v[1:])
if array_value in (None, []):
continue
elif not isinstance(array_value, list):
raise TypeError(
'$unwind must specify an array field, field: '
'"%s", value found: %s' % (v, array_value))
for field_item in array_value:
unwound_collection.append(copy.deepcopy(doc))
unwound_collection[-1] = set_value_by_dot(
unwound_collection[-1], v[1:], field_item)
out_collection = unwound_collection
elif k == '$project':
filter_list = ['_id']
for field, value in iteritems(v):
if field == '_id' and not value:
filter_list.remove('_id')
elif value:
filter_list.append(field)
out_collection = _extend_collection(out_collection, field, value)
out_collection = [{k: v for (k, v) in x.items() if k in filter_list}
for x in out_collection]
elif k == '$out':
# TODO(MetrodataTeam): should leave the origin collection unchanged
collection = self.database.get_collection(v)
if collection.count() > 0:
collection.drop()
collection.insert_many(out_collection)
else:
if k in pipeline_operators:
raise NotImplementedError(
"Although '%s' is a valid operator for the aggregation pipeline, it is "
"currently not implemented in Mongomock." % k)
else:
raise NotImplementedError(
"%s is not a valid operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators." % k)
return CommandCursor(out_collection)
def with_options(
self, codec_options=None, read_preference=None, write_concern=None, read_concern=None):
return self
def rename(self, new_name, **kwargs):
self.database.rename_collection(self.name, new_name, **kwargs)
def bulk_write(self, operations):
bulk = BulkOperationBuilder(self)
for operation in operations:
operation._add_to_bulk(bulk)
return BulkWriteResult(bulk.execute(), True)
def _resolve_key(key, doc):
return next(iter(iter_key_candidates(key, doc)), NOTHING)
def _resolve_sort_key(key, doc):
value = _resolve_key(key, doc)
# see http://docs.mongodb.org/manual/reference/method/cursor.sort/#ascending-descending-sort
if value is NOTHING:
return 0, value
return 1, value
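# Added note: the (0, value) / (1, value) tuples make documents that lack the
# sort key compare as smaller than documents that have it, matching MongoDB's
# behaviour of placing documents without the key first in an ascending sort.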
class Cursor(object):
def __init__(self, collection, spec=None, sort=None, projection=None, skip=0, limit=0):
super(Cursor, self).__init__()
self.collection = collection
self._spec = spec
self._sort = sort
self._projection = projection
self._skip = skip
self._factory = functools.partial(collection._get_dataset,
spec, sort, projection, dict, skip)
# pymongo limit defaults to 0, returning everything
self._limit = limit if limit != 0 else None
self.rewind()
def __iter__(self):
return self
def clone(self):
return Cursor(self.collection,
self._spec, self._sort, self._projection, self._skip, self._limit)
def __next__(self):
if self._skip and not self._skipped:
for i in range(self._skip):
next(self._dataset)
self._skipped = self._skip
if self._limit is not None and self._limit <= self._emitted:
raise StopIteration()
if self._limit is not None:
self._emitted += 1
return {k: copy.deepcopy(v) for k, v in iteritems(next(self._dataset))}
next = __next__
def rewind(self):
self._dataset = self._factory()
self._emitted = 0
self._skipped = 0
def sort(self, key_or_list, direction=None):
if direction is None:
direction = 1
if isinstance(key_or_list, (tuple, list)):
for sortKey, sortDirection in reversed(key_or_list):
self._dataset = iter(
sorted(
self._dataset,
key=lambda x: _resolve_sort_key(
sortKey,
x),
reverse=sortDirection < 0))
else:
self._dataset = iter(
sorted(self._dataset,
key=lambda x: _resolve_sort_key(key_or_list, x),
reverse=direction < 0))
return self
def count(self, with_limit_and_skip=False):
arr = [x for x in self._dataset]
count = len(arr)
if with_limit_and_skip:
if self._skip:
count -= self._skip
if self._limit and count > self._limit:
count = self._limit
self._dataset = iter(arr)
return count
def skip(self, count):
self._skip = count
return self
def limit(self, count):
self._limit = count if count != 0 else None
return self
def batch_size(self, count):
return self
def close(self):
pass
def distinct(self, key):
if not isinstance(key, helpers.basestring):
raise TypeError('cursor.distinct key must be a string')
unique = set()
unique_dict_vals = []
for x in iter(self._dataset):
value = _resolve_key(key, x)
if value == NOTHING:
continue
if isinstance(value, dict):
if any(dict_val == value for dict_val in unique_dict_vals):
continue
unique_dict_vals.append(value)
else:
unique.update(
value if isinstance(
value, (tuple, list)) else [value])
return list(unique) + unique_dict_vals
def __getitem__(self, index):
if isinstance(index, slice):
# Limit the cursor to the given slice
self._dataset = (x for x in list(self._dataset)[index])
return self
elif not isinstance(index, int):
raise TypeError("index '%s' cannot be applied to Cursor instances" % index)
elif index < 0:
raise IndexError('Cursor instances do not support negative indices')
else:
arr = [x for x in self._dataset]
self._dataset = iter(arr)
return arr[index]
def _set_updater(doc, field_name, value):
if isinstance(value, (tuple, list)):
value = copy.deepcopy(value)
if isinstance(doc, dict):
doc[field_name] = value
def _unset_updater(doc, field_name, value):
if isinstance(doc, dict):
doc.pop(field_name, None)
def _inc_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = doc.get(field_name, 0) + value
def _max_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = max(doc.get(field_name, value), value)
def _min_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = min(doc.get(field_name, value), value)
def _sum_updater(doc, field_name, current, result):
if isinstance(doc, dict):
result = current + doc.get(field_name, 0)
return result
def _current_date_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = datetime.utcnow()
_updaters = {
'$set': _set_updater,
'$unset': _unset_updater,
'$inc': _inc_updater,
'$max': _max_updater,
'$min': _min_updater,
}
|
py | 1a3242db8c11f2bd1e5a09beacb2388e8aad1a4e | class Solution:
def specialArray(self, nums: List[int]) -> int:
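# Approach (added comment): sort ascending and scan; at index ind the candidate
# answer is x = len(nums) - ind (the number of remaining elements). It is valid
# when nums[ind] >= x (so all remaining elements are >= x) and prev < x (so the
# elements before ind are all < x), i.e. exactly x numbers are >= x.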
nums.sort()
res = prev = -1
for ind, i in enumerate(nums):
if i>=len(nums)-ind and len(nums)-ind>prev:
return len(nums)-ind
prev=i
return res |
py | 1a32437ec965919e7bfa964350ba497f96c14d2f | import random
director_name = ['ahpwnzl mabfhlu','augsiq qbtxmw','gyrpvldoewfuin tmsgrjahozqwun',
'grecfblmuqz jkebcmdwqao','ljuemdby gfdewsck','dsrhgtxzivcuoya rcxmdlofqkgvphs',]
client_enterprise_name = ['hjwpfxevdlmcqz jexytvcfnrglaq','wrvjudn paydvfc','xuanq hxzve',
'drhelmfupxnz ljscbumproni','ugodj zutjm','yjgwlnzpbk bhevflixys',
'fiovwl hnzray','lsyexrpwzicdfq ckndofjeuzgpbi']
salesman_name = ['gndxzrfyulie qvfykexcuopw','lykjcqwteznomhp vlfsegqxodabznt','napdbvwkfytxzho gtsxmdyehwjblvc',
'bhrfsxwvpnoz deuqazrgnkot','pybwg chxeg','podxevkisw wrosfeuzvc',
'wvpgnuym mqktwoib','swmdinbcohtarlk iylvfhorbucpjem','agtwu ykazf']
salesman_number = ['11049911','19647906','17444041',
'15938426','19707413','14263792',
'18402745','15744174','19246098']
salesman_phone = ['15794620485','13261138409','16613282797',
'14268410346','12808971977','15530011206',
'17120631428','12505751408','16568866838']
product_code = ['N9C7h0Jx','i5gCFe4k','zsVaSZrH',
'uPUofy6q','yeKWEuzc','kpfj5GlW',
'1f7TCp9K','FJnygAXG','qkjmafCR']
product_name = ['nuoaxkqgcvt cfhrtbemaix','mzftpibjwo nzsgeyloru','cftxq ivajf',
'mzqhnsfbpoevjdk gefqcwlxvpionut','qyhlungdz rfuleqwnd','bfoau xajml',
'mbyjo lownu','rlgoemuwfkdqjs evfxmalobyuhij','qndtxlmfjcgaupe ytiwvjzqamxopeg']
product_model = ['yhzqi yzhdt','sxvwgua iyjxnwf','vwlybetukzcqn cvyirmsnebqod',
'ijkvtenzscupxq lktjzofacgxwen','ryeohdupljb usfbaohzyvx','mdlycxjvkzrpqb cqeflukpznxtgy',
'uonhcjasbypvxwe uweajmrlihgotbx','czhsruvj hqxrgudl','xzkbjvrqgdlmco ifqtgmzhuewkjc']
def generate_select_test_supply_center() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM supply_center WHERE id = %d;
''' % random.randint(1, 93632))
for i in director_name:
select_test.append('''
SELECT * FROM supply_center WHERE director_name = '%s';
''' % i)
return select_test
def generate_select_test_client_enterprise() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM client_enterprise WHERE id = %d;
''' % random.randint(1, 275302))
for i in client_enterprise_name:
select_test.append('''
SELECT * FROM client_enterprise WHERE name = '%s';
''' % i)
return select_test
def generate_select_test_salesman() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM client WHERE id = %d;
''' % random.randint(1, 972749))
for i in salesman_name:
select_test.append('''
SELECT * FROM client WHERE name = '%s';
''' % i)
for i in salesman_number:
select_test.append('''
SELECT * FROM client WHERE number = '%s';
''' % i)
for i in salesman_phone:
select_test.append('''
SELECT * FROM client WHERE mobile_number = '%s';
''' % i)
return select_test
def generate_select_test_product() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM product WHERE id = %d;
''' % random.randint(1, 962787))
for i in product_code:
select_test.append('''
SELECT * FROM product WHERE product_code = '%s';
''' % i)
for i in product_name:
select_test.append('''
SELECT * FROM product WHERE product_name = '%s';
''' % i)
return select_test
def generate_select_test_product_model() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM product_model WHERE id = %d;
''' % random.randint(1, 3597940))
for i1 in range(10):
select_test.append('''
SELECT * FROM product_model WHERE product_id = %d;
''' % random.randint(1, 962787))
for i in product_model:
select_test.append('''
SELECT * FROM product_model WHERE product_model = '%s';
''' % i)
return select_test
def generate_select_test_contract() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM contract WHERE id = %d;
''' % random.randint(1, 400000))
return select_test
def generate_select_test_contract_content() -> list:
select_test = []
for i1 in range(10):
select_test.append('''
SELECT * FROM contract_content WHERE id = %d;
''' % random.randint(1, 3597940))
return select_test
|
py | 1a3243835d59093c6439fda46a4bd52a936e0071 | from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from apps.inventories.models import PlaceMember
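# Added comment: this handler keeps the "default place" invariant for members.
# On post_add, a member with no default place gets the place they were just
# added to marked as their default; on post_remove, if a member lost their
# default, any remaining membership is promoted to be the default one.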
@receiver(m2m_changed, sender=PlaceMember)
def create_place_member(action, instance, pk_set, **_kwargs):
if action == 'post_add':
for member_id in pk_set:
if not PlaceMember.objects.filter(member_id=member_id, is_the_default_one=True).exists():
PlaceMember.objects.filter(member_id=member_id, place=instance).update(is_the_default_one=True)
if action == 'post_remove':
for member_id in pk_set:
if not PlaceMember.objects.filter(member_id=member_id, is_the_default_one=True).exists():
placemember = PlaceMember.objects.filter(member_id=member_id).first()
if placemember:
placemember.is_the_default_one = True
placemember.save()
|
py | 1a324620575b0450e4be238f9ec324313a6de0b5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A class that uses a different static method depending on a parameter passed in
init. Note the use of a single dictionary instead of multiple conditions.
"""
__author__ = "Ibrahim Diop <[email protected]>"
class Catalog(object):
"""catalog of multiple static methods that are executed depending on an init
parameter
"""
def __init__(self, param):
# dictionary that will be used to determine which static method is
# to be executed but that will be also used to store possible param
# value
self._static_method_choices = {'param_value_1': self._static_method_1, 'param_value_2': self._static_method_2}
# simple test to validate param value
        if param in self._static_method_choices:
self.param = param
else:
raise ValueError("Invalid Value for Param: {0}".format(param))
@staticmethod
def _static_method_1():
print("executed method 1!")
@staticmethod
def _static_method_2():
print("executed method 2!")
def main_method(self):
"""will execute either _static_method_1 or _static_method_2
depending on self.param value
"""
self._static_method_choices[self.param]()
# Alternative implementation for different levels of methods
class CatalogInstance(object):
"""catalog of multiple methods that are executed depending on an init
parameter
"""
def __init__(self, param):
self.x1 = 'x1'
self.x2 = 'x2'
# simple test to validate param value
if param in self._instance_method_choices:
self.param = param
else:
raise ValueError("Invalid Value for Param: {0}".format(param))
def _instance_method_1(self):
print("Value {}".format(self.x1))
def _instance_method_2(self):
print("Value {}".format(self.x2))
_instance_method_choices = {'param_value_1': _instance_method_1, 'param_value_2': _instance_method_2}
def main_method(self):
"""will execute either _instance_method_1 or _instance_method_2
depending on self.param value
"""
self._instance_method_choices[self.param].__get__(self)()
class CatalogClass(object):
"""catalog of multiple class methods that are executed depending on an init
parameter
"""
x1 = 'x1'
x2 = 'x2'
def __init__(self, param):
# simple test to validate param value
if param in self._class_method_choices:
self.param = param
else:
raise ValueError("Invalid Value for Param: {0}".format(param))
@classmethod
def _class_method_1(cls):
print("Value {}".format(cls.x1))
@classmethod
def _class_method_2(cls):
print("Value {}".format(cls.x2))
_class_method_choices = {'param_value_1': _class_method_1, 'param_value_2': _class_method_2}
def main_method(self):
"""will execute either _class_method_1 or _class_method_2
depending on self.param value
"""
self._class_method_choices[self.param].__get__(None, self.__class__)()
class CatalogStatic(object):
"""catalog of multiple static methods that are executed depending on an init
parameter
"""
def __init__(self, param):
# simple test to validate param value
if param in self._static_method_choices:
self.param = param
else:
raise ValueError("Invalid Value for Param: {0}".format(param))
@staticmethod
def _static_method_1():
print("executed method 1!")
@staticmethod
def _static_method_2():
print("executed method 2!")
_static_method_choices = {'param_value_1': _static_method_1, 'param_value_2': _static_method_2}
def main_method(self):
"""will execute either _static_method_1 or _static_method_2
depending on self.param value
"""
self._static_method_choices[self.param].__get__(None, self.__class__)()
def main():
"""
>>> test = Catalog('param_value_2')
>>> test.main_method()
executed method 2!
>>> test = CatalogInstance('param_value_1')
>>> test.main_method()
Value x1
>>> test = CatalogClass('param_value_2')
>>> test.main_method()
Value x2
>>> test = CatalogStatic('param_value_1')
>>> test.main_method()
executed method 1!
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py | 1a3246740e21c2272426a60c55e6bbbcd7779574 | from PySide2.QtWidgets import *
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
bt1 = QPushButton("bt1")
bt2 = QPushButton("bt2")
bt3 = QPushButton("bt3")
layout2 = QHBoxLayout()
layout2.addWidget(QPushButton("ok"))
layout2.addWidget(QPushButton("cancel"))
layout = QVBoxLayout()
layout.addWidget(bt1)
layout.addWidget(bt2)
layout.addStretch()
layout.addLayout(layout2)
widget = QWidget()
widget.setLayout(layout)
widget.show()
app.exec_()
|
py | 1a32473b5e6ab8fee57f07e9ea1bfedad91507ea | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import re
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
# intentionally *not* adding an encoding option to open
# see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return open(path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='schedule',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=find_version('schedule', '__init__.py'),
#version='0.0.1',
description='batch scheduler',
long_description=long_description,
# The project's main homepage.
url='https://github.com/assethurajan/aws-batch-example',
# Author details
author='Sethu rajan',
author_email='[email protected]',
# Choose your license
license='Apache2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
# What does your project relate to?
keywords='Amazon Batch Jobs',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'boto3>=1.3.1'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'schedule = schedule:main'
],
},
)
|
py | 1a32477e6c07ce0ef2466b3153bd7911f4f81be1 | ''' '''
'''
ISC License
Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
## \defgroup Tutorials_6_1
## @{
# Demonstrates how to stabilize the tumble of a 6-DOF spacecraft with reaction wheels in the BSK_Sim architecture.
#
# BSK Simulation: Feedback RW {#scenario_FeedbackRW}
# ====
#
# Scenario Description
# -----
# This script sets up a 6-DOF spacecraft orbiting Earth. The goal of the scenario is to
# 1) add reaction wheels to BSK_Dynamics.py, and 2) establish a inertial pointing FSW mode in BSK_FSW.py.
#
# To run the default scenario, call the python script from a Terminal window through
#
# python scenario_FeedbackRW.py
#
# The simulation mimics the basic simulation in the earlier tutorial in
# [scenarioAttitudeFeedbackRW.py](@ref scenarioAttitudeFeedbackRW).
#
# The simulation layout is shown in the following illustration.
# 
# Two simulation processes are created: one
# which contains dynamics modules, and one that contains the FSW
# modules. The initial setup for the simulation closely models that of scenario_BasicOrbit.py.
#
# The scenario must inherit from the BSK_master class using:
# ~~~~~~~~~~~~~{.py}
# class scenario_FeedbackRW(BSKScenario):
# def __init__(self, masterSim):
# super(scenario_FeedbackRW, self).__init__(masterSim)
# self.name = 'scenario_FeedbackRW'
# ~~~~~~~~~~~~~
#
# Within configure_initial_conditions(), the user needs to first define the spacecraft FSW mode for the simulation
# through:
# ~~~~~~~~~~~~~{.py}
# self.masterSim.modeRequest = "inertial3D"
# ~~~~~~~~~~~~~
# which triggers the `initiateInertial3D` event within the BSK_FSW.py script.
#
# The initial conditions for the scenario are the same as found within scenario_BasicOrbit.py except the tumble of the
# spacecraft must be simulated by adding:
# ~~~~~~~~~~~~~{.py}
# self.masterSim.get_DynModel().scObject.hub.sigma_BNInit = [[0.1], [0.2], [-0.3]] # sigma_BN_B
# self.masterSim.get_DynModel().scObject.hub.omega_BN_BInit = [[0.001], [-0.01], [0.03]] # rad/s - omega_BN_B
# ~~~~~~~~~~~~~
# Within BSK_Scenario.py log_outputs(), the user must log additional messages to observe how the spacecraft corrected
# for its initial tumbling through:
# ~~~~~~~~~~~~~{.py}
# # FSW process outputs
# samplingTime = self.masterSim.get_FswModel().processTasksTimeStep
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().mrpFeedbackRWsData.inputRWSpeedsName, samplingTime)
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName, samplingTime)
# self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().trackingErrorData.outputDataName, samplingTime)
# ~~~~~~~~~~~~~
# The data is then pulled using:
# ~~~~~~~~~~~~~{.py}
# dataUsReq = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName + ".motorTorque", range(num_RW))
# sigma_BR = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".sigma_BR", range(3))
# omega_BR_B = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".omega_BR_B", range(3))
# RW_speeds = self.masterSim.pullMessageLogData(
# self.masterSim.get_FswModel().mrpFeedbackRWsData.inputRWSpeedsName + ".wheelSpeeds", range(num_RW))
# self.masterSim.get_FswModel().mrpSteeringData.outputDataName + ".omega_BastR_B", range(3))
#
# ~~~~~~~~~~~~~
# and then plot using:
# ~~~~~~~~~~~~~{.py}
# # Plot results
# timeData = dataUsReq[:, 0] * macros.NANO2MIN
# BSK_plt.plot_attitude_error(timeData, sigma_BR)
# BSK_plt.plot_rw_cmd_torque(timeData, dataUsReq, num_RW)
# BSK_plt.plot_rate_error(timeData, omega_BR_B)
# BSK_plt.plot_rw_speeds(timeData, RW_speeds, num_RW)
# ~~~~~~~~~~~~~
#
#
#
#
#
#
#
#
#
#
# Custom Dynamics Configurations Instructions
# -----
# In addition to the modules used in scenario_BasicOrbit.py, the user must configure the RW module in BSK_Dynamics.py
# to stabilize the tumbling. This is accomplished by first creating the RW state effector:
# ~~~~~~~~~~~~~{.py}
# # Instantiate Dyn modules as objects
# self.rwStateEffector = reactionWheelStateEffector.ReactionWheelStateEffector()
# ~~~~~~~~~~~~~
# The RW object is then configured through `InitAllDynObjects(SimBase)` which includes the `SetReactionWheelDynEffector()`
# function which configures the RW pyramid's properties and messages.
# ~~~~~~~~~~~~~{.py}
# # Global call to initialize every module
# def InitAllDynObjects(self):
# self.SetReactionWheelDynEffector()
# ~~~~~~~~~~~~~
#
# The setter function itself includes:
#
# ~~~~~~~~~~~~~{.py}
# def SetReactionWheelDynEffector(self):
# # Make a fresh RW factory instance, this is critical to run multiple times
# rwFactory = simIncludeRW.rwFactory()
#
# # specify RW momentum capacity
# maxRWMomentum = 50. # Nms
#
# # Define orthogonal RW pyramid
# # -- Pointing directions
# rwElAngle = np.array([40.0, 40.0, 40.0, 40.0])*mc.D2R
# rwAzimuthAngle = np.array([45.0, 135.0, 225.0, 315.0])*mc.D2R
# rwPosVector = [[0.8, 0.8, 1.79070],
# [0.8, -0.8, 1.79070],
# [-0.8, -0.8, 1.79070],
# [-0.8, 0.8, 1.79070]
# ]
#
# for elAngle, azAngle, posVector in zip(rwElAngle, rwAzimuthAngle, rwPosVector):
# gsHat = (rbk.Mi(-azAngle,3).dot(rbk.Mi(elAngle,2))).dot(np.array([1,0,0]))
# rwFactory.create('Honeywell_HR16',
# gsHat,
# maxMomentum=maxRWMomentum,
# rWB_B=posVector)
#
# rwFactory.addToSpacecraft("RWStateEffector", self.rwStateEffector, self.scObject)
# ~~~~~~~~~~~~~
# which generates a RW pyramid using the RW factory and then adds it to the spacecraft. Now all future BSK_Scenarios
# have access to a pre-configured RW pyramid that does not need to be defined for each new simulation.
#
# Following the configuration of all
# dynamics objects' messages and properties, the objects must be attached to the DynamicsTask. In addition to the tasks
# assigned in [scenario_BasicOrbit.py](@ref scenario_BasicOrbit), the user must also add:
# ~~~~~~~~~~~~~{.py}
# # Assign initialized modules to tasks
# SimBase.AddModelToTask(self.taskName, self.rwStateEffector, None, 301)
# ~~~~~~~~~~~~~
#
#
#
#
#
#
#
#
# Custom FSW Configurations Instructions
# -----
# To configure the desired "inertial3D" FSW mode the user must declare the following modules
# within the `__init__()` function in BSK_FSW.py:
# ~~~~~~~~~~~~~{.py}
# self.inertial3DData = inertial3D.inertial3DConfig()
# self.inertial3DWrap = SimBase.setModelDataWrap(self.inertial3DData)
# self.inertial3DWrap.ModelTag = "inertial3D"
#
# self.trackingErrorData = attTrackingError.attTrackingErrorConfig()
# self.trackingErrorWrap = SimBase.setModelDataWrap(self.trackingErrorData)
# self.trackingErrorWrap.ModelTag = "trackingError"
#
# self.mrpFeedbackRWsData = MRP_Feedback.MRP_FeedbackConfig()
# self.mrpFeedbackRWsWrap = SimBase.setModelDataWrap(self.mrpFeedbackRWsData)
# self.mrpFeedbackRWsWrap.ModelTag = "mrpFeedbackRWs"
#
# self.rwMotorTorqueData = rwMotorTorque.rwMotorTorqueConfig()
# self.rwMotorTorqueWrap = SimBase.setModelDataWrap(self.rwMotorTorqueData)
# self.rwMotorTorqueWrap.ModelTag = "rwMotorTorque"
# ~~~~~~~~~~~~~
# These provide the initial setup for an attitude guidance system that makes use of an inertial pointing model, a module
# that tracks the error of the spacecraft's MRP parameters against the pointing model, and a module that takes that
# information to provide a torque to correct for the error.
#
# Following the initial declaration of these configuration modules, BSK_FSW.py calls a `InitAllFSWObjects()` command,
# which, like BSK_Dynamics's InitAllDynObjects(), calls additional setter functions that configure each of the FSW modules
# with the appropriate information and message names.
#
# Within `InitAllFSWObjects()` these modules are configured by calling the setter functions:
# ~~~~~~~~~~~~~{.py}
# # Global call to initialize every module
# def InitAllFSWObjects(self, SimBase):
# self.SetInertial3DPointGuidance()
# self.SetAttitudeTrackingError(SimBase)
# self.SetMRPFeedbackControl(SimBase)
# self.SetVehicleConfiguration(SimBase)
# self.SetRWConfigMsg(SimBase)
# self.SetMRPFeedbackRWA()
# self.SetRWMotorTorque(SimBase)
# ~~~~~~~~~~~~~
# Which configure the FSW modules as seen below:
# ~~~~~~~~~~~~~{.py}
#
# def SetInertial3DPointGuidance(self):
# self.inertial3DData.sigma_R0N = [0.2, 0.4, 0.6]
# self.inertial3DData.outputDataName = "referenceOut"
#
# def SetAttitudeTrackingError(self, SimBase):
# self.trackingErrorData.inputNavName = SimBase.get_DynModel().simpleNavObject.outputAttName
# # Note: SimBase.get_DynModel().simpleNavObject.outputAttName = "simple_att_nav_output"
# self.trackingErrorData.inputRefName = "referenceOut"
# self.trackingErrorData.outputDataName = "guidanceOut"
#
# def SetMRPFeedbackControl(self, SimBase):
# self.mrpFeedbackControlData.inputGuidName = "guidanceOut"
# self.mrpFeedbackControlData.vehConfigInMsgName = "adcs_config_data"
# self.mrpFeedbackControlData.outputDataName = SimBase.get_DynModel().extForceTorqueObject.cmdTorqueInMsgName
# # Note: SimBase.get_DynModel().extForceTorqueObject.cmdTorqueInMsgName = "extTorquePntB_B_cmds"
#
# self.mrpFeedbackControlData.K = 3.5
# self.mrpFeedbackControlData.Ki = -1.0 # Note: make value negative to turn off integral feedback
# self.mrpFeedbackControlData.P = 30.0
# self.mrpFeedbackControlData.integralLimit = 2. / self.mrpFeedbackControlData.Ki * 0.1
# self.mrpFeedbackControlData.domega0 = [0.0, 0.0, 0.0]
#
# def SetMRPFeedbackRWA(self):
# self.mrpFeedbackRWsData.K = 3.5
# self.mrpFeedbackRWsData.Ki = -1 # Note: make value negative to turn off integral feedback
# self.mrpFeedbackRWsData.P = 30.0
# self.mrpFeedbackRWsData.integralLimit = 2. / self.mrpFeedbackRWsData.Ki * 0.1
# self.mrpFeedbackRWsData.domega0 = [0.0, 0.0, 0.0]
#
# self.mrpFeedbackRWsData.vehConfigInMsgName = "adcs_config_data"
# self.mrpFeedbackRWsData.inputRWSpeedsName = "reactionwheel_output_states"
# self.mrpFeedbackRWsData.rwParamsInMsgName = "rwa_config_data"
# self.mrpFeedbackRWsData.inputGuidName = "guidanceOut"
# self.mrpFeedbackRWsData.outputDataName = "controlTorqueRaw"
#
# def SetRWMotorTorque(self, SimBase):
# controlAxes_B = [
# 1.0, 0.0, 0.0
# , 0.0, 1.0, 0.0
# , 0.0, 0.0, 1.0
# ]
# self.rwMotorTorqueData.controlAxes_B = controlAxes_B
# self.rwMotorTorqueData.inputVehControlName = "controlTorqueRaw"
# self.rwMotorTorqueData.outputDataName = SimBase.get_DynModel().rwStateEffector.InputCmds # "reactionwheel_cmds"
# self.rwMotorTorqueData.rwParamsInMsgName = "rwa_config_data"
# ~~~~~~~~~~~~~
# Note how the messages occasionally pull output data from the `SimBase.get_DynModel()` to link messages from BSK_Dynamics.py.
#
# In addition to the modules used for attitude guidance, there are also two setter functions that send vehicle and RW
# configuration messages that are linked into the attitude guidance modules:
# ~~~~~~~~~~~~~{.py}
# def SetVehicleConfiguration(self, SimBase):
# vehicleConfigOut = fswMessages.VehicleConfigFswMsg()
# # use the same inertia in the FSW algorithm as in the simulation
# vehicleConfigOut.ISCPntB_B = [900.0, 0.0, 0.0, 0.0, 800.0, 0.0, 0.0, 0.0, 600.0]
# unitTestSupport.setMessage(SimBase.TotalSim,
# SimBase.FSWProcessName,
# "adcs_config_data",
# vehicleConfigOut)
#
# def SetRWConfigMsg(self, SimBase):
# # Configure RW pyramid exactly as it is in the Dynamics (i.e. FSW with perfect knowledge)
# rwElAngle = np.array([40.0, 40.0, 40.0, 40.0]) * mc.D2R
# rwAzimuthAngle = np.array([45.0, 135.0, 225.0, 315.0]) * mc.D2R
# wheelJs = 50.0 / (6000.0 * math.pi * 2.0 / 60)
#
# fswSetupRW.clearSetup()
# for elAngle, azAngle in zip(rwElAngle, rwAzimuthAngle):
# gsHat = (rbk.Mi(-azAngle, 3).dot(rbk.Mi(elAngle, 2))).dot(np.array([1, 0, 0]))
# fswSetupRW.create(gsHat, # spin axis
# wheelJs, # kg*m^2
# 0.2) # Nm uMax
#
# fswSetupRW.writeConfigMessage("rwa_config_data", SimBase.TotalSim, SimBase.FSWProcessName)
# ~~~~~~~~~~~~~
# After each configuration module has been properly initialized with various message names, FSW tasks are generated.
# The two tasks required for the "inertial3D" mode are `inertial3DPointTask` and `mrpFeedbackRWsTask` and they are
# generated through:
# ~~~~~~~~~~~~~{.py}
# SimBase.fswProc.addTask(SimBase.CreateNewTask("inertial3DPointTask", self.processTasksTimeStep), 20)
# SimBase.fswProc.addTask(SimBase.CreateNewTask("mrpFeedbackRWsTask", self.processTasksTimeStep), 10)
# ~~~~~~~~~~~~~
# Note how the tasks are divided between the pointing model and control loop. These modular tasks allow
# for simple FSW reconfigurations should the user want to use a different pointing model while keeping the same feedback
# control loop. This will be seen and discussed in later scenarios.
#
# Each task then has various FSW models added to it:
# ~~~~~~~~~~~~~{.py}
# SimBase.AddModelToTask("inertial3DPointTask", self.inertial3DWrap, self.inertial3DData, 10)
# SimBase.AddModelToTask("inertial3DPointTask", self.trackingErrorWrap, self.trackingErrorData, 9)
#
# SimBase.AddModelToTask("mrpFeedbackRWsTask", self.mrpFeedbackRWsWrap, self.mrpFeedbackRWsData, 9)
# SimBase.AddModelToTask("mrpFeedbackRWsTask", self.rwMotorTorqueWrap, self.rwMotorTorqueData, 8)
# ~~~~~~~~~~~~~
# Finally, the `inertial3D` mode call in scenario_FeedbackRW.py needs to be triggered by:
# ~~~~~~~~~~~~~{.py}
# SimBase.createNewEvent("initiateInertial3D", self.processTasksTimeStep, True,
# ["self.modeRequest == 'inertial3D'"],
# ["self.fswProc.disableAllTasks()",
# "self.enableTask('inertial3DPointTask')",
# "self.enableTask('mrpFeedbackRWsTask')"])
# ~~~~~~~~~~~~~
# which disables any existing tasks and enables the inertial pointing task and RW feedback task.
# This concludes how to construct a preconfigured FSW mode that will be available for any future scenario
# that uses the BSK_Sim architecture.
#
# Numerical Simulation Results
# ------------
# If this simulation is run, then the following plots should be shown.
# 
# 
# 
# 
#
## @}
# Import utilities
from Basilisk.utilities import orbitalMotion, macros, unitTestSupport
# Get current file path
import sys, os, inspect
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
# Import master classes: simulation base class and scenario base class
sys.path.append(path + '/..')
from BSK_masters import BSKSim, BSKScenario
import BSK_Dynamics, BSK_Fsw
# Import plotting file for your scenario
sys.path.append(path + '/../plotting')
import BSK_Plotting as BSK_plt
# Create your own scenario child class
class scenario_AttitudeFeedbackRW(BSKScenario):
def __init__(self, masterSim):
super(scenario_AttitudeFeedbackRW, self).__init__(masterSim)
self.name = 'scenario_AttitudeFeedbackRW'
self.masterSim = masterSim
def configure_initial_conditions(self):
        print('%s: configure_initial_conditions' % self.name)
# Configure FSW mode
self.masterSim.modeRequest = 'inertial3D'
# Configure Dynamics initial conditions
oe = orbitalMotion.ClassicElements()
oe.a = 10000000.0 # meters
oe.e = 0.01
oe.i = 33.3 * macros.D2R
oe.Omega = 48.2 * macros.D2R
oe.omega = 347.8 * macros.D2R
oe.f = 85.3 * macros.D2R
mu = self.masterSim.get_DynModel().gravFactory.gravBodies['earth'].mu
rN, vN = orbitalMotion.elem2rv(mu, oe)
orbitalMotion.rv2elem(mu, rN, vN)
self.masterSim.get_DynModel().scObject.hub.r_CN_NInit = unitTestSupport.np2EigenVectorXd(rN) # m - r_CN_N
self.masterSim.get_DynModel().scObject.hub.v_CN_NInit = unitTestSupport.np2EigenVectorXd(vN) # m/s - v_CN_N
self.masterSim.get_DynModel().scObject.hub.sigma_BNInit = [[0.1], [0.2], [-0.3]] # sigma_BN_B
self.masterSim.get_DynModel().scObject.hub.omega_BN_BInit = [[0.001], [-0.01], [0.03]] # rad/s - omega_BN_B
def log_outputs(self):
        print('%s: log_outputs' % self.name)
# Dynamics process outputs: log messages below if desired.
# FSW process outputs
samplingTime = self.masterSim.get_FswModel().processTasksTimeStep
self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().mrpFeedbackRWsData.inputRWSpeedsName, samplingTime)
self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName, samplingTime)
self.masterSim.TotalSim.logThisMessage(self.masterSim.get_FswModel().trackingErrorData.outputDataName, samplingTime)
return
def pull_outputs(self, showPlots):
        print('%s: pull_outputs' % self.name)
num_RW = 4 # number of wheels used in the scenario
# Dynamics process outputs: pull log messages below if any
# FSW process outputs
dataUsReq = self.masterSim.pullMessageLogData(
self.masterSim.get_FswModel().rwMotorTorqueData.outputDataName + ".motorTorque", range(num_RW))
sigma_BR = self.masterSim.pullMessageLogData(
self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".sigma_BR", range(3))
omega_BR_B = self.masterSim.pullMessageLogData(
self.masterSim.get_FswModel().trackingErrorData.outputDataName + ".omega_BR_B", range(3))
RW_speeds = self.masterSim.pullMessageLogData(
self.masterSim.get_FswModel().mrpFeedbackRWsData.inputRWSpeedsName + ".wheelSpeeds", range(num_RW))
# Plot results
BSK_plt.clear_all_plots()
timeData = dataUsReq[:, 0] * macros.NANO2MIN
BSK_plt.plot_attitude_error(timeData, sigma_BR)
BSK_plt.plot_rw_cmd_torque(timeData, dataUsReq, num_RW)
BSK_plt.plot_rate_error(timeData, omega_BR_B)
BSK_plt.plot_rw_speeds(timeData, RW_speeds, num_RW)
figureList = {}
if showPlots:
BSK_plt.show_all_plots()
else:
fileName = os.path.basename(os.path.splitext(__file__)[0])
figureNames = ["attitudeErrorNorm", "rwMotorTorque", "rateError", "rwSpeed"]
figureList = BSK_plt.save_all_plots(fileName, figureNames)
return figureList
def run(showPlots):
# Instantiate base simulation
TheBSKSim = BSKSim()
TheBSKSim.set_DynModel(BSK_Dynamics)
TheBSKSim.set_FswModel(BSK_Fsw)
TheBSKSim.initInterfaces()
# Configure a scenario in the base simulation
TheScenario = scenario_AttitudeFeedbackRW(TheBSKSim)
TheScenario.log_outputs()
TheScenario.configure_initial_conditions()
# Initialize simulation
TheBSKSim.InitializeSimulationAndDiscover()
# Configure run time and execute simulation
simulationTime = macros.min2nano(10.)
TheBSKSim.ConfigureStopTime(simulationTime)
    print('Starting Execution')
TheBSKSim.ExecuteSimulation()
    print('Finished Execution. Post-processing results')
# Pull the results of the base simulation running the chosen scenario
figureList = TheScenario.pull_outputs(showPlots)
return figureList
if __name__ == "__main__":
run(True)
|
py | 1a3247bf8ce06e28cf0a4e4f2d48171f9d83f7cf | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ossdbtoolsservice.tasks.contracts.cancel_task import CANCEL_TASK_REQUEST, CancelTaskParameters
from ossdbtoolsservice.tasks.contracts.list_tasks import LIST_TASKS_REQUEST, ListTasksParameters
from ossdbtoolsservice.tasks.contracts.task_info import TaskInfo
__all__ = ['CANCEL_TASK_REQUEST', 'CancelTaskParameters', 'LIST_TASKS_REQUEST', 'ListTasksParameters', 'TaskInfo']
|
py | 1a32480f1ef93dad55c9c3cfaf862241ced02fe0 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
Released under the MIT license.
"""
import math
import numpy as np
import functools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
EPSIRON = 1.0e-8
# Return an array of the values at x of the Legendre polynomials P0 through P_(length-1)
def legendre(x, length):
values = [1, x]
for i in range(2, length):
v = ((2*i-1)*x*values[i-1] - (i-1) * values[i-2]) / i
values.append(v)
return values
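# Editor's addition (optional sanity check, not part of the original script): the
# loop above implements Bonnet's three-term recurrence
#     i * P_i(x) = (2*i - 1) * x * P_{i-1}(x) - (i - 1) * P_{i-2}(x),
# so the returned values can be compared against numpy's reference implementation.
def _check_legendre(x=0.5, length=6, tol=1e-12):
    vals = legendre(x, length)
    for i, v in enumerate(vals):
        ref = np.polynomial.legendre.legval(x, [0] * i + [1])  # evaluates P_i(x)
        assert abs(v - ref) < tol, (i, v, ref)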
# Return an array of the values at x of the Chebyshev polynomials P0 through P_(length-1)
def chebyshev(x, length):
values = []
for i in range(length):
v = np.cos(i * np.arccos(x))
values.append(v)
return values
if __name__ == '__main__':
    # Common settings
length = 6
x_list = np.arange(-0.99, 1.00, 0.01)
f_matrix = np.zeros((length, 199), dtype=float)
# legendre
"""
for i, x in enumerate(x_list):
values = legendre(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Legendre多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
"""
# chebyshev
for i, x in enumerate(x_list):
values = chebyshev(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Chebyshev多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
|
py | 1a3248949aec861c0cdbb4dd17d80d9e82db1d7d | #!/usr/bin/env python3
print("Starting server...")
from ev3dev2.console import Console
Console("Lat15-Terminus12x6")
print("Importing modules (this may take a while)...")
import time
t1 = time.perf_counter()
import json
import os
import subprocess
import time
import traceback
from base64 import b64decode
from shutil import which
from socket import gethostname
from threading import Thread, Lock
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from ev3dev2.led import Leds
from ev3dev2.motor import list_motors, Motor, MoveJoystick, OUTPUT_B, OUTPUT_C
from ev3dev2.sensor import list_sensors, Sensor
t2 = time.perf_counter()
print("Imported in", t2-t1)
# has auth is True if users should be logged in to access the server
HAS_AUTH = (os.path.exists(".htpasswd") # check that password file exists ...
and which("htpasswd") is not None) # ... and that program 'htpasswd' exists
class BasicAuthHandler(tornado.web.RequestHandler):
def prepare(self):
if HAS_AUTH:
def request_auth():
self.set_header("WWW-Authenticate", 'Basic realm="Connect to ' + gethostname() + '"')
self.set_status(401)
self.finish()
                raise tornado.web.Finish()
auth = self.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
return request_auth()
try:
decoded = b64decode(auth.split(maxsplit=1)[1])
except Exception:
return request_auth()
user, pwd = decoded.split(b":", 1)
try:
proc = subprocess.run(["htpasswd", "-i", "-v", ".htpasswd", user], timeout=1, input=pwd)
except subprocess.TimeoutExpired:
return request_auth()
if proc.returncode != 0:
return request_auth()
LEDS = Leds()
LEDS.all_off()
LEDS.reset()
move_joystick = None
motors = {}
old_joystick_left_port = None
old_joystick_right_port = None
old_motor_1_port = None
old_motor_2_port = None
class EV3InfoHandler(BasicAuthHandler, tornado.websocket.WebSocketHandler):
websockets = set()
websockets_lock = Lock()
def open(self):
with EV3InfoHandler.websockets_lock:
EV3InfoHandler.websockets.add(self)
self.write_message(get_info(set(), set(), True)[0])
self.write_message("next") # inform client that it is allowed to send a new message
def on_close(self):
with EV3InfoHandler.websockets_lock:
EV3InfoHandler.websockets.remove(self)
def on_message(self, messages):
global move_joystick
try:
print("got messages", messages)
for message in json.loads(messages):
type_ = message["type"]
if type_ == "rc-joystick":
if message["leftPort"] != old_joystick_left_port or message["rightPort"] != old_joystick_right_port:
move_joystick = MoveJoystick(message["leftPort"], message["rightPort"])
if message["x"] == 0 and message["y"] == 0:
move_joystick.off(brake=False)
else:
move_joystick.on(message["x"], message["y"], 1)
elif type_ == "rc-motor":
if message["port"] in motors:
motor = motors[message["port"]]
else:
motor = motors[message["port"]] = Motor(message["port"])
motor.on(message["speed"]*100)
elif type_ == "sensor":
port = message["port"]
attributes = message["attributes"]
device = Sensor(port)
for name, value in attributes.items():
setattr(device, name, value)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
elif type_ == "motor":
port = message["port"]
attributes = message["attributes"]
device = Motor(port)
for name, value in attributes.items():
setattr(device, name, value)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
elif type_ == "led":
port = message["port"]
attributes = message["attributes"]
led_group = port.split(":")[1].lower()
for color_name, brightness in attributes.items():
LEDS.leds[color_name + "_" + led_group].brightness_pct = float(brightness)
# send changes to other clients
EV3InfoHandler.send_to_all(json.dumps({port: attributes}), {self})
else:
raise ValueError("Unknown message type '" + type_ + "'")
except Exception:
traceback.print_exc()
self.send_to_all("next")
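    # Editor's note (illustrative payload; port names and attribute values are
    # hypothetical): a client message is a JSON-encoded list of commands of the
    # shapes handled above, e.g.
    #   [{"type": "rc-motor", "port": "ev3-ports:outB", "speed": 0.5},
    #    {"type": "led", "port": "led:LEFT", "attributes": {"red": "0", "green": "100"}}]
    # After processing a batch the handler broadcasts "next" to signal that it is
    # ready for the next message.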
@classmethod
def send_to_all(cls, message, exclude_websockets=None):
with cls.websockets_lock:
for websocket in cls.websockets:
if not exclude_websockets or websocket not in exclude_websockets:
try:
websocket.write_message(message)
except Exception:
traceback.print_exc()
"""
Returns a string containing a JSON object which describes the current motor/sensor values in the following format:
{
"<address (e.g. "ev3-ports:in1")>": {
// for both sensors and motors:
"driver_name": "<driver name>",
"command": [<list of possible commands>],
// for sensors:
"values": "<current sensor values, separated by space (max. 8)>",
"mode": {
"selected": "<currently selected mode>],
"values": [<list of possible modes>]
},
// for motors:
"position": "<current motor position>",
"duty_cycle_sp": "<duty cycle setpoint>",
"polarity": "normal" or "inversed",
"position_sp": "position setpoint",
"speed_sp": "speed setpoint",
"ramp_up_sp": "ramp up setpoint",
"ramp_down_sp": "ramp down setpoint",
"stop_action": {
"selected": "<currently selected stop_action>",
"values": [<list of possible stop_actions>]
},
"time_sp": "time setpoint",
}
}
Parameters 'old_sensor_addresses' and 'old_motor_addresses' are sets of previously available addresses.
If an address was previously available, only "values" attribute (for sensors) or "position" attribute (for motors) is included.
This is because these are the only properties that change while the user views the page.
If 'all_info' is True, additional info is added that clients need when they connect for the first time: Currently, this is only LED brightnesses.
When a WebSocket first connects with the server, get_info(set(), set()) is called so that initially the client receives all attributes (see EV3InfoHandler.open).
get_info returns: (string containing JSON object, new sensor addresses (for use in the next call of get_info), new motor addresses (for use in the next call of get_info)).
"""
def get_info(old_sensor_addresses, old_motor_addresses, all_info=False):
info = {"disconnected_devices": []}
if all_info:
for group_name, leds in LEDS.led_groups.items():
info["led:" + group_name] = {led.desc.split("_")[0]: led.brightness_pct for led in leds}
sensor_addresses = set()
for sensor in list_sensors("*"):
try:
address = sensor.address
if address.count(":") > 1:
# addresses for i2c sensors end with ':i2c*', remove this
address = address[:address.index(":", address.index(":")+1)]
if address in old_sensor_addresses:
old_sensor_addresses.remove(address)
info[address] = {
"values": " ".join(str(sensor.value(i)) for i in range(sensor.num_values))
}
else:
info[address] = {
"driver_name": sensor.driver_name,
"mode": {
"values": sensor.modes,
"selected": sensor.mode
},
"command": sensor.commands,
"values": " ".join(str(sensor.value(i)) for i in range(sensor.num_values)),
#"decimals": sensor.decimals,
}
sensor_addresses.add(address)
except Exception:
traceback.print_exc()
info["disconnected_devices"].extend(old_sensor_addresses)
motor_addresses = set()
for motor in list_motors("*"):
try:
address = motor.address
if address in old_motor_addresses:
old_motor_addresses.remove(address)
info[address] = {
"position": motor.position
}
else:
info[address] = {
"driver_name": motor.driver_name,
"duty_cycle_sp": motor.duty_cycle_sp,
"polarity": motor.polarity,
"position": motor.position,
"position_sp": motor.position_sp,
"speed_sp": motor.speed_sp,
"ramp_up_sp": motor.ramp_up_sp,
"ramp_down_sp": motor.ramp_down_sp,
"stop_action": {
"values": motor.stop_actions,
"selected": motor.stop_action
},
"time_sp": motor.time_sp,
"command": motor.commands
}
motor_addresses.add(address)
except Exception:
traceback.print_exc()
info["disconnected_devices"].extend(old_motor_addresses)
content = json.dumps(info).encode("utf-8")
return content, sensor_addresses, motor_addresses
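# Editor's addition (client-side sketch, not part of the original server): because
# get_info() omits unchanged attributes for devices that were already reported, a
# consumer of these messages can keep a complete picture by merging each decoded
# payload into a local cache. 'message' here is a hypothetical decoded dict.
def _merge_device_info(cache, message):
    for address, attributes in message.items():
        if address == "disconnected_devices":
            for gone in attributes:
                cache.pop(gone, None)
        else:
            cache.setdefault(address, {}).update(attributes)
    return cache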
def send_info():
old_sensor_addresses = set()
old_motor_addresses = set()
while True:
if len(EV3InfoHandler.websockets) == 0:
print("Waiting for clients to connect...")
while len(EV3InfoHandler.websockets) == 0:
time.sleep(0.5)
print("Clients connected!")
content, old_sensor_addresses, old_motor_addresses = get_info(old_sensor_addresses, old_motor_addresses)
EV3InfoHandler.send_to_all(content)
time.sleep(0.1)
class StaticFiles(BasicAuthHandler, tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
self.set_header("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0")
if __name__ == "__main__":
tornado.options.define("port", default=8000, help="run on the given port", type=int)
tornado.options.parse_command_line()
static_files = os.path.join(os.path.dirname(__file__), "website")
app = tornado.web.Application([
(r"/ev3-info", EV3InfoHandler),
(r"/(.*)", StaticFiles, {"path": static_files, "default_filename": "index.html"})
],
static_path=os.path.join(os.path.dirname(__file__), "website")
)
app.listen(tornado.options.options.port)
print("Serving on port", tornado.options.options.port)
if HAS_AUTH:
print("Basic auth is required when connecting")
ioloop = tornado.ioloop.IOLoop.current()
Thread(target=ioloop.start).start()
Thread(target=send_info).start()
|
py | 1a324a2b2f0e277ea3277428b8a06c8d4716a0f9 | import configparser
import os
config = configparser.ConfigParser()
config.read(['config.ini', os.path.expanduser('~/.thorconfig.ini')],
encoding='utf8')
base_url = str(os.environ.get('THOR_BASE_URL', config['thor']['base_url']))
auth_token = str(os.environ.get('THOR_AUTH_TOKEN', config['thor']['auth_token']))
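# Editor's note (illustrative values): the lookups above expect an INI file with a
# [thor] section, e.g.
#
#   [thor]
#   base_url = https://thor.example.invalid/api
#   auth_token = changeme
#
# and the THOR_BASE_URL / THOR_AUTH_TOKEN environment variables take precedence
# over the file values.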
|
py | 1a324b9447e52275ab265bb55e8efdd21a329c1b | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import sys
import pdb
import torch.nn as nn
import torch.optim as optim
from models.archs.base_networks import *
from torchvision.transforms import *
#from layer import *
#from vgg19 import VGG19
def make_model(args, parent=False):
return Net()
class Get_gradient_nopadding(nn.Module):
def __init__(self):
super(Get_gradient_nopadding, self).__init__()
self.kernel_v = torch.from_numpy(np.array([[0, -1, 0],
[0, 0, 0],
[0, 1, 0]])).cuda().float()
self.kernel_h = torch.from_numpy(np.array([[0, 0, 0],
[-1, 0, 1],
[0, 0, 0]])).cuda().float()
self.kernel_h = self.kernel_h.unsqueeze(0).unsqueeze(0)
self.kernel_v = self.kernel_v.unsqueeze(0).unsqueeze(0)
self.gradient_h = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.gradient_h.weight.data = self.kernel_h
self.gradient_h.weight.requires_grad = False
self.gradient_v = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.gradient_v.weight.data = self.kernel_v
self.gradient_v.weight.requires_grad = False
def forward(self, x):
x_list = []
for i in range(x.shape[1]):
x_i = x[:, i]
x_i_v = self.gradient_v(x_i.unsqueeze(1))
x_i_h = self.gradient_h(x_i.unsqueeze(1))
x_i = torch.sqrt(torch.pow(x_i_v, 2) + torch.pow(x_i_h, 2) + 1e-6)
x_list.append(x_i)
x = torch.cat(x_list, dim = 1)
return x
class Laplacian:
def __init__(self):
self.weight = torch.from_numpy(np.array([
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[8.,0.,0.],[0.,8.,0.],[0.,0.,8.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]]])).cuda().float()
self.frame = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, bias=False)
self.frame.weight.data = self.weight
self.frame.weight.requires_grad = False
def __call__(self, x):
out = self.frame(x)
return out
class _Dense_Block(nn.Module):
def __init__(self, channel_in):
super(_Dense_Block, self).__init__()
self.relu = nn.PReLU()
self.conv1 = nn.Conv2d(in_channels=channel_in, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=48, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(in_channels=64, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=80, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(in_channels=96, out_channels=16, kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(in_channels=112, out_channels=16, kernel_size=3, stride=1, padding=1)
def forward(self, x):
conv1 = self.relu(self.conv1(x))
conv2 = self.relu(self.conv2(conv1))
cout2_dense = self.relu(torch.cat([conv1, conv2], 1))
conv3 = self.relu(self.conv3(cout2_dense))
cout3_dense = self.relu(torch.cat([conv1, conv2, conv3], 1))
conv4 = self.relu(self.conv4(cout3_dense))
cout4_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4], 1))
conv5 = self.relu(self.conv5(cout4_dense))
cout5_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5], 1))
conv6 = self.relu(self.conv6(cout5_dense))
cout6_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6], 1))
conv7 = self.relu(self.conv7(cout6_dense))
cout7_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6, conv7], 1))
conv8 = self.relu(self.conv8(cout7_dense))
cout8_dense = self.relu(torch.cat([conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8], 1))
return cout8_dense
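# Editor's note (derived from the layer definitions above): each _Dense_Block
# concatenates the outputs of its eight 16-channel convolutions, so it always
# returns 8 * 16 = 128 channels regardless of channel_in. That is why channel_in
# of the successive blocks below grows by 128 each time
# (128, 256, 384, 512, 640, 768) and the bottleneck consumes 768 + 128 = 896.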
class EERSGAN(nn.Module):
def __init__(self,num_channels=3, base_filter=64, feat = 256, num_stages=10, scale_factor=4):
super(EERSGAN, self ).__init__()
self.scale = scale_factor
self.lrelu = nn.PReLU()
self.conv1 = nn.Conv2d(in_channels=num_channels, out_channels=128, kernel_size=3, stride=1, padding=1)
self.denseblock1 = self.make_layer(_Dense_Block, 128)
self.denseblock2 = self.make_layer(_Dense_Block, 256)
self.denseblock3 = self.make_layer(_Dense_Block, 384)
self.denseblock4 = self.make_layer(_Dense_Block, 512)
self.denseblock5 = self.make_layer(_Dense_Block, 640)
self.denseblock6 = self.make_layer(_Dense_Block, 768)
self.bottleneck = nn.Conv2d(in_channels=896, out_channels=256, kernel_size=1, stride=1, padding=0, bias=False)
self.ps = nn.PixelShuffle(self.scale)
out_dim = int(256/self.scale/self.scale)
self.reconstruction = nn.Conv2d(in_channels=out_dim, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)
self.Laplacian = Laplacian()
en = [nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2)]
self.en = nn.Sequential(*en)
self.denseblock_e1 = self.make_layer(_Dense_Block, 64)
self.denseblock_e2 = self.make_layer(_Dense_Block, 192)
self.denseblock_e3 = self.make_layer(_Dense_Block, 320)
self.bottleneck_2 = nn.Conv2d(in_channels=448, out_channels=64 , kernel_size=1, stride=1, padding=0, bias=False)
self.e8 = nn.Conv2d(in_channels=448, out_channels=256, kernel_size=3, stride=1, padding=1)
mask = [nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False),
nn.LeakyReLU(0.2)]
self.mask = nn.Sequential(*mask)
self.ps2 = nn.PixelShuffle(self.scale)
out_dim = int(256 / self.scale / self.scale)
self.reconstruction_2 = nn.Conv2d(in_channels=out_dim, out_channels=3, kernel_size=3, stride=1, padding=1,
bias=False)
self.get_g_nopadding = Get_gradient_nopadding()
self.laplacian = Laplacian()
#weight = torch.FloatTensor([
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[8.,0.,0.],[0.,8.,0.],[0.,0.,8.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]],
#[[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]],[[-1.,0.,0.],[0.,-1.,0.],[0.,0.,-1.]]]])
#self.weight = nn.Parameter(data = weight,requires_grad = False).cuda()
def make_layer(self, block, channel_in):
layers = []
layers.append(block(channel_in))
return nn.Sequential(*layers)
def forward(self, x):
#pdb.set_trace()
bic = F.upsample(x, size=(int(x.shape[2]*self.scale), int(x.shape[3]*self.scale)), mode='bilinear')
out = self.lrelu(self.conv1(x))
out1 = self.denseblock1(out)
concat = torch.cat([out,out1], 1)
out = self.denseblock2(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock3(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock4(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock5(concat)
concat = torch.cat([concat,out], 1)
out = self.denseblock6(concat)
concat = torch.cat([concat,out], 1)
#out = self.denseblock7(concat)
#concat = torch.cat([concat,out], 1)
out = self.bottleneck(concat)
out = self.ps(out)
sr_base = self.reconstruction(out) + bic
x_fa = self.laplacian(sr_base)
#pdb.set_trace()
x_f = self.en(x_fa.cuda())
x_f2 = self.denseblock_e1(x_f)
concat = torch.cat([x_f,x_f2], 1)
x_f = self.denseblock_e2(concat)
concat = torch.cat([concat,x_f], 1)
x_f = self.denseblock_e3(concat)
concat = torch.cat([concat,x_f], 1)
x_f = self.lrelu(self.e8(concat))
x_mask = self.mask(self.get_g_nopadding(sr_base))
frame_mask = torch.sigmoid(x_mask)
x_frame = frame_mask * x_f +x_f
#x_frame = self.bottleneck_2(x_frame)
x_frame = self.ps2(x_frame)
x_frame = self.reconstruction_2(x_frame)
x_sr = x_frame + sr_base - x_fa
frame_e = x_frame - x_fa
return frame_e, sr_base, x_sr
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.net = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(512, 1024, kernel_size=1),
nn.LeakyReLU(0.2),
nn.Conv2d(1024, 1, kernel_size=1)
)
def forward(self, x):
batch_size = x.size(0)
return torch.sigmoid(self.net(x).view(batch_size))
if __name__ == '__main__':
    model = EERSGAN(scale_factor=4).cuda()  # the generator class defined in this file
img = torch.rand(3,64,64)
#img = img.unsqueeze(0)
img = img.unsqueeze(0)
img=img.cuda()
out=model(img)
|
py | 1a324baae0968f113357304cbd699fa33411154f | # -*- coding: utf-8 -*-
'''
The module used to execute states in salt. A state is unlike a module
execution in that instead of just executing a command it ensures that a
certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
'''
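# An illustrative example of such a data structure (editor's addition, values are
# hypothetical):
#     {'state': 'pkg', 'fun': 'installed', 'name': 'vim', 'refresh': True}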
# Import python libs
from __future__ import absolute_import
import os
import sys
import copy
import site
import fnmatch
import logging
import datetime
import traceback
import re
# Import salt libs
import salt.utils
import salt.loader
import salt.minion
import salt.pillar
import salt.fileclient
import salt.utils.event
import salt.utils.url
import salt.syspaths as syspaths
from salt.utils import immutabletypes
from salt.template import compile_template, compile_template_str
from salt.exceptions import (
SaltException,
SaltInvocationError,
SaltRenderError,
SaltReqTimeoutError
)
from salt.utils.odict import OrderedDict, DefaultOrderedDict
from salt.utils.locales import sdecode
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import map, range
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset([
'onchanges',
'onfail',
'prereq',
'prerequired',
'watch',
'require',
'listen',
])
STATE_REQUISITE_IN_KEYWORDS = frozenset([
'onchanges_in',
'onfail_in',
'prereq_in',
'watch_in',
'require_in',
'listen_in',
])
STATE_RUNTIME_KEYWORDS = frozenset([
'fun',
'state',
'check_cmd',
'failhard',
'onlyif',
'unless',
'order',
'prereq',
'prereq_in',
'prerequired',
'reload_modules',
'reload_grains',
'reload_pillar',
'fire_event',
'saltenv',
'use',
'use_in',
'__env__',
'__sls__',
'__id__',
'__pub_user',
'__pub_arg',
'__pub_jid',
'__pub_fun',
'__pub_tgt',
'__pub_ret',
'__pub_pid',
'__pub_tgt_type',
'__prereq__',
])
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(STATE_REQUISITE_IN_KEYWORDS).union(STATE_RUNTIME_KEYWORDS)
VALID_PILLAR_ENC = ('gpg',)
def _odict_hashable(self):
return id(self)
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
'''
Take a low tag and split it back into the low dict that it came from
'''
state, id_, name, fun = tag.split('_|-')
return {'state': state,
'__id__': id_,
'name': name,
'fun': fun}
def _gen_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
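# Editor's note (illustrative values): tags are the four fields joined by '_|-',
# so for values that do not themselves contain '_|-' the two helpers above are
# inverses of each other:
#     _gen_tag({'state': 'file', '__id__': 'motd', 'name': '/etc/motd',
#               'fun': 'managed'})
#         -> 'file_|-motd_|-/etc/motd_|-managed'
#     split_low_tag('file_|-motd_|-/etc/motd_|-managed')
#         -> {'state': 'file', '__id__': 'motd', 'name': '/etc/motd', 'fun': 'managed'}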
def _l_tag(name, id_):
low = {'name': 'listen_{0}'.format(name),
'__id__': 'listen_{0}'.format(id_),
'state': 'Listen_Error',
'fun': 'Listen_Error'}
return _gen_tag(low)
def trim_req(req):
'''
Trim any function off of a requisite
'''
reqfirst = next(iter(req))
if '.' in reqfirst:
return {reqfirst.split('.')[0]: req[reqfirst]}
return req
def state_args(id_, state, high):
'''
Return a set of the arguments passed to the named state
'''
args = set()
if id_ not in high:
return args
if state not in high[id_]:
return args
for item in high[id_][state]:
if not isinstance(item, dict):
continue
if len(item) != 1:
continue
args.add(next(iter(item)))
return args
def find_name(name, state, high):
'''
Scan high data for the id referencing the given name and return a list of (IDs, state) tuples that match
Note: if `state` is sls, then we are looking for all IDs that match the given SLS
'''
ext_id = []
if name in high:
ext_id.append((name, state))
# if we are requiring an entire SLS, then we need to add ourselves to everything in that SLS
elif state == 'sls':
        for nid, item in six.iteritems(high):
if item['__sls__'] == name:
ext_id.append((nid, next(iter(item))))
# otherwise we are requiring a single state, lets find it
else:
# We need to scan for the name
for nid in high:
if state in high[nid]:
if isinstance(high[nid][state], list):
for arg in high[nid][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if arg[next(iter(arg))] == name:
ext_id.append((name, state))
return ext_id
def format_log(ret):
'''
Format the state into a log message
'''
msg = ''
if isinstance(ret, dict):
# Looks like the ret may be a valid state return
if 'changes' in ret:
# Yep, looks like a valid state return
chg = ret['changes']
if not chg:
if ret['comment']:
msg = ret['comment']
else:
msg = 'No changes made for {0[name]}'.format(ret)
elif isinstance(chg, dict):
if 'diff' in chg:
if isinstance(chg['diff'], six.string_types):
msg = 'File changed:\n{0}'.format(chg['diff'])
if all([isinstance(x, dict) for x in six.itervalues(chg)]):
if all([('old' in x and 'new' in x)
for x in six.itervalues(chg)]):
msg = 'Made the following changes:\n'
for pkg in chg:
old = chg[pkg]['old']
if not old and old not in (False, None):
old = 'absent'
new = chg[pkg]['new']
if not new and new not in (False, None):
new = 'absent'
msg += '\'{0}\' changed from \'{1}\' to ' \
'\'{2}\'\n'.format(pkg, old, new)
if not msg:
msg = str(ret['changes'])
if ret['result'] is True or ret['result'] is None:
log.info(msg)
else:
log.error(msg)
else:
# catch unhandled data
log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
'''
Compile the master side low state data, and build the hidden state file
'''
st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
return st_.compile_highstate()
def ishashable(obj):
try:
hash(obj)
except TypeError:
return False
return True
class StateError(Exception):
'''
Custom exception class.
'''
pass
class Compiler(object):
'''
Class used to compile and manage the High Data structure
'''
def __init__(self, opts, renderers):
self.opts = opts
self.rend = renderers
def render_template(self, template, **kwargs):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'], **kwargs)
if not high:
return high
return self.pad_funcs(high)
def pad_funcs(self, high):
'''
Turns dot delimited function refs into function strings
'''
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
# Is this is a short state? It needs to be padded!
if '.' in high[name]:
comps = high[name].split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
if len(comps) >= 2:
# Merge the comps
comps[1] = '.'.join(comps[1:len(comps)])
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if not isinstance(name, six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but '
'is a {2}'.format(
name,
body['__sls__'],
type(name).__name__
)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if not isinstance(body[state], list):
errors.append(
'State \'{0}\' in SLS \'{1}\' is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
if not isinstance(arg[argfirst], list):
errors.append(('The {0}'
' statement in state \'{1}\' in SLS \'{2}\' '
'needs to be formed as a list').format(
argfirst,
name,
body['__sls__']
))
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type \'{0}\' '
'in state \'{1}\', in SLS '
'\'{2}\'. Requisite types must '
'not contain dots, did you '
'mean \'{3}\'?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
                                                'in SLS {1}\n'
).format(
str(req_val),
body['__sls__']))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(('Multiple dictionaries '
'defined in argument of state \'{0}\' in SLS'
' \'{1}\'').format(
name,
body['__sls__']))
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(('No function declared in state \'{0}\' in'
' SLS \'{1}\'').format(state, body['__sls__']))
elif fun > 1:
errors.append(
'Too many functions declared in state \'{0}\' in '
'SLS \'{1}\''.format(state, body['__sls__'])
)
return errors
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
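        # Hedged illustration (editor's addition): an explicit integer 'order'
        # sorts first, 'order: last' is pushed to cap + 1000000, chunks with no
        # 'order' receive the running cap, and a 'name_order' of 3 adds a
        # 0.0003 fractional offset so expanded 'names' entries keep their
        # declared sequence.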
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunk['name'] = sdecode(chunk['name'])
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
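        # Hedged illustration (editor's addition): a high data entry such as
        #     {'vim': {'pkg': ['installed'], '__sls__': 'edit', '__env__': 'base'}}
        # compiles into a low chunk along the lines of
        #     {'state': 'pkg', 'fun': 'installed', 'name': 'vim', '__id__': 'vim',
        #      '__sls__': 'edit', '__env__': 'base'}
        # which is then passed through order_chunks() for sorting.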
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
continue
else:
chunk.update(arg)
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
list(map(live.update, entry[low_name]))
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
            # There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
if body.get('__sls__', '') in ex_sls:
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
class State(object):
'''
Class used to execute salt states
'''
def __init__(self, opts, pillar=None, jid=None, pillar_enc=None, proxy=None):
if 'grains' not in opts:
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
self.proxy = proxy
self._pillar_override = pillar
if pillar_enc is not None:
try:
pillar_enc = pillar_enc.lower()
except AttributeError:
pillar_enc = str(pillar_enc).lower()
if pillar_enc not in VALID_PILLAR_ENC:
raise SaltInvocationError(
'Invalid pillar encryption type. Valid types are: {0}'
.format(', '.join(VALID_PILLAR_ENC))
)
self._pillar_enc = pillar_enc
self.opts['pillar'] = self._gather_pillar()
self.state_con = {}
self.load_modules(proxy=proxy)
self.active = set()
self.mod_init = set()
self.pre = {}
self.__run_num = 0
self.jid = jid
self.instance_id = str(id(self))
self.inject_globals = {}
def _decrypt_pillar_override(self):
'''
Decrypt CLI pillar overrides
'''
if not self._pillar_enc:
decrypt = None
else:
            # Pillar data must be gathered before the modules are loaded, since
            # it will be packed into each loaded function. Thus, we will not
            # have access to the functions and must pass an empty dict here.
decrypt = salt.loader.render(
self.opts,
{}).get(self._pillar_enc)
try:
return decrypt(self._pillar_override, translate_newlines=True)
except TypeError:
return self._pillar_override
def _gather_pillar(self):
'''
Whenever a state run starts, gather the pillar data fresh
'''
pillar = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillar=self._pillar_override,
pillarenv=self.opts.get('pillarenv')
)
ret = pillar.compile_pillar()
if self._pillar_override:
if isinstance(self._pillar_override, dict):
ret.update(self._decrypt_pillar_override())
else:
decrypted = yamlloader.load(
self._decrypt_pillar_override(),
Loader=yamlloader.SaltYamlSafeLoader
)
if not isinstance(decrypted, dict):
log.error(
'Decrypted pillar data did not render to a dictionary'
)
else:
ret.update(decrypted)
return ret
def _mod_init(self, low):
'''
Check the module initialization function, if this is the first run
of a state package that has a mod_init function, then execute the
mod_init function in the state module.
'''
# ensure that the module is loaded
try:
self.states['{0}.{1}'.format(low['state'], low['fun'])] # pylint: disable=W0106
except KeyError:
return
minit = '{0}.mod_init'.format(low['state'])
if low['state'] not in self.mod_init:
if minit in self.states._dict:
mret = self.states[minit](low)
if not mret:
return
self.mod_init.add(low['state'])
def _mod_aggregate(self, low, running, chunks):
'''
Execute the aggregation systems to runtime modify the low chunk
'''
agg_opt = self.functions['config.option']('state_aggregate')
if 'aggregate' in low:
agg_opt = low['aggregate']
if agg_opt is True:
agg_opt = [low['state']]
else:
return low
if low['state'] in agg_opt and not low.get('__agg__'):
agg_fun = '{0}.mod_aggregate'.format(low['state'])
if agg_fun in self.states:
try:
low = self.states[agg_fun](low, chunks, running)
low['__agg__'] = True
except TypeError:
log.error('Failed to execute aggregate for state {0}'.format(low['state']))
return low
def _run_check(self, low_data):
'''
        Check the ``onlyif`` and ``unless`` commands: the state is flagged to
        be skipped if an ``onlyif`` command exits non-zero or an ``unless``
        command exits 0.
'''
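        # Hedged illustration (editor's addition): given
        #     onlyif: ['test -e /etc/foo']
        #     unless: ['grep -q bar /etc/foo']
        # the returned dict carries result True plus skip_watch when an onlyif
        # command exits non-zero or an unless command exits 0, which tells the
        # caller to skip the state.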
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
if 'onlyif' in low_data:
if not isinstance(low_data['onlyif'], list):
low_data_onlyif = [low_data['onlyif']]
else:
low_data_onlyif = low_data['onlyif']
for entry in low_data_onlyif:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd != 0 and ret['result'] is False:
ret.update({'comment': 'onlyif execution failed',
'skip_watch': True,
'result': True})
return ret
elif cmd == 0:
ret.update({'comment': 'onlyif execution succeeded', 'result': False})
return ret
if 'unless' in low_data:
if not isinstance(low_data['unless'], list):
low_data_unless = [low_data['unless']]
else:
low_data_unless = low_data['unless']
for entry in low_data_unless:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'unless execution succeeded',
'skip_watch': True,
'result': True})
elif cmd != 0:
ret.update({'comment': 'unless execution failed', 'result': False})
return ret
# No reason to stop, return ret
return ret
def _run_check_cmd(self, low_data):
'''
Alter the way a successful state run is determined
'''
ret = {'result': False}
cmd_opts = {}
if 'shell' in self.opts['grains']:
cmd_opts['shell'] = self.opts['grains'].get('shell')
for entry in low_data['check_cmd']:
cmd = self.functions['cmd.retcode'](
entry, ignore_retcode=True, python_shell=True, **cmd_opts)
log.debug('Last command return code: {0}'.format(cmd))
if cmd == 0 and ret['result'] is False:
ret.update({'comment': 'check_cmd determined the state succeeded', 'result': True})
elif cmd != 0:
ret.update({'comment': 'check_cmd determined the state failed', 'result': False})
return ret
return ret
def reset_run_num(self):
'''
        Reset the run_num value to 0
'''
self.__run_num = 0
def load_modules(self, data=None, proxy=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, self.state_con,
utils=self.utils,
proxy=proxy)
if isinstance(data, dict):
if data.get('provider', False):
if isinstance(data['provider'], str):
providers = [{data['state']: data['provider']}]
elif isinstance(data['provider'], list):
providers = data['provider']
else:
providers = {}
for provider in providers:
for mod in provider:
funcs = salt.loader.raw_mod(self.opts,
provider[mod],
self.functions)
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(
mod,
func[func.rindex('.'):]
)
self.functions[f_key] = funcs[func]
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
def module_refresh(self):
'''
Refresh all the modules
'''
log.debug('Refreshing modules...')
if self.opts['grains'].get('os') != 'MacOS':
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
reload(site)
except RuntimeError:
log.error('Error encountered during module reload. Modules were not reloaded.')
self.load_modules(proxy=self.proxy)
if not self.opts.get('local', False) and self.opts.get('multiprocessing', True):
self.functions['saltutil.refresh_modules']()
def check_refresh(self, data, ret):
'''
Check to see if the modules for this state instance need to be updated,
only update if the state is a file or a package and if it changed
something. If the file function is managed check to see if the file is a
possible module type, e.g. a python, pyx, or .so. Always refresh if the
function is recurse, since that can lay down anything.
'''
_reload_modules = False
if data.get('reload_grains', False):
log.debug('Refreshing grains...')
self.opts['grains'] = salt.loader.grains(self.opts)
_reload_modules = True
if data.get('reload_pillar', False):
log.debug('Refreshing pillar...')
self.opts['pillar'] = self._gather_pillar()
_reload_modules = True
if data.get('reload_modules', False) or _reload_modules:
# User explicitly requests a reload
self.module_refresh()
return
if not ret['changes']:
return
if data['state'] == 'file':
if data['fun'] == 'managed':
if data['name'].endswith(
('.py', '.pyx', '.pyo', '.pyc', '.so')):
self.module_refresh()
elif data['fun'] == 'recurse':
self.module_refresh()
elif data['fun'] == 'symlink':
if 'bin' in data['name']:
self.module_refresh()
elif data['state'] in ('pkg', 'ports'):
self.module_refresh()
def verify_ret(self, ret):
'''
Verify the state return data
'''
if not isinstance(ret, dict):
raise SaltException(
'Malformed state return, return must be a dict'
)
bad = []
for val in ['name', 'result', 'changes', 'comment']:
if val not in ret:
bad.append(val)
if bad:
raise SaltException(
('The following keys were not present in the state '
'return: {0}'
).format(','.join(bad)))
def verify_data(self, data):
'''
Verify the data, return an error statement if something is wrong
'''
errors = []
if 'state' not in data:
errors.append('Missing "state" data')
if 'fun' not in data:
errors.append('Missing "fun" data')
if 'name' not in data:
errors.append('Missing "name" data')
if data['name'] and not isinstance(data['name'], six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but is '
'a {2}'.format(
data['name'], data['__sls__'], type(data['name']).__name__)
)
if errors:
return errors
full = data['state'] + '.' + data['fun']
if full not in self.states:
if '__sls__' in data:
errors.append(
'State \'{0}\' was not found in SLS \'{1}\''.format(
full,
data['__sls__']
)
)
reason = self.states.missing_fun_string(full)
if reason:
errors.append('Reason: {0}'.format(reason))
else:
errors.append(
'Specified state \'{0}\' was not found'.format(
full
)
)
else:
# First verify that the parameters are met
aspec = salt.utils.args.get_function_argspec(self.states[full])
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
for ind in range(arglen - deflen):
if aspec.args[ind] not in data:
errors.append(
'Missing parameter {0} for state {1}'.format(
aspec.args[ind],
full
)
)
# If this chunk has a recursive require, then it will cause a
# recursive loop when executing, check for it
reqdec = ''
if 'require' in data:
reqdec = 'require'
if 'watch' in data:
            # Check to see if the state has a mod_watch function. If it does
            # not, convert the watch into a require: extend the require
            # statement with the contents of watch so that the mod_watch
            # function is not called and the requisite capability is still
            # used.
if '{0}.mod_watch'.format(data['state']) not in self.states:
if 'require' in data:
data['require'].extend(data.pop('watch'))
else:
data['require'] = data.pop('watch')
reqdec = 'require'
else:
reqdec = 'watch'
if reqdec:
for req in data[reqdec]:
reqfirst = next(iter(req))
if data['state'] == reqfirst:
if (fnmatch.fnmatch(data['name'], req[reqfirst])
or fnmatch.fnmatch(data['__id__'], req[reqfirst])):
err = ('Recursive require detected in SLS {0} for'
' require {1} in ID {2}').format(
data['__sls__'],
req,
data['__id__'])
errors.append(err)
return errors
def verify_high(self, high):
'''
Verify that the high data is viable and follows the data structure
'''
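        # Hedged illustration (editor's addition): a structurally valid entry
        # looks roughly like
        #     {'/etc/httpd/conf/httpd.conf': {
        #         'file': ['managed',
        #                  {'source': 'salt://httpd/httpd.conf'},
        #                  {'require': [{'pkg': 'httpd'}]}],
        #         '__sls__': 'httpd', '__env__': 'base'}}
        # i.e. string IDs, dict bodies, list-valued state declarations and
        # list-valued requisites of single-key dicts.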
errors = []
if not isinstance(high, dict):
errors.append('High data is not a dictionary and is invalid')
reqs = {}
for name, body in six.iteritems(high):
try:
if name.startswith('__'):
continue
except AttributeError:
pass
if not isinstance(name, six.string_types):
errors.append(
'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but '
'is a {2}. It may need to be quoted.'.format(
name, body['__sls__'], type(name).__name__)
)
if not isinstance(body, dict):
err = ('The type {0} in {1} is not formatted as a dictionary'
.format(name, body))
errors.append(err)
continue
for state in body:
if state.startswith('__'):
continue
if body[state] is None:
errors.append(
'ID \'{0}\' in SLS \'{1}\' contains a short declaration '
'({2}) with a trailing colon. When not passing any '
'arguments to a state, the colon must be omitted.'
.format(name, body['__sls__'], state)
)
continue
if not isinstance(body[state], list):
errors.append(
'State \'{0}\' in SLS \'{1}\' is not formed as a list'
.format(name, body['__sls__'])
)
else:
fun = 0
if '.' in state:
fun += 1
for arg in body[state]:
if isinstance(arg, six.string_types):
fun += 1
if ' ' in arg.strip():
errors.append(('The function "{0}" in state '
'"{1}" in SLS "{2}" has '
'whitespace, a function with whitespace is '
'not supported, perhaps this is an argument '
'that is missing a ":"').format(
arg,
name,
body['__sls__']))
elif isinstance(arg, dict):
# The arg is a dict, if the arg is require or
# watch, it must be a list.
#
# Add the requires to the reqs dict and check them
# all for recursive requisites.
argfirst = next(iter(arg))
if argfirst == 'names':
if not isinstance(arg[argfirst], list):
errors.append(
'The \'names\' argument in state '
'\'{0}\' in SLS \'{1}\' needs to be '
'formed as a list'
.format(name, body['__sls__'])
)
if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
if not isinstance(arg[argfirst], list):
errors.append(
'The {0} statement in state \'{1}\' in '
'SLS \'{2}\' needs to be formed as a '
'list'.format(argfirst,
name,
body['__sls__'])
)
# It is a list, verify that the members of the
# list are all single key dicts.
else:
reqs[name] = {'state': state}
for req in arg[argfirst]:
if not isinstance(req, dict):
err = ('Requisite declaration {0}'
' in SLS {1} is not formed as a'
' single key dictionary').format(
req,
body['__sls__'])
errors.append(err)
continue
req_key = next(iter(req))
req_val = req[req_key]
if '.' in req_key:
errors.append((
'Invalid requisite type \'{0}\' '
'in state \'{1}\', in SLS '
'\'{2}\'. Requisite types must '
'not contain dots, did you '
'mean \'{3}\'?'.format(
req_key,
name,
body['__sls__'],
req_key[:req_key.find('.')]
)
))
if not ishashable(req_val):
errors.append((
'Illegal requisite "{0}", '
'please check your syntax.\n'
).format(str(req_val)))
continue
# Check for global recursive requisites
reqs[name][req_val] = req_key
# I am going beyond 80 chars on
# purpose, this is just too much
# of a pain to deal with otherwise
if req_val in reqs:
if name in reqs[req_val]:
if reqs[req_val][name] == state:
if reqs[req_val]['state'] == reqs[name][req_val]:
err = ('A recursive '
'requisite was found, SLS '
'"{0}" ID "{1}" ID "{2}"'
).format(
body['__sls__'],
name,
req_val
)
errors.append(err)
# Make sure that there is only one key in the
# dict
if len(list(arg)) != 1:
errors.append(
'Multiple dictionaries defined in '
'argument of state \'{0}\' in SLS \'{1}\''
.format(name, body['__sls__'])
)
if not fun:
if state == 'require' or state == 'watch':
continue
errors.append(
'No function declared in state \'{0}\' in SLS \'{1}\''
.format(state, body['__sls__'])
)
elif fun > 1:
errors.append(
'Too many functions declared in state \'{0}\' in '
'SLS \'{1}\''.format(state, body['__sls__'])
)
return errors
def verify_chunks(self, chunks):
'''
Verify the chunks in a list of low data structures
'''
err = []
for chunk in chunks:
err += self.verify_data(chunk)
return err
def order_chunks(self, chunks):
'''
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
'''
cap = 1
for chunk in chunks:
if 'order' in chunk:
if not isinstance(chunk['order'], int):
continue
chunk_order = chunk['order']
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if 'order' not in chunk:
chunk['order'] = cap
continue
if not isinstance(chunk['order'], (int, float)):
if chunk['order'] == 'last':
chunk['order'] = cap + 1000000
else:
chunk['order'] = cap
if 'name_order' in chunk:
chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0
if chunk['order'] < 0:
chunk['order'] = cap + 1000000 + chunk['order']
chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk)))
return chunks
def compile_high_data(self, high):
'''
"Compile" the high data as it is retrieved from the CLI or YAML into
the individual state executor structures
'''
chunks = []
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
for state, run in six.iteritems(body):
funcs = set()
names = set()
if state.startswith('__'):
continue
chunk = {'state': state,
'name': name}
if '__sls__' in body:
chunk['__sls__'] = body['__sls__']
if '__env__' in body:
chunk['__env__'] = body['__env__']
chunk['__id__'] = name
for arg in run:
if isinstance(arg, six.string_types):
funcs.add(arg)
continue
if isinstance(arg, dict):
for key, val in six.iteritems(arg):
if key == 'names':
names.update(val)
elif key == 'state':
# Don't pass down a state override
continue
elif (key == 'name' and
not isinstance(val, six.string_types)):
# Invalid name, fall back to ID
chunk[key] = name
else:
chunk[key] = val
if names:
name_order = 1
for entry in names:
live = copy.deepcopy(chunk)
if isinstance(entry, dict):
low_name = next(six.iterkeys(entry))
live['name'] = low_name
list(map(live.update, entry[low_name]))
else:
live['name'] = entry
live['name_order'] = name_order
name_order = name_order + 1
for fun in funcs:
live['fun'] = fun
chunks.append(live)
else:
live = copy.deepcopy(chunk)
for fun in funcs:
live['fun'] = fun
chunks.append(live)
chunks = self.order_chunks(chunks)
return chunks
def reconcile_extend(self, high):
'''
Pull the extend data and add it to the respective high data
'''
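        # Hedged illustration (editor's addition): an SLS extend block such as
        #     extend:
        #       apache:
        #         service:
        #           - watch:
        #             - file: /etc/httpd/conf/httpd.conf
        # arrives here as high['__extend__'] and is merged into the existing
        # 'apache' ID, appending to its requisites rather than replacing them.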
errors = []
if '__extend__' not in high:
return high, errors
ext = high.pop('__extend__')
for ext_chunk in ext:
for name, body in six.iteritems(ext_chunk):
if name not in high:
state_type = next(
x for x in body if not x.startswith('__')
)
# Check for a matching 'name' override in high data
ids = find_name(name, state_type, high)
if len(ids) != 1:
errors.append(
'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not '
'part of the high state.\n'
'This is likely due to a missing include statement '
'or an incorrectly typed ID.\nEnsure that a '
'state with an ID of \'{0}\' is available\nin '
'environment \'{1}\' and to SLS \'{2}\''.format(
name,
body.get('__env__', 'base'),
body.get('__sls__', 'base'))
)
continue
else:
name = ids[0][0]
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
if state not in high[name]:
high[name][state] = run
continue
# high[name][state] is extended by run, both are lists
for arg in run:
update = False
for hind in range(len(high[name][state])):
if isinstance(arg, six.string_types) and isinstance(high[name][state][hind], six.string_types):
# replacing the function, replace the index
high[name][state].pop(hind)
high[name][state].insert(hind, arg)
update = True
continue
if isinstance(arg, dict) and isinstance(high[name][state][hind], dict):
# It is an option, make sure the options match
argfirst = next(iter(arg))
if argfirst == next(iter(high[name][state][hind])):
# If argfirst is a requisite then we must merge
# our requisite with that of the target state
if argfirst in STATE_REQUISITE_KEYWORDS:
high[name][state][hind][argfirst].extend(arg[argfirst])
                                    # otherwise, it's not a requisite and we are just extending (replacing)
else:
high[name][state][hind] = arg
update = True
if (argfirst == 'name' and
next(iter(high[name][state][hind])) == 'names'):
# If names are overwritten by name use the name
high[name][state][hind] = arg
if not update:
high[name][state].append(arg)
return high, errors
def apply_exclude(self, high):
'''
Read in the __exclude__ list and remove all excluded objects from the
high data
'''
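        # Hedged illustration (editor's addition): an exclude statement such as
        #     exclude:
        #       - sls: http*
        #       - id: vim
        # arrives as high['__exclude__'] == [{'sls': 'http*'}, {'id': 'vim'}];
        # sls globs are matched against each ID's __sls__ and every matching ID
        # is dropped from the high data.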
if '__exclude__' not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop('__exclude__')
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(six.iterkeys(exc))
if key == 'sls':
ex_sls.add(exc['sls'])
elif key == 'id':
ex_id.add(exc['id'])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in six.iteritems(high):
if name.startswith('__'):
continue
sls = body.get('__sls__', '')
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
def requisite_in(self, high):
'''
Extend the data reference with requisite_in arguments
'''
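        # Hedged illustration (editor's addition): a declaration like
        #     /etc/httpd/conf/httpd.conf:
        #       file.managed:
        #         - watch_in:
        #           - service: apache
        # is inverted here into an extend entry that appends
        #     {'watch': [{'file': '/etc/httpd/conf/httpd.conf'}]}
        # to the 'apache' service state, then merged via reconcile_extend().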
req_in = set([
'require_in',
'watch_in',
'onfail_in',
'onchanges_in',
'use',
'use_in',
'prereq',
'prereq_in',
])
req_in_all = req_in.union(
set([
'require',
'watch',
'onfail',
'onchanges',
]))
extend = {}
errors = []
for id_, body in six.iteritems(high):
if not isinstance(body, dict):
continue
for state, run in six.iteritems(body):
if state.startswith('__'):
continue
for arg in run:
if isinstance(arg, dict):
# It is not a function, verify that the arg is a
# requisite in statement
if len(arg) < 1:
# Empty arg dict
# How did we get this far?
continue
# Split out the components
key = next(iter(arg))
if key not in req_in:
continue
rkey = key.split('_')[0]
items = arg[key]
if isinstance(items, dict):
# Formatted as a single req_in
for _state, name in six.iteritems(items):
# Not a use requisite_in
found = False
if name not in extend:
extend[name] = {}
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
if isinstance(items, list):
# Formed as a list of requisite additions
for ind in items:
if not isinstance(ind, dict):
# Malformed req_in
continue
if len(ind) < 1:
continue
_state = next(iter(ind))
name = ind[_state]
if '.' in _state:
errors.append((
'Invalid requisite in {0}: {1} for '
'{2}, in SLS \'{3}\'. Requisites must '
'not contain dots, did you mean \'{4}\'?'
.format(
rkey,
_state,
name,
body['__sls__'],
_state[:_state.find('.')]
)
))
_state = _state.split(".")[0]
if key == 'prereq_in':
# Add prerequired to origin
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
extend[id_][state].append(
{'prerequired': [{_state: name}]}
)
if key == 'prereq':
# Add prerequired to prereqs
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if ext_id not in extend:
extend[ext_id] = {}
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
extend[ext_id][_req_state].append(
{'prerequired': [{state: id_}]}
)
continue
if key == 'use_in':
# Add the running states args to the
# use_in states
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
ext_args = state_args(ext_id, _state, high)
if ext_id not in extend:
extend[ext_id] = {}
if _req_state not in extend[ext_id]:
extend[ext_id][_req_state] = []
ignore_args = req_in_all.union(ext_args)
for arg in high[id_][state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[ext_id][_req_state].append(arg)
continue
if key == 'use':
# Add the use state's args to the
# running state
ext_ids = find_name(name, _state, high)
for ext_id, _req_state in ext_ids:
if not ext_id:
continue
loc_args = state_args(id_, state, high)
if id_ not in extend:
extend[id_] = {}
if state not in extend[id_]:
extend[id_][state] = []
ignore_args = req_in_all.union(loc_args)
for arg in high[ext_id][_req_state]:
if not isinstance(arg, dict):
continue
if len(arg) != 1:
continue
if next(iter(arg)) in ignore_args:
continue
# Don't use name or names
if next(six.iterkeys(arg)) == 'name':
continue
if next(six.iterkeys(arg)) == 'names':
continue
extend[id_][state].append(arg)
continue
found = False
if name not in extend:
extend[name] = {}
if _state not in extend[name]:
extend[name][_state] = []
extend[name]['__env__'] = body['__env__']
extend[name]['__sls__'] = body['__sls__']
for ind in range(len(extend[name][_state])):
if next(iter(
extend[name][_state][ind])) == rkey:
# Extending again
extend[name][_state][ind][rkey].append(
{state: id_}
)
found = True
if found:
continue
# The rkey is not present yet, create it
extend[name][_state].append(
{rkey: [{state: id_}]}
)
high['__extend__'] = []
for key, val in six.iteritems(extend):
high['__extend__'].append({key: val})
req_in_high, req_in_errors = self.reconcile_extend(high)
errors.extend(req_in_errors)
return req_in_high, errors
def call(self, low, chunks=None, running=None):
'''
Call a state directly with the low data structure, verify data
before processing.
'''
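        # Hedged illustration (editor's addition): a typical low chunk passed
        # in here looks like
        #     {'state': 'pkg', 'fun': 'installed', 'name': 'vim',
        #      '__id__': 'vim', '__sls__': 'edit', '__env__': 'base'}
        # and the returned dict carries 'result', 'changes', 'comment' plus the
        # timing and '__run_num__' bookkeeping added below.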
start_time = datetime.datetime.now()
log.info('Running state [{0}] at time {1}'.format(low['name'], start_time.time().isoformat()))
errors = self.verify_data(low)
if errors:
ret = {
'result': False,
'name': low['name'],
'changes': {},
'comment': '',
}
for err in errors:
ret['comment'] += '{0}\n'.format(err)
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
return ret
else:
ret = {'result': False, 'name': low['name'], 'changes': {}}
if not low.get('__prereq__'):
log.info(
'Executing state {0[state]}.{0[fun]} for {0[name]}'.format(
low
)
)
if 'provider' in low:
self.load_modules(low)
state_func_name = '{0[state]}.{0[fun]}'.format(low)
cdata = salt.utils.format_call(
self.states[state_func_name],
low,
initial_ret={'full': state_func_name},
expected_extra_kws=STATE_INTERNAL_KEYWORDS
)
inject_globals = {
# Pass a copy of the running dictionary, the low state chunks and
# the current state dictionaries.
# We pass deep copies here because we don't want any misbehaving
# state module to change these at runtime.
'__low__': immutabletypes.freeze(low),
'__running__': immutabletypes.freeze(running) if running else {},
'__instance_id__': self.instance_id,
'__lowstate__': immutabletypes.freeze(chunks) if chunks else {}
}
if self.inject_globals:
inject_globals.update(self.inject_globals)
if low.get('__prereq__'):
test = sys.modules[self.states[cdata['full']].__module__].__opts__['test']
sys.modules[self.states[cdata['full']].__module__].__opts__['test'] = True
try:
# Let's get a reference to the salt environment to use within this
# state call.
#
            # If the state function accepts an 'env' keyword argument, it
            # allows the environment to be overridden (we look for that in
            # cdata). If that's not found in cdata, we look for what we're
            # being passed in the original data, namely, the special dunder
            # __env__. If that's not found we default to 'base'.
if ('unless' in low and '{0[state]}.mod_run_check'.format(low) not in self.states) or \
('onlyif' in low and '{0[state]}.mod_run_check'.format(low) not in self.states):
ret.update(self._run_check(low))
if 'saltenv' in low:
inject_globals['__env__'] = str(low['saltenv'])
elif isinstance(cdata['kwargs'].get('env', None), six.string_types):
# User is using a deprecated env setting which was parsed by
# format_call.
# We check for a string type since module functions which
# allow setting the OS environ also make use of the "env"
# keyword argument, which is not a string
inject_globals['__env__'] = str(cdata['kwargs']['env'])
elif '__env__' in low:
# The user is passing an alternative environment using __env__
# which is also not the appropriate choice, still, handle it
inject_globals['__env__'] = str(low['__env__'])
else:
# Let's use the default environment
inject_globals['__env__'] = 'base'
if 'result' not in ret or ret['result'] is False:
self.states.inject_globals = inject_globals
ret = self.states[cdata['full']](*cdata['args'],
**cdata['kwargs'])
self.states.inject_globals = {}
if 'check_cmd' in low and '{0[state]}.mod_run_check_cmd'.format(low) not in self.states:
ret.update(self._run_check_cmd(low))
self.verify_ret(ret)
except Exception:
trb = traceback.format_exc()
            # There are a number of possibilities to not have the cdata
            # populated with what we might have expected, so just be smart
            # enough not to raise another KeyError as the name is easily
            # guessable, and in all cases fall back to presenting the real
            # exception to the user
if len(cdata['args']) > 0:
name = cdata['args'][0]
elif 'name' in cdata['kwargs']:
name = cdata['kwargs']['name']
else:
name = low.get('name', low.get('__id__'))
ret = {
'result': False,
'name': name,
'changes': {},
'comment': 'An exception occurred in this state: {0}'.format(
trb)
}
finally:
if low.get('__prereq__'):
sys.modules[self.states[cdata['full']].__module__].__opts__[
'test'] = test
# If format_call got any warnings, let's show them to the user
if 'warnings' in cdata:
ret.setdefault('warnings', []).extend(cdata['warnings'])
if 'provider' in low:
self.load_modules()
if low.get('__prereq__'):
low['__prereq__'] = False
return ret
ret['__run_num__'] = self.__run_num
self.__run_num += 1
format_log(ret)
self.check_refresh(low, ret)
finish_time = datetime.datetime.now()
ret['start_time'] = start_time.time().isoformat()
delta = (finish_time - start_time)
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds)/1000.0
ret['duration'] = duration
ret['__id__'] = low['__id__']
log.info('Completed state [{0}] at time {1} duration_in_ms={2}'.format(low['name'], finish_time.time().isoformat(), duration))
return ret
def call_chunks(self, chunks):
'''
Iterate over a list of chunks and call them, checking for requires.
'''
running = {}
for low in chunks:
if '__FAILHARD__' in running:
running.pop('__FAILHARD__')
return running
tag = _gen_tag(low)
if tag not in running:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(low, running):
return running
self.active = set()
return running
def check_failhard(self, low, running):
'''
Check if the low data chunk should send a failhard signal
'''
tag = _gen_tag(low)
if (low.get('failhard', False) or self.opts['failhard']
and tag in running):
return not running[tag]['result']
return False
def check_requisite(self, low, running, chunks, pre=False):
'''
Look into the running data to check the status of all requisite
states
'''
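        # Hedged illustration (editor's addition): the status returned is one
        # of 'met', 'unmet', 'fail', 'change', 'pre', 'onfail' or 'onchanges';
        # e.g. a watched state that already ran with changes yields 'change',
        # while a requisite whose target has not yet run yields 'unmet'.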
present = False
# If mod_watch is not available make it a require
if 'watch' in low:
if '{0}.mod_watch'.format(low['state']) not in self.states:
if 'require' in low:
low['require'].extend(low.pop('watch'))
else:
low['require'] = low.pop('watch')
else:
present = True
if 'require' in low:
present = True
if 'prerequired' in low:
present = True
if 'prereq' in low:
present = True
if 'onfail' in low:
present = True
if 'onchanges' in low:
present = True
if not present:
return 'met', ()
reqs = {
'require': [],
'watch': [],
'prereq': [],
'onfail': [],
'onchanges': []}
if pre:
reqs['prerequired'] = []
for r_state in reqs:
if r_state in low and low[r_state] is not None:
for req in low[r_state]:
req = trim_req(req)
found = False
for chunk in chunks:
req_key = next(iter(req))
req_val = req[req_key]
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
found = True
reqs[r_state].append(chunk)
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
found = True
reqs[r_state].append(chunk)
if not found:
return 'unmet', ()
fun_stats = set()
for r_state, chunks in six.iteritems(reqs):
if r_state == 'prereq':
run_dict = self.pre
else:
run_dict = running
for chunk in chunks:
tag = _gen_tag(chunk)
if tag not in run_dict:
fun_stats.add('unmet')
continue
if r_state == 'onfail':
if run_dict[tag]['result'] is True:
fun_stats.add('onfail')
continue
else:
if run_dict[tag]['result'] is False:
fun_stats.add('fail')
continue
if r_state == 'onchanges':
if not run_dict[tag]['changes']:
fun_stats.add('onchanges')
else:
fun_stats.add('onchangesmet')
continue
if r_state == 'watch' and run_dict[tag]['changes']:
fun_stats.add('change')
continue
if r_state == 'prereq' and run_dict[tag]['result'] is None:
fun_stats.add('premet')
                if r_state == 'prereq' and run_dict[tag]['result'] is not None:
fun_stats.add('pre')
else:
fun_stats.add('met')
if 'unmet' in fun_stats:
status = 'unmet'
elif 'fail' in fun_stats:
status = 'fail'
elif 'pre' in fun_stats:
if 'premet' in fun_stats:
status = 'met'
else:
status = 'pre'
elif 'onfail' in fun_stats:
status = 'onfail'
elif 'onchanges' in fun_stats and 'onchangesmet' not in fun_stats:
status = 'onchanges'
elif 'change' in fun_stats:
status = 'change'
else:
status = 'met'
return status, reqs
def event(self, chunk_ret, length, fire_event=False):
'''
Fire an event on the master bus
If `fire_event` is set to True an event will be sent with the
chunk name in the tag and the chunk result in the event data.
If `fire_event` is set to a string such as `mystate/is/finished`,
an event will be sent with the string added to the tag and the chunk
result in the event data.
        If `state_events` is set to True in the config, then after the
        chunk is evaluated an event will be sent up to the master with the
        results.
'''
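        # Hedged illustration (editor's addition): the default progress tag
        # built below looks roughly like
        #     salt/job/<jid>/prog/<minion id>/<run number>
        # while fire_event=True switches to a 'state_result' tag that embeds
        # the chunk name instead.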
if not self.opts.get('local') and (self.opts.get('state_events', True) or fire_event) and self.opts.get('master_uri'):
ret = {'ret': chunk_ret}
if fire_event is True:
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(chunk_ret['name'])], 'state_result'
)
elif isinstance(fire_event, six.string_types):
tag = salt.utils.event.tagify(
[self.jid, self.opts['id'], str(fire_event)], 'state_result'
)
else:
tag = salt.utils.event.tagify(
[self.jid, 'prog', self.opts['id'], str(chunk_ret['__run_num__'])], 'job'
)
ret['len'] = length
preload = {'jid': self.jid}
self.functions['event.fire_master'](ret, tag, preload=preload)
def call_chunk(self, low, running, chunks):
'''
Check if a chunk has any requires, execute the requires and then
the chunk
'''
low = self._mod_aggregate(low, running, chunks)
self._mod_init(low)
tag = _gen_tag(low)
if not low.get('prerequired'):
self.active.add(tag)
requisites = ['require', 'watch', 'prereq', 'onfail', 'onchanges']
if not low.get('__prereq__'):
requisites.append('prerequired')
status, reqs = self.check_requisite(low, running, chunks, True)
else:
status, reqs = self.check_requisite(low, running, chunks)
if status == 'unmet':
lost = {}
reqs = []
for requisite in requisites:
lost[requisite] = []
if requisite not in low:
continue
for req in low[requisite]:
req = trim_req(req)
found = False
req_key = next(iter(req))
req_val = req[req_key]
for chunk in chunks:
if req_val is None:
continue
if req_key == 'sls':
# Allow requisite tracking of entire sls files
if fnmatch.fnmatch(chunk['__sls__'], req_val):
if requisite == 'prereq':
chunk['__prereq__'] = True
reqs.append(chunk)
found = True
continue
if (fnmatch.fnmatch(chunk['name'], req_val) or
fnmatch.fnmatch(chunk['__id__'], req_val)):
if chunk['state'] == req_key:
if requisite == 'prereq':
chunk['__prereq__'] = True
elif requisite == 'prerequired':
chunk['__prerequired__'] = True
reqs.append(chunk)
found = True
if not found:
lost[requisite].append(req)
if lost['require'] or lost['watch'] or lost['prereq'] or lost['onfail'] or lost['onchanges'] or lost.get('prerequired'):
comment = 'The following requisites were not found:\n'
for requisite, lreqs in six.iteritems(lost):
if not lreqs:
continue
comment += \
'{0}{1}:\n'.format(' ' * 19, requisite)
for lreq in lreqs:
req_key = next(iter(lreq))
req_val = lreq[req_key]
comment += \
'{0}{1}: {2}\n'.format(' ' * 23, req_key, req_val)
running[tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
for chunk in reqs:
# Check to see if the chunk has been run, only run it if
# it has not been run already
ctag = _gen_tag(chunk)
if ctag not in running:
if ctag in self.active:
if chunk.get('__prerequired__'):
                            # Prereq recursive, run this chunk with prereq on
if tag not in self.pre:
low['__prereq__'] = True
self.pre[ctag] = self.call(low, chunks, running)
return running
else:
return running
elif ctag not in running:
log.error('Recursive requisite found')
running[tag] = {
'changes': {},
'result': False,
'comment': 'Recursive requisite found',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
running = self.call_chunk(chunk, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
if low.get('__prereq__'):
status, reqs = self.check_requisite(low, running, chunks)
self.pre[tag] = self.call(low, chunks, running)
if not self.pre[tag]['changes'] and status == 'change':
self.pre[tag]['changes'] = {'watch': 'watch'}
self.pre[tag]['result'] = None
else:
running = self.call_chunk(low, running, chunks)
if self.check_failhard(chunk, running):
running['__FAILHARD__'] = True
return running
elif status == 'met':
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
elif status == 'fail':
# if the requisite that failed was due to a prereq on this low state
# show the normal error
if tag in self.pre:
running[tag] = self.pre[tag]
running[tag]['__run_num__'] = self.__run_num
running[tag]['__sls__'] = low['__sls__']
# otherwise the failure was due to a requisite down the chain
else:
                # determine what the requisite failures were, and return
                # a nice error message
failed_requisites = set()
# look at all requisite types for a failure
for req_lows in six.itervalues(reqs):
for req_low in req_lows:
req_tag = _gen_tag(req_low)
req_ret = self.pre.get(req_tag, running.get(req_tag))
# if there is no run output for the requisite it
# can't be the failure
if req_ret is None:
continue
# If the result was False (not None) it was a failure
if req_ret['result'] is False:
                            # use SLS.ID for the key -- so it's easier to find
key = '{sls}.{_id}'.format(sls=req_low['__sls__'],
_id=req_low['__id__'])
failed_requisites.add(key)
_cmt = 'One or more requisite failed: {0}'.format(
', '.join(str(i) for i in failed_requisites)
)
running[tag] = {
'changes': {},
'result': False,
'comment': _cmt,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']
}
self.__run_num += 1
elif status == 'change' and not low.get('__prereq__'):
ret = self.call(low, chunks, running)
if not ret['changes'] and not ret.get('skip_watch', False):
low = low.copy()
low['sfun'] = low['fun']
low['fun'] = 'mod_watch'
low['__reqs__'] = reqs
ret = self.call(low, chunks, running)
running[tag] = ret
elif status == 'pre':
pre_ret = {'changes': {},
'result': True,
'comment': 'No changes detected',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
running[tag] = pre_ret
self.pre[tag] = pre_ret
self.__run_num += 1
elif status == 'onfail':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because onfail req did not change',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
elif status == 'onchanges':
running[tag] = {'changes': {},
'result': True,
'comment': 'State was not run because none of the onchanges reqs changed',
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
else:
if low.get('__prereq__'):
self.pre[tag] = self.call(low, chunks, running)
else:
running[tag] = self.call(low, chunks, running)
if tag in running:
self.event(running[tag], len(chunks), fire_event=low.get('fire_event'))
return running
def call_listen(self, chunks, running):
'''
Find all of the listen routines and call the associated mod_watch runs
'''
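        # Hedged illustration (editor's addition): a listen requisite such as
        #     nginx:
        #       service.running:
        #         - listen:
        #           - file: /etc/nginx/nginx.conf
        # triggers an extra mod_watch run for the nginx service here whenever
        # the watched file state reported changes during the normal run.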
listeners = []
crefs = {}
for chunk in chunks:
crefs[(chunk['state'], chunk['name'])] = chunk
crefs[(chunk['state'], chunk['__id__'])] = chunk
if 'listen' in chunk:
listeners.append({(chunk['state'], chunk['__id__']): chunk['listen']})
if 'listen_in' in chunk:
for l_in in chunk['listen_in']:
for key, val in six.iteritems(l_in):
listeners.append({(key, val): [{chunk['state']: chunk['__id__']}]})
mod_watchers = []
errors = {}
for l_dict in listeners:
for key, val in six.iteritems(l_dict):
for listen_to in val:
if not isinstance(listen_to, dict):
continue
for lkey, lval in six.iteritems(listen_to):
if (lkey, lval) not in crefs:
rerror = {_l_tag(lkey, lval):
{
'comment': 'Referenced state {0}: {1} does not exist'.format(lkey, lval),
'name': 'listen_{0}:{1}'.format(lkey, lval),
'result': False,
'changes': {}
}}
errors.update(rerror)
continue
to_tag = _gen_tag(crefs[(lkey, lval)])
if to_tag not in running:
continue
if running[to_tag]['changes']:
if key not in crefs:
rerror = {_l_tag(key[0], key[1]):
{'comment': 'Referenced state {0}: {1} does not exist'.format(key[0], key[1]),
'name': 'listen_{0}:{1}'.format(key[0], key[1]),
'result': False,
'changes': {}}}
errors.update(rerror)
continue
chunk = crefs[key]
low = chunk.copy()
low['sfun'] = chunk['fun']
low['fun'] = 'mod_watch'
low['__id__'] = 'listener_{0}'.format(low['__id__'])
for req in STATE_REQUISITE_KEYWORDS:
if req in low:
low.pop(req)
mod_watchers.append(low)
ret = self.call_chunks(mod_watchers)
running.update(ret)
for err in errors:
errors[err]['__run_num__'] = self.__run_num
self.__run_num += 1
running.update(errors)
return running
def call_high(self, high):
'''
Process a high data call and ensure the defined states.
'''
errors = []
# If there is extension data reconcile it
high, ext_errors = self.reconcile_extend(high)
errors += ext_errors
errors += self.verify_high(high)
if errors:
return errors
high, req_in_errors = self.requisite_in(high)
errors += req_in_errors
high = self.apply_exclude(high)
# Verify that the high data is structurally sound
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.compile_high_data(high)
# Check for any disabled states
disabled = {}
if 'state_runs_disabled' in self.opts['grains']:
for low in chunks[:]:
state_ = '{0}.{1}'.format(low['state'], low['fun'])
for pat in self.opts['grains']['state_runs_disabled']:
if fnmatch.fnmatch(state_, pat):
comment = (
'The state function "{0}" is currently disabled by "{1}", '
'to re-enable, run state.enable {1}.'
).format(
state_,
pat,
)
_tag = _gen_tag(low)
disabled[_tag] = {'changes': {},
'result': False,
'comment': comment,
'__run_num__': self.__run_num,
'__sls__': low['__sls__']}
self.__run_num += 1
chunks.remove(low)
break
# If there are extensions in the highstate, process them and update
# the low data chunks
if errors:
return errors
ret = dict(list(disabled.items()) + list(self.call_chunks(chunks).items()))
ret = self.call_listen(chunks, ret)
def _cleanup_accumulator_data():
accum_data_path = os.path.join(
salt.utils.get_accumulator_dir(self.opts['cachedir']),
self.instance_id
)
try:
os.remove(accum_data_path)
log.debug('Deleted accumulator data file {0}'.format(
accum_data_path)
)
except OSError:
log.debug('File {0} does not exist, no need to cleanup.'.format(
accum_data_path)
)
_cleanup_accumulator_data()
return ret
def render_template(self, high, template):
errors = []
if not high:
return high, errors
if not isinstance(high, dict):
errors.append(
'Template {0} does not render to a dictionary'.format(template)
)
return high, errors
invalid_items = ('include', 'exclude', 'extends')
for item in invalid_items:
if item in high:
errors.append(
'The \'{0}\' declaration found on \'{1}\' is invalid when '
'rendering single templates'.format(item, template)
)
return high, errors
for name in high:
if not isinstance(high[name], dict):
if isinstance(high[name], six.string_types):
                    # If this is a short state, it needs to be padded
if '.' in high[name]:
comps = high[name].split('.')
high[name] = {
# '__sls__': template,
# '__env__': None,
comps[0]: [comps[1]]
}
continue
errors.append(
'ID {0} in template {1} is not a dictionary'.format(
name, template
)
)
continue
skeys = set()
for key in sorted(high[name]):
if key.startswith('_'):
continue
if high[name][key] is None:
errors.append(
'ID \'{0}\' in template {1} contains a short '
'declaration ({2}) with a trailing colon. When not '
'passing any arguments to a state, the colon must be '
'omitted.'.format(name, template, key)
)
continue
if not isinstance(high[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID \'{0}\' in template \'{1}\' contains multiple '
'state declarations of the same type'
.format(name, template)
)
continue
high[name][comps[0]] = high[name].pop(key)
high[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
return high, errors
def call_template(self, template):
'''
Enforce the states in a template
'''
high = compile_template(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, template)
if errors:
return errors
return self.call_high(high)
def call_template_str(self, template):
'''
Enforce the states in a template, pass the template as a string
'''
high = compile_template_str(
template, self.rend, self.opts['renderer'])
if not high:
return high
high, errors = self.render_template(high, '<template-str>')
if errors:
return errors
return self.call_high(high)
class BaseHighState(object):
'''
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
'''
def __init__(self, opts):
self.opts = self.__gen_opts(opts)
self.iorder = 10000
self.avail = self.__gather_avail()
self.serial = salt.payload.Serial(self.opts)
self.building_highstate = {}
def __gather_avail(self):
'''
Gather the lists of available sls data from the master
'''
avail = {}
for saltenv in self._get_envs():
avail[saltenv] = self.client.list_states(saltenv)
return avail
def __gen_opts(self, opts):
'''
The options used by the High State object are derived from options
on the minion and the master, or just the minion if the high state
call is entirely local.
'''
# If the state is intended to be applied locally, then the local opts
# should have all of the needed data, otherwise overwrite the local
# data items with data from the master
if 'local_state' in opts:
if opts['local_state']:
return opts
mopts = self.client.master_opts()
if not isinstance(mopts, dict):
# An error happened on the master
opts['renderer'] = 'yaml_jinja'
opts['failhard'] = False
opts['state_top'] = salt.utils.url.create('top.sls')
opts['nodegroups'] = {}
opts['file_roots'] = {'base': [syspaths.BASE_FILE_ROOTS_DIR]}
else:
opts['renderer'] = mopts['renderer']
opts['failhard'] = mopts.get('failhard', False)
if mopts['state_top'].startswith('salt://'):
opts['state_top'] = mopts['state_top']
elif mopts['state_top'].startswith('/'):
opts['state_top'] = salt.utils.url.create(mopts['state_top'][1:])
else:
opts['state_top'] = salt.utils.url.create(mopts['state_top'])
opts['state_top_saltenv'] = mopts.get('state_top_saltenv', None)
opts['nodegroups'] = mopts.get('nodegroups', {})
opts['state_auto_order'] = mopts.get(
'state_auto_order',
opts['state_auto_order'])
opts['file_roots'] = mopts['file_roots']
opts['top_file_merging_strategy'] = mopts.get('top_file_merging_strategy',
opts.get('top_file_merging_strategy'))
opts['env_order'] = mopts.get('env_order', opts.get('env_order', []))
opts['default_top'] = mopts.get('default_top', opts.get('default_top'))
opts['state_events'] = mopts.get('state_events')
opts['state_aggregate'] = mopts.get('state_aggregate', opts.get('state_aggregate', False))
opts['jinja_lstrip_blocks'] = mopts.get('jinja_lstrip_blocks', False)
opts['jinja_trim_blocks'] = mopts.get('jinja_trim_blocks', False)
return opts
def _get_envs(self):
'''
Pull the file server environments out of the master options
'''
envs = ['base']
if 'file_roots' in self.opts:
envs.extend(list(self.opts['file_roots']))
        client_envs = self.client.envs()
        env_order = self.opts.get('env_order', [])
if env_order and client_envs:
client_env_list = self.client.envs()
env_intersection = set(env_order).intersection(client_env_list)
final_list = []
for ord_env in env_order:
if ord_env in env_intersection:
final_list.append(ord_env)
return set(final_list)
elif env_order:
return set(env_order)
else:
for cenv in client_envs:
if cenv not in envs:
envs.append(cenv)
return set(envs)
def get_tops(self):
'''
Gather the top files
'''
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
if self.opts['top_file_merging_strategy'] == 'same' and \
not self.opts['environment']:
if not self.opts['default_top']:
raise SaltRenderError('Top file merge strategy set to same, but no default_top '
'configuration option was set')
self.opts['environment'] = self.opts['default_top']
if self.opts['environment']:
contents = self.client.cache_file(
self.opts['state_top'],
self.opts['environment']
)
if contents:
found = 1
tops[self.opts['environment']] = [
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=self.opts['environment']
)
]
elif self.opts['top_file_merging_strategy'] == 'merge':
found = 0
if self.opts.get('state_top_saltenv', False):
saltenv = self.opts['state_top_saltenv']
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
else:
log.debug('No contents loaded for env: {0}'.format(saltenv))
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
else:
for saltenv in self._get_envs():
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
else:
log.debug('No contents loaded for env: {0}'.format(saltenv))
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
if found > 1:
log.warning('Top file merge strategy set to \'merge\' and multiple top files found. '
'Top file merging order is undefined; '
'for better results use \'same\' option')
if found == 0:
log.error('No contents found in top file')
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.state.rend,
self.state.opts['renderer'],
saltenv=saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def merge_tops(self, tops):
'''
Cleanly merge the top files
'''
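        # Hedged illustration (editor's addition): two rendered top files
        #     {'base': {'*': ['core']}}
        #     {'base': {'*': ['ssh'], 'web*': [{'match': 'glob'}, 'apache']}}
        # merge into roughly
        #     {'base': {'*': ['core'], 'web*': [{'match': 'glob'}, 'apache']}}
        # for a target that is already present, only the first definition's
        # entries are kept.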
top = DefaultOrderedDict(OrderedDict)
for ctops in six.itervalues(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == 'include':
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
matches = []
states = set()
for comp in top[saltenv][tgt]:
if isinstance(comp, dict):
matches.append(comp)
if isinstance(comp, six.string_types):
states.add(comp)
top[saltenv][tgt] = matches
top[saltenv][tgt].extend(list(states))
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == 'include':
continue
if not isinstance(saltenv, six.string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors
def get_top(self):
'''
Returns the high data derived from the top file
'''
try:
tops = self.get_tops()
except SaltRenderError as err:
log.error('Unable to render top file: ' + str(err.error))
return {}
return self.merge_tops(tops)
def top_matches(self, top):
'''
Search through the top high data for matches and return the states
that this minion needs to execute.
Returns:
{'saltenv': ['state1', 'state2', ...]}
'''
matches = {}
# pylint: disable=cell-var-from-loop
for saltenv, body in six.iteritems(top):
if self.opts['environment']:
if saltenv != self.opts['environment']:
continue
for match, data in six.iteritems(body):
def _filter_matches(_match, _data, _opts):
if isinstance(_data, six.string_types):
_data = [_data]
if self.matcher.confirm_top(
_match,
_data,
_opts
):
if saltenv not in matches:
matches[saltenv] = []
for item in _data:
if 'subfilter' in item:
_tmpdata = item.pop('subfilter')
for match, data in six.iteritems(_tmpdata):
_filter_matches(match, data, _opts)
if isinstance(item, six.string_types):
matches[saltenv].append(item)
_filter_matches(match, data, self.opts['nodegroups'])
ext_matches = self.client.ext_nodes()
for saltenv in ext_matches:
if saltenv in matches:
matches[saltenv] = list(
set(ext_matches[saltenv]).union(matches[saltenv]))
else:
matches[saltenv] = ext_matches[saltenv]
# pylint: enable=cell-var-from-loop
return matches
def load_dynamic(self, matches):
'''
If autoload_dynamic_modules is True then automatically load the
dynamic modules
'''
if not self.opts['autoload_dynamic_modules']:
return
if self.opts.get('local', False):
syncd = self.state.functions['saltutil.sync_all'](list(matches),
refresh=False)
else:
syncd = self.state.functions['saltutil.sync_all'](list(matches),
refresh=False)
if syncd['grains']:
self.opts['grains'] = salt.loader.grains(self.opts)
self.state.opts['pillar'] = self.state._gather_pillar()
self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False):
'''
Render a state file and retrieve all of the include states
'''
errors = []
if not local:
state_data = self.client.get_state(sls, saltenv)
fn_ = state_data.get('dest', False)
else:
fn_ = sls
if not os.path.isfile(fn_):
errors.append(
'Specified SLS {0} on local filesystem cannot '
'be found.'.format(sls)
)
if not fn_:
errors.append(
'Specified SLS {0} in saltenv {1} is not '
'available on the salt master or through a configured '
'fileserver'.format(sls, saltenv)
)
state = None
try:
state = compile_template(
fn_, self.state.rend, self.state.opts['renderer'], saltenv,
sls, rendered_sls=mods
)
except SaltRenderError as exc:
msg = 'Rendering SLS \'{0}:{1}\' failed: {2}'.format(
saltenv, sls, exc
)
log.critical(msg)
errors.append(msg)
except Exception as exc:
msg = 'Rendering SLS {0} failed, render error: {1}'.format(
sls, exc
)
log.critical(
msg,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
errors.append('{0}\n{1}'.format(msg, traceback.format_exc()))
try:
mods.add('{0}:{1}'.format(saltenv, sls))
except AttributeError:
pass
if state:
if not isinstance(state, dict):
errors.append(
'SLS {0} does not render to a dictionary'.format(sls)
)
else:
include = []
if 'include' in state:
if not isinstance(state['include'], list):
err = ('Include Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
else:
include = state.pop('include')
self._handle_extend(state, sls, saltenv, errors)
self._handle_exclude(state, sls, saltenv, errors)
self._handle_state_decls(state, sls, saltenv, errors)
for inc_sls in include:
# inc_sls may take the form of:
# 'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
# {<env_key>: 'sls.to.include'}
# {'_xenv': 'sls.to.resolve'}
xenv_key = '_xenv'
if isinstance(inc_sls, dict):
env_key, inc_sls = inc_sls.popitem()
else:
env_key = saltenv
if env_key not in self.avail:
msg = ('Nonexistent saltenv \'{0}\' found in include '
'of \'{1}\' within SLS \'{2}:{3}\''
.format(env_key, inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
if inc_sls.startswith('.'):
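                        # A leading dot marks a relative include: e.g. '.bar'
                        # inside SLS 'foo.baz' resolves to 'foo.bar', and
                        # '..bar' resolves to 'bar'.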
levels, include = \
re.match(r'^(\.+)(.*)$', inc_sls).groups()
level_count = len(levels)
p_comps = sls.split('.')
if state_data.get('source', '').endswith('/init.sls'):
p_comps.append('init')
if level_count > len(p_comps):
msg = ('Attempted relative include of \'{0}\' '
'within SLS \'{1}:{2}\' '
'goes beyond top level package '
.format(inc_sls, saltenv, sls))
log.error(msg)
errors.append(msg)
continue
inc_sls = '.'.join(p_comps[:-level_count] + [include])
if env_key != xenv_key:
if matches is None:
matches = []
# Resolve inc_sls in the specified environment
if env_key in matches or fnmatch.filter(self.avail[env_key], inc_sls):
resolved_envs = [env_key]
else:
resolved_envs = []
else:
# Resolve inc_sls in the subset of environment matches
resolved_envs = [
aenv for aenv in matches
if fnmatch.filter(self.avail[aenv], inc_sls)
]
# An include must be resolved to a single environment, or
# the include must exist in the current environment
if len(resolved_envs) == 1 or saltenv in resolved_envs:
# Match inc_sls against the available states in the
# resolved env, matching wildcards in the process. If
# there were no matches, then leave inc_sls as the
# target so that the next recursion of render_state
# will recognize the error.
sls_targets = fnmatch.filter(
self.avail[saltenv],
inc_sls
) or [inc_sls]
for sls_target in sls_targets:
r_env = resolved_envs[0] if len(resolved_envs) == 1 else saltenv
mod_tgt = '{0}:{1}'.format(r_env, sls_target)
if mod_tgt not in mods:
nstate, err = self.render_state(
sls_target,
r_env,
mods,
matches
)
if nstate:
self.merge_included_states(state, nstate, errors)
state.update(nstate)
if err:
errors.extend(err)
else:
msg = ''
if not resolved_envs:
msg = ('Unknown include: Specified SLS {0}: {1} is not available on the salt '
'master in saltenv(s): {2} '
).format(env_key,
inc_sls,
', '.join(matches) if env_key == xenv_key else env_key)
elif len(resolved_envs) > 1:
msg = ('Ambiguous include: Specified SLS {0}: {1} is available on the salt master '
'in multiple available saltenvs: {2}'
).format(env_key,
inc_sls,
', '.join(resolved_envs))
log.critical(msg)
errors.append(msg)
try:
self._handle_iorder(state)
except TypeError:
log.critical('Could not render SLS {0}. Syntax error detected.'.format(sls))
else:
state = {}
return state, errors
def _handle_iorder(self, state):
'''
Take a state and apply the iorder system
'''
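        # With state_auto_order enabled, append an {'order': N} argument to
        # every state declaration that does not already define one, so states
        # run in the order they were written in the SLS files.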
if self.opts['state_auto_order']:
for name in state:
for s_dec in state[name]:
if not isinstance(s_dec, six.string_types):
# PyDSL OrderedDict?
continue
if not isinstance(state[name], dict):
# Include's or excludes as lists?
continue
if not isinstance(state[name][s_dec], list):
# Bad syntax, let the verify seq pick it up later on
continue
found = False
if s_dec.startswith('_'):
continue
for arg in state[name][s_dec]:
if isinstance(arg, dict):
if len(arg) > 0:
if next(six.iterkeys(arg)) == 'order':
found = True
if not found:
if not isinstance(state[name][s_dec], list):
# quite certainly a syntax error, managed elsewhere
continue
state[name][s_dec].append(
{'order': self.iorder}
)
self.iorder += 1
return state
def _handle_state_decls(self, state, sls, saltenv, errors):
'''
Add sls and saltenv components to the state
'''
for name in state:
if not isinstance(state[name], dict):
if name == '__extend__':
continue
if name == '__exclude__':
continue
if isinstance(state[name], six.string_types):
                    # If this is a short state, it needs to be padded
if '.' in state[name]:
comps = state[name].split('.')
state[name] = {'__sls__': sls,
'__env__': saltenv,
comps[0]: [comps[1]]}
continue
errors.append(
'ID {0} in SLS {1} is not a dictionary'.format(name, sls)
)
continue
skeys = set()
for key in state[name]:
if key.startswith('_'):
continue
if not isinstance(state[name][key], list):
continue
if '.' in key:
comps = key.split('.')
# Salt doesn't support state files such as:
#
# /etc/redis/redis.conf:
# file.managed:
# - source: salt://redis/redis.conf
# - user: redis
# - group: redis
# - mode: 644
# file.comment:
# - regex: ^requirepass
if comps[0] in skeys:
errors.append(
'ID \'{0}\' in SLS \'{1}\' contains multiple state '
'declarations of the same type'.format(name, sls)
)
continue
state[name][comps[0]] = state[name].pop(key)
state[name][comps[0]].append(comps[1])
skeys.add(comps[0])
continue
skeys.add(key)
if '__sls__' not in state[name]:
state[name]['__sls__'] = sls
if '__env__' not in state[name]:
state[name]['__env__'] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
'''
Take the extend dec out of state and apply to the highstate global
dec
'''
if 'extend' in state:
ext = state.pop('extend')
if not isinstance(ext, dict):
errors.append(('Extension value in SLS \'{0}\' is not a '
'dictionary').format(sls))
return
for name in ext:
if not isinstance(ext[name], dict):
errors.append(('Extension name \'{0}\' in SLS \'{1}\' is '
'not a dictionary'
.format(name, sls)))
continue
if '__sls__' not in ext[name]:
ext[name]['__sls__'] = sls
if '__env__' not in ext[name]:
ext[name]['__env__'] = saltenv
for key in ext[name]:
if key.startswith('_'):
continue
if not isinstance(ext[name][key], list):
continue
if '.' in key:
comps = key.split('.')
ext[name][comps[0]] = ext[name].pop(key)
ext[name][comps[0]].append(comps[1])
state.setdefault('__extend__', []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc)
def render_highstate(self, matches):
'''
Gather the state files and render them into a single unified salt
high data structure.
'''
highstate = self.building_highstate
all_errors = []
mods = set()
statefiles = []
for saltenv, states in six.iteritems(matches):
for sls_match in states:
try:
statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
except KeyError:
all_errors.extend(
['No matching salt environment for environment '
'\'{0}\' found'.format(saltenv)]
)
                # If we did not find any sls in the fileserver listing, it may
                # be because the sls was generated or added later, so try to
                # execute it directly; if that fails, the former error is
                # returned anyway.
if not statefiles:
statefiles = [sls_match]
for sls in statefiles:
r_env = '{0}:{1}'.format(saltenv, sls)
if r_env in mods:
continue
state, errors = self.render_state(
sls, saltenv, mods, matches)
if state:
self.merge_included_states(highstate, state, errors)
for i, error in enumerate(errors[:]):
if 'is not available' in error:
# match SLS foobar in environment
this_sls = 'SLS {0} in saltenv'.format(
sls_match)
if this_sls in error:
errors[i] = (
'No matching sls found for \'{0}\' '
'in env \'{1}\''.format(sls_match, saltenv))
all_errors.extend(errors)
self.clean_duplicate_extends(highstate)
return highstate, all_errors
def clean_duplicate_extends(self, highstate):
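        # Collapse duplicate (id, declaration) pairs accumulated under
        # '__extend__' so each extend entry is only applied once.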
if '__extend__' in highstate:
highext = []
for items in (six.iteritems(ext) for ext in highstate['__extend__']):
for item in items:
if item not in highext:
highext.append(item)
highstate['__extend__'] = [{t[0]: t[1]} for t in highext]
def merge_included_states(self, highstate, state, errors):
# The extend members can not be treated as globally unique:
if '__extend__' in state:
highstate.setdefault('__extend__',
[]).extend(state.pop('__extend__'))
if '__exclude__' in state:
highstate.setdefault('__exclude__',
[]).extend(state.pop('__exclude__'))
for id_ in state:
if id_ in highstate:
if highstate[id_] != state[id_]:
errors.append((
'Detected conflicting IDs, SLS'
' IDs need to be globally unique.\n The'
' conflicting ID is \'{0}\' and is found in SLS'
' \'{1}:{2}\' and SLS \'{3}:{4}\'').format(
id_,
highstate[id_]['__env__'],
highstate[id_]['__sls__'],
state[id_]['__env__'],
state[id_]['__sls__'])
)
try:
highstate.update(state)
except ValueError:
errors.append(
'Error when rendering state with contents: {0}'.format(state)
)
def _check_pillar(self, force=False):
'''
Check the pillar for errors, refuse to run the state if there are
errors in the pillar and return the pillar errors
'''
if force:
return True
if '_errors' in self.state.opts['pillar']:
return False
return True
def matches_whitelist(self, matches, whitelist):
'''
Reads over the matches and returns a matches dict with just the ones
that are in the whitelist
'''
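        # e.g. whitelist='web,db' (or ['web', 'db']) keeps only those sls names
        # in each matched environment.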
if not whitelist:
return matches
ret_matches = {}
if not isinstance(whitelist, list):
whitelist = whitelist.split(',')
for env in matches:
for sls in matches[env]:
if sls in whitelist:
ret_matches[env] = ret_matches[env] if env in ret_matches else []
ret_matches[env].append(sls)
return ret_matches
def call_highstate(self, exclude=None, cache=None, cache_name='highstate',
force=False, whitelist=None):
'''
Run the sequence to execute the salt highstate for this minion
'''
# Check that top file exists
tag_name = 'no_|-states_|-states_|-None'
ret = {tag_name: {
'result': False,
'comment': 'No states found for this minion',
'name': 'No States',
'changes': {},
'__run_num__': 0,
}}
cfn = os.path.join(
self.opts['cachedir'],
'{0}.cache.p'.format(cache_name)
)
if cache:
if os.path.isfile(cfn):
with salt.utils.fopen(cfn, 'rb') as fp_:
high = self.serial.load(fp_)
return self.state.call_high(high)
# File exists so continue
err = []
try:
top = self.get_top()
except SaltRenderError as err:
ret[tag_name]['comment'] = 'Unable to render top file: '
ret[tag_name]['comment'] += str(err.error)
return ret
except Exception:
trb = traceback.format_exc()
err.append(trb)
return err
err += self.verify_tops(top)
matches = self.top_matches(top)
if not matches:
msg = 'No Top file or external nodes data matches found.'
ret[tag_name]['comment'] = msg
return ret
matches = self.matches_whitelist(matches, whitelist)
self.load_dynamic(matches)
if not self._check_pillar(force):
err += ['Pillar failed to render with the following messages:']
err += self.state.opts['pillar']['_errors']
else:
high, errors = self.render_highstate(matches)
if exclude:
if isinstance(exclude, str):
exclude = exclude.split(',')
if '__exclude__' in high:
high['__exclude__'].extend(exclude)
else:
high['__exclude__'] = exclude
err += errors
if err:
return err
if not high:
return ret
cumask = os.umask(0o77)
try:
if salt.utils.is_windows():
# Make sure cache file isn't read-only
self.state.functions['cmd.run']('attrib -R "{0}"'.format(cfn), output_loglevel='quiet')
with salt.utils.fopen(cfn, 'w+b') as fp_:
try:
self.serial.dump(high, fp_)
except TypeError:
# Can't serialize pydsl
pass
except (IOError, OSError):
msg = 'Unable to write to "state.highstate" cache file {0}'
log.error(msg.format(cfn))
os.umask(cumask)
return self.state.call_high(high)
def compile_highstate(self):
'''
Return just the highstate or the errors
'''
err = []
top = self.get_top()
err += self.verify_tops(top)
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
err += errors
if err:
return err
return high
def compile_low_chunks(self):
'''
Compile the highstate but don't run it, return the low chunks to
see exactly what the highstate will execute
'''
top = self.get_top()
matches = self.top_matches(top)
high, errors = self.render_highstate(matches)
# If there is extension data reconcile it
high, ext_errors = self.state.reconcile_extend(high)
errors += ext_errors
# Verify that the high data is structurally sound
errors += self.state.verify_high(high)
high, req_in_errors = self.state.requisite_in(high)
errors += req_in_errors
high = self.state.apply_exclude(high)
if errors:
return errors
# Compile and verify the raw chunks
chunks = self.state.compile_high_data(high)
return chunks
class HighState(BaseHighState):
'''
Generate and execute the salt "High State". The High State is the
compound state derived from a group of template files stored on the
salt master or in the local cache.
'''
# a stack of active HighState objects during a state.highstate run
stack = []
def __init__(self, opts, pillar=None, jid=None, pillar_enc=None, proxy=None):
self.opts = opts
self.client = salt.fileclient.get_file_client(self.opts)
BaseHighState.__init__(self, opts)
self.state = State(self.opts, pillar, jid, pillar_enc, proxy=proxy)
self.matcher = salt.minion.Matcher(self.opts)
# tracks all pydsl state declarations globally across sls files
self._pydsl_all_decls = {}
# a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
self._pydsl_render_stack = []
def push_active(self):
self.stack.append(self)
@classmethod
def clear_active(cls):
# Nuclear option
#
# Blow away the entire stack. Used primarily by the test runner but also
# useful in custom wrappers of the HighState class, to reset the stack
# to a fresh state.
cls.stack = []
@classmethod
def pop_active(cls):
cls.stack.pop()
@classmethod
def get_active(cls):
try:
return cls.stack[-1]
except IndexError:
return None
class MasterState(State):
'''
Create a State object for master side compiling
'''
def __init__(self, opts, minion):
State.__init__(self, opts)
def load_modules(self, data=None, proxy=None):
'''
Load the modules into the state
'''
log.info('Loading fresh modules for state activity')
# Load a modified client interface that looks like the interface used
# from the minion, but uses remote execution
#
self.functions = salt.client.FunctionWrapper(
self.opts,
self.opts['id']
)
# Load the states, but they should not be used in this class apart
# from inspection
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions, states=self.states)
class MasterHighState(HighState):
'''
Execute highstate compilation from the master
'''
def __init__(self, master_opts, minion_opts, grains, id_,
saltenv=None,
env=None):
if isinstance(env, six.string_types):
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt '
'Boron.'
)
# Backwards compatibility
saltenv = env
# Force the fileclient to be local
opts = copy.deepcopy(minion_opts)
opts['file_client'] = 'local'
opts['file_roots'] = master_opts['master_roots']
opts['renderer'] = master_opts['renderer']
opts['state_top'] = master_opts['state_top']
opts['id'] = id_
opts['grains'] = grains
HighState.__init__(self, opts)
class RemoteHighState(object):
'''
Manage gathering the data from the master
'''
def __init__(self, opts, grains):
self.opts = opts
self.grains = grains
self.serial = salt.payload.Serial(self.opts)
# self.auth = salt.crypt.SAuth(opts)
self.channel = salt.transport.Channel.factory(self.opts['master_uri'])
def compile_master(self):
'''
Return the state data from the master
'''
load = {'grains': self.grains,
'opts': self.opts,
'cmd': '_master_state'}
try:
return self.channel.send(load, tries=3, timeout=72000)
except SaltReqTimeoutError:
return {}
|
py | 1a324c073a4615d3d0e4db638b03604c99b77c78 | from rest_framework.test import APITestCase
from django.urls import reverse
from datetime import datetime
import json
from crawlers.models import Crawler
from crawlers.models import CrawlerExecution
from crawlers.models import CrawlerExecutionGroup
from crawlers.models import STARTED
from django.contrib.auth import get_user_model
class CrawlerEndpoint(APITestCase):
def setUp(self):
self.endpoint = '/api/crawlers/'
Crawler.objects.bulk_create([
Crawler(site_name="mpf", site_name_display="MPF",
url_root="www.mpf.mp.br", task_name="mpf_crawler"),
Crawler(site_name="incra", site_name_display="INCRA",
url_root="www.gov.br/incra/pt-br", task_name="incra_crawler"),
Crawler(site_name="tcu", site_name_display="TCU",
url_root="pesquisa.apps.tcu.gov.br", task_name="tcu_crawler"),
])
self.crawler_to_be_create = {
"site_name": "ibama",
"site_name_display": "IBAMA",
"url_root": "www.gov.br/ibama/pt-br",
"task_name": "ibama_crawler",
}
def tearDown(self):
Crawler.objects.all().delete()
def user_login(self):
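        # Helper: create a superuser and return the "access" token issued by
        # the project's /token/ endpoint (assumed to be a SimpleJWT-style
        # login view).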
username = "admin"
email = "[email protected]"
password = "admin"
User = get_user_model()
User.objects.create_superuser(
username=username,
email=email,
password=password)
return json.loads(
self.client.post(
'/token/',
{
"username": username,
"password": password
}
).content)["access"]
def test_list_all_crawlers(self):
response = json.loads(self.client.get(
self.endpoint,
format='json'
).content)
self.assertEqual(
3,
len(response['results']),
)
def test_create(self):
token = self.user_login()
response = self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(response.content)
self.assertEqual(201, response.status_code)
self.assertEqual(
self.crawler_to_be_create['site_name'], json_response['site_name'])
self.assertEqual(
self.crawler_to_be_create['url_root'], json_response['url_root'])
self.assertEqual(
self.crawler_to_be_create['task_name'], json_response['task_name'])
def test_get(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
response = self.client.get(
f"{self.endpoint}{crawler_response['id']}/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(response.content)
self.assertEqual(200, response.status_code)
self.assertEqual(
self.crawler_to_be_create['site_name'], json_response['site_name'])
self.assertEqual(
self.crawler_to_be_create['url_root'], json_response['url_root'])
self.assertEqual(
self.crawler_to_be_create['task_name'], json_response['task_name'])
def test_update(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
crawler_update = {
"site_name": "ibge",
"site_name_display": "IBGE",
"url_root": "www.ibge.gov.br",
"task_name": "ibge_crawler",
}
updated_response = self.client.put(
f"{self.endpoint}{crawler_response['id']}/",
crawler_update,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
json_response = json.loads(updated_response.content)
self.assertEqual(200, updated_response.status_code)
self.assertEqual(
crawler_update['site_name'], json_response['site_name'])
self.assertEqual(crawler_update['url_root'], json_response['url_root'])
self.assertEqual(
crawler_update['task_name'], json_response['task_name'])
def test_delete(self):
token = self.user_login()
# Create Crawler
crawler_response = json.loads(self.client.post(
self.endpoint,
self.crawler_to_be_create,
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
response = self.client.delete(
f"{self.endpoint}{crawler_response['id']}/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
)
self.assertEqual(204, response.status_code)
class CrawlerExecutionsEndpoint(APITestCase):
def setUp(self):
self.endpoint_base = '/api/crawlers'
self.crawler_to_be_create = Crawler.objects.create(
site_name="mpf",
url_root="www.mpf.mp.br",
task_name="mpf_crawler"
)
self.crawler_group_exec = CrawlerExecutionGroup.objects.create(
crawler=self.crawler_to_be_create,
task_name="mpf_crawler_group",
finish_datetime=datetime(2021, 10, 10, 8, 35, 21),
state=STARTED,
)
CrawlerExecution.objects.bulk_create([
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6526-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 35, 21),
),
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6742-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 40, 10),
),
CrawlerExecution(
crawler_execution_group=self.crawler_group_exec,
task_id="352c6832-3153-11ec-8d3d-0242ac130003",
finish_datetime=datetime(2021, 10, 10, 8, 50, 15),
),
])
def tearDown(self):
Crawler.objects.all().delete()
def user_login(self):
username = "admin"
email = "[email protected]"
password = "admin"
User = get_user_model()
User.objects.create_superuser(
username=username,
email=email,
password=password)
return json.loads(
self.client.post(
'/token/',
{
"username": username,
"password": password
}
).content)["access"]
def test_list_all_crawler_executions(self):
token = self.user_login()
response = json.loads(self.client.get(
f"{self.endpoint_base}/{self.crawler_to_be_create.id}/executions/",
format='json',
HTTP_AUTHORIZATION='Bearer {}'.format(token)
).content)
crawler_executions_group = response['results']
self.assertEqual(
1,
len(crawler_executions_group),
)
self.assertEqual(
3,
len(crawler_executions_group[0]['crawler_executions']),
)
|
py | 1a324c4251c45fe6ea8784210317f636f8954b61 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeJpegOp."""
import os
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeJpegBenchmark(test.Benchmark):
"""Evaluate tensorflow DecodeJpegOp performance."""
def _evalDecodeJpeg(self,
image_name,
parallelism,
num_iters,
crop_during_decode=None,
crop_window=None,
tile=None):
"""Evaluate DecodeJpegOp for the given image.
TODO(tanmingxing): add decoding+cropping as well.
Args:
image_name: a string of image file name (without suffix).
parallelism: the number of concurrent decode_jpeg ops to be run.
num_iters: number of iterations for evaluation.
crop_during_decode: If true, use fused DecodeAndCropJpeg instead of
separate decode and crop ops. It is ignored if crop_window is None.
crop_window: if not None, crop the decoded image. Depending on
crop_during_decode, cropping could happen during or after decoding.
tile: if not None, tile the image to composite a larger fake image.
Returns:
The duration of the run in seconds.
"""
ops.reset_default_graph()
image_file_path = resource_loader.get_path_to_datafile(
os.path.join('core', 'lib', 'jpeg', 'testdata', image_name))
# resource_loader does not seem to work well under benchmark runners.
# So if the above path is not available, try another way to access the file:
if not os.path.exists(image_file_path):
image_file_path = resource_loader.get_path_to_datafile(
os.path.join(
'..', '..', 'core', 'lib', 'jpeg', 'testdata', image_name))
if tile is None:
image_content = variable_scope.get_variable(
'image_%s' % image_name,
initializer=io_ops.read_file(image_file_path))
else:
single_image = image_ops.decode_jpeg(
io_ops.read_file(image_file_path), channels=3, name='single_image')
# Tile the image to composite a new larger image.
tiled_image = array_ops.tile(single_image, tile)
image_content = variable_scope.get_variable(
'tiled_image_%s' % image_name,
initializer=image_ops.encode_jpeg(tiled_image))
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
images = []
for _ in xrange(parallelism):
if crop_window is None:
# No crop.
image = image_ops.decode_jpeg(image_content, channels=3)
elif crop_during_decode:
# combined decode and crop.
image = image_ops.decode_and_crop_jpeg(
image_content, crop_window, channels=3)
else:
# separate decode and crop.
image = image_ops.decode_jpeg(image_content, channels=3)
image = image_ops.crop_to_bounding_box(
image,
offset_height=crop_window[0],
offset_width=crop_window[1],
target_height=crop_window[2],
target_width=crop_window[3])
images.append(image)
r = control_flow_ops.group(*images)
for _ in xrange(3):
# Skip warm up time.
self.evaluate(r)
start_time = time.time()
for _ in xrange(num_iters):
self.evaluate(r)
end_time = time.time()
return end_time - start_time
def benchmarkDecodeJpegSmall(self):
"""Evaluate single DecodeImageOp for small size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'small.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_small_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegMedium(self):
"""Evaluate single DecodeImageOp for medium size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
for parallelism in [1, 100]:
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters)
duration_decode_crop = self._evalDecodeJpeg('medium.jpg', parallelism,
num_iters, False, crop_window)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window)
self.report_benchmark(
name='decode_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_medium_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
def benchmarkDecodeJpegLarge(self):
"""Evaluate single DecodeImageOp for large size image."""
num_iters = 10
crop_window = [10, 10, 50, 50]
tile = [4, 4, 1]
for parallelism in [1, 100]:
# Tile the medium size image to composite a larger fake image.
duration_decode = self._evalDecodeJpeg('medium.jpg', parallelism,
                                             num_iters, tile=tile)
duration_decode_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, False, crop_window, tile)
duration_decode_after_crop = self._evalDecodeJpeg(
'medium.jpg', parallelism, num_iters, True, crop_window, tile)
self.report_benchmark(
name='decode_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode)
self.report_benchmark(
name='decode_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_crop)
self.report_benchmark(
name='decode_after_crop_jpeg_large_p%d' % (parallelism),
iters=num_iters,
wall_time=duration_decode_after_crop)
if __name__ == '__main__':
test.main()
|
py | 1a324f50a4bfe9c2e345d8a66b9cf22b196821cf | # encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from io import StringIO
import docker
import py
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from .. import mock
from .. import unittest
from ..helpers import build_config
from compose.cli.command import get_project
from compose.cli.command import get_project_name
from compose.cli.docopt_command import NoSuchCommand
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.const import IS_WINDOWS_PLATFORM
from compose.project import Project
class CLITestCase(unittest.TestCase):
def test_default_project_name(self):
test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
with test_dir.as_cwd():
project_name = get_project_name('.')
self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_base_dir(self):
base_dir = 'tests/fixtures/simple-composefile'
project_name = get_project_name(base_dir)
self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_uppercase_base_dir(self):
base_dir = 'tests/fixtures/UpperCaseDir'
project_name = get_project_name(base_dir)
self.assertEqual('uppercasedir', project_name)
def test_project_name_with_explicit_project_name(self):
name = 'explicit-project-name'
project_name = get_project_name(None, project_name=name)
self.assertEqual('explicitprojectname', project_name)
@mock.patch.dict(os.environ)
def test_project_name_from_environment_new_var(self):
name = 'namefromenv'
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = get_project_name(None)
self.assertEqual(project_name, name)
def test_project_name_with_empty_environment_var(self):
base_dir = 'tests/fixtures/simple-composefile'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = ''
project_name = get_project_name(base_dir)
self.assertEqual('simplecomposefile', project_name)
@mock.patch.dict(os.environ)
def test_project_name_with_environment_file(self):
base_dir = tempfile.mkdtemp()
try:
name = 'namefromenvfile'
with open(os.path.join(base_dir, '.env'), 'w') as f:
f.write('COMPOSE_PROJECT_NAME={}'.format(name))
project_name = get_project_name(base_dir)
assert project_name == name
# Environment has priority over .env file
os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv'
assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME']
finally:
shutil.rmtree(base_dir)
def test_get_project(self):
base_dir = 'tests/fixtures/longer-filename-composefile'
project = get_project(base_dir)
self.assertEqual(project.name, 'longerfilenamecomposefile')
self.assertTrue(project.client)
self.assertTrue(project.services)
def test_command_help(self):
with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
TopLevelCommand.help({'COMMAND': 'up'})
assert "Usage: up" in fake_stdout.getvalue()
def test_command_help_nonexistent(self):
with pytest.raises(NoSuchCommand):
TopLevelCommand.help({'COMMAND': 'nonexistent'})
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
@mock.patch('compose.cli.main.RunOperation', autospec=True)
@mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {'image': 'busybox'}
}),
)
command = TopLevelCommand(project)
with pytest.raises(SystemExit):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': False,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
_, _, call_kwargs = mock_run_operation.mock_calls[0]
assert call_kwargs['logs'] is False
def test_run_service_with_restart_always(self):
mock_client = mock.create_autospec(docker.APIClient)
mock_client.api_version = DEFAULT_DOCKER_API_VERSION
project = Project.from_config(
name='composetest',
client=mock_client,
config_data=build_config({
'service': {
'image': 'busybox',
'restart': 'always',
}
}),
)
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
self.assertEqual(
mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
'always'
)
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
'--volume': [],
'--rm': True,
'--name': None,
'--workdir': None,
})
self.assertFalse(
mock_client.create_host_config.call_args[1].get('restart_policy')
)
def test_command_manual_and_service_ports_together(self):
project = Project.from_config(
name='composetest',
client=None,
config_data=build_config({
'service': {'image': 'busybox'},
}),
)
command = TopLevelCommand(project)
with self.assertRaises(UserError):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
'--user': None,
'--no-deps': None,
'-d': True,
'-T': None,
'--entrypoint': None,
'--service-ports': True,
'--publish': ['80:80'],
'--rm': None,
'--name': None,
})
|
py | 1a324f9c7ced6313cc4ee633e19995da99d16c9e | from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/')
def hello():
return render_template('index.html')
@app.route("/post", methods=['POST'])
def post():
    # Try exercise 1!
    value = request.form['input']
    msg = "Welcome, %s." % value
return msg
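# index.html is assumed to contain a form that POSTs a field named "input" to
# /post, e.g. <form action="/post" method="post"><input name="input"></form>.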
if __name__ == '__main__':
app.run() |
py | 1a32505991d8803c4431dff0450269d0d6f22791 | def lambda_handler(event, context):
"""Sample pure Lambda function
"""
return "Hello from a Lambda Image!" |
py | 1a325179a4d5ee5b43d349ba99f22b9d3040211a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By Rodrigo Wilkens
# Last update 02/April/2022
# version ='1.0'
# ---------------------------------------------------------------------------
import openreview
import os
import yaml
from tqdm import tqdm
import sys
from util import *
username = sys.argv[1]
password = sys.argv[2]
try:
client_acl = openreview.Client(baseurl='https://api.openreview.net', username=username, password=password)
except:
print("OpenReview connection refused")
exit()
download_all = eval(sys.argv[4]) if len(sys.argv)>4 else True
download_pdf = eval(sys.argv[5]) if len(sys.argv)>5 else True
if not download_all or not download_pdf:
print("The output of this run cannot be used at ACLPUB2")
acl_name = 'aclweb.org/ACL/2022/Conference' if len(sys.argv)<=3 else sys.argv[3]
attachment_types = {"software":"software", "Data":"note"}
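# Maps OpenReview submission fields that may carry attachments to the
# attachment "type" label recorded in papers.yml (assumed ACLPUB2 convention).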
papers_folder = "papers"
attachments_folder = "attachments"
if not os.path.exists(papers_folder):
os.mkdir(papers_folder)
if not os.path.exists(attachments_folder):
os.mkdir(attachments_folder)
submissions=list(openreview.tools.iterget_notes(client_acl, invitation=acl_name+'/-/Blind_Submission', details='original'))
decision_by_forum={d.forum: d for d in list(openreview.tools.iterget_notes(client_acl, invitation=acl_name+'/Paper.*/-/Decision')) if 'accept' in d.content['decision'].lower()}
papers = []
small_log = open("papers.log","w")
for submission in tqdm(submissions):
if submission.id not in decision_by_forum:
continue
######################
#### main
authorsids = submission.details['original']['content']['authorids']
authors = []
for authorsid in authorsids:
author, error = get_user(authorsid, client_acl)
if error:
small_log.write("Error at " + authorsid + " from (#" + str(submission.number) + "; openreview ID: " + submission.id + ") " + submission.content["title"] + "\n")
if author:
authors.append(author)
assert len(authors)>0
paper = {
"id": submission.number,# len(papers)+1,
"title":submission.content["title"],
"authors":authors,
"abstract":submission.content["abstract"] if "abstract" in submission.content else "",
"file": str(submission.number) + ".pdf", #str(len(papers)+1) + ".pdf",
"pdf_file":submission.content["pdf"].split("/")[-1],
'decision':decision_by_forum[submission.id].content['decision'],
"openreview_id":submission.id
}
######################
#### attributes
submitted_area = submission.content["track"] if "track" in submission.content else None
if 'paper_type' in submission.content:
paper_type = " ".join(submission.content['paper_type'].split()[:2]).lower()
else:
paper_type = "N/A"
presentation_type = "N/A"
paper["attributes"] = {
"submitted_area":submitted_area,
"paper_type":paper_type,
"presentation_type":presentation_type,
}
######################
#### attachments
attachments = []
for att_type in attachment_types:
if att_type in submission.content and submission.content[att_type]:
attachments.append({"type": attachment_types[att_type],
"file": "attachments/" + str(paper["id"]) + "_" + str(submission.content[att_type].split(".")[-1]),
"open_review_id": str(submission.content[att_type])
} )
if download_all:
                file_type = submission.content[att_type].split(".")[-1]
                f = client_acl.get_attachment(submission.id, att_type)
                with open(os.path.join(attachments_folder, str(paper["id"]) + "." + file_type),'wb') as op: op.write(f)
if download_pdf:
f = client_acl.get_pdf(id=paper['openreview_id'])
with open(os.path.join(papers_folder, str(paper["id"]) + ".pdf"),'wb') as op: op.write(f)
if len(attachments)>0:
paper["attachments"] = attachments
papers.append(paper)
# if len(papers)>10:
# print(len(papers))
# break
small_log.close()
def get_paper_key(p):
return p["id"]
papers.sort(key=get_paper_key)
yaml.dump(papers, open('papers.yml', 'w'))
|
py | 1a32521161a04d02e3d54983a9c52ad255628b7f | import argparse
import datetime
import json
import numpy as np
import pysparkling
import trajnetplusplustools.show
def read_log(path):
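    # Each kept line is expected to be a JSON object, optionally prefixed with
    # 'json:', e.g. json:{"type": "train", "epoch": 3, "loss": 0.12, ...}
    # (field names inferred from their use below); the result maps each "type"
    # value to the list of parsed rows.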
sc = pysparkling.Context()
return (sc
.textFile(path)
.filter(lambda line: line.startswith(('{', 'json:')))
.map(lambda line: json.loads(line.strip('json:')))
.groupBy(lambda data: data.get('type'))
.collectAsMap())
def plots(log_files, output_prefix, labels=None):
if not labels:
labels = log_files
datas = [read_log(log_file) for log_file in log_files]
with trajnetplusplustools.show.canvas(output_prefix + 'time.png') as ax:
for data, label in zip(datas, labels):
if 'train' in data:
x = np.array([row.get('epoch') + row.get('batch') / row.get('n_batches')
for row in data['train']])
y = [datetime.datetime.strptime(row.get('asctime')[:-4], '%Y-%m-%d %H:%M:%S')
for row in data['train']]
y = [(yi - y[0]).total_seconds() / 3600.0 for yi in y]
ax.plot(x, y, label=label)
ax.set_xlabel('epoch')
ax.set_ylabel('time [h]')
ax.legend()
with trajnetplusplustools.show.canvas(output_prefix + 'epoch-time.png') as ax:
for data, label in zip(datas, labels):
if 'train-epoch' in data:
x = np.array([row.get('epoch') for row in data['train-epoch']])
y = [datetime.datetime.strptime(row.get('asctime')[:-4], '%Y-%m-%d %H:%M:%S')
for row in data['train-epoch']]
y = [(yi - prev_yi).total_seconds() / 60.0
for prev_yi, yi in zip(y[:-1], y[1:])]
ax.plot(x[1:], y, label=label)
ax.set_xlabel('epoch')
ax.set_ylabel('epoch-time [min]')
ax.legend()
with trajnetplusplustools.show.canvas(output_prefix + 'lr.png') as ax:
for data, label in zip(datas, labels):
if 'train' in data:
x = [row.get('epoch') for row in data['train']]
y = [row.get('lr') for row in data['train']]
ax.plot(x, y, label=label)
ax.set_xlabel('epoch')
ax.set_ylabel('learning rate')
ax.set_yscale('log', nonpositive='clip')
ax.legend()
with trajnetplusplustools.show.canvas(output_prefix + 'val.png') as ax:
for data, label in zip(datas, labels):
if 'val' in data:
x = [row.get('epoch') for row in data['val']]
y = [row.get('accuracy', row.get('prec@1')) for row in data['val']]
ax.plot(x, y, label=label)
ax.set_xlabel('epoch')
ax.set_ylabel('accuracy')
ax.legend()
with trajnetplusplustools.show.canvas(output_prefix + 'epoch-loss.png') as ax:
for data, label in zip(datas, labels):
val_color = None
if 'val-epoch' in data:
x = [row.get('epoch') for row in data['val-epoch'] if row.get('epoch') < 100]
y = [row.get('loss') for row in data['val-epoch'] if row.get('epoch') < 100]
val_line, = ax.plot(x, y, label=label)
val_color = val_line.get_color()
if 'train-epoch' in data:
x = [row.get('epoch') for row in data['train-epoch'] if row.get('epoch') < 100]
y = [row.get('loss') for row in data['train-epoch'] if row.get('epoch') < 100]
ax.plot(x, y, color=val_color, linestyle='dotted')
ax.set_xlabel('epoch')
ax.set_ylabel('start-loss')
# ax.set_ylim(0.05, 0.3)
# if min(y) > -0.1:
# ax.set_yscale('log', nonpositive='clip')
ax.legend(loc=1)
with trajnetplusplustools.show.canvas(output_prefix + 'seq-loss.png') as ax:
for data, label in zip(datas, labels):
val_color = None
if 'val-epoch' in data:
x = [row.get('epoch') for row in data['val-epoch'] if row.get('epoch') < 100]
y = [row.get('test_loss') for row in data['val-epoch'] if row.get('epoch') < 100]
val_line, = ax.plot(x, y, label=label)
val_color = val_line.get_color()
if 'train-epoch' in data:
x = [row.get('epoch') for row in data['train-epoch'] if row.get('epoch') < 100]
y = [row.get('seq_loss') for row in data['train-epoch'] if row.get('epoch') < 100]
ax.plot(x, y, color=val_color, linestyle='dotted')
ax.set_xlabel('epoch')
ax.set_ylabel('seq-loss')
# ax.set_ylim(0.0, 5.0)
# if min(y) > -0.1:
# ax.set_yscale('log', nonpositive='clip')
ax.legend(loc=1)
# # with trajnetplusplustools.show.canvas(output_prefix + 'preprocess_time.png') as ax:
# # for data, label in zip(datas, labels):
# # if 'train' in data:
# # x = np.array([row.get('epoch') + row.get('batch') / row.get('n_batches')
# # for row in data['train']])
# # y = np.array([row.get('data_time') / row.get('time') * 100.0
# # for row in data['train']])
# # stride = int(len(x) / (x[-1] - x[0]) / 3.0) # 3 per epoch
# # if x[-1] - x[0] > 1.0 and stride > 5:
# # x_binned = np.array([x[i] for i in range(0, len(x), stride)][:-1])
# # y_binned = np.stack([y[i:i + stride] for i in range(0, len(x), stride)][:-1])
# # y_mean = np.mean(y_binned, axis=1)
# # y_min = np.min(y_binned, axis=1)
# # y_max = np.max(y_binned, axis=1)
# # ax.fill_between(x_binned, y_min, y_max, alpha=0.2)
# # ax.plot(x_binned, y_mean, label=label)
# # else:
# # ax.plot(x, y, label=label)
# # ax.set_xlabel('epoch')
# # ax.set_ylabel('data preprocessing time [%]')
# # ax.set_ylim(0, 100)
# # ax.legend()
with trajnetplusplustools.show.canvas(output_prefix + 'train.png') as ax:
for data, label in zip(datas, labels):
if 'train' in data:
x = np.array([row.get('epoch') + row.get('batch') / row.get('n_batches')
for row in data['train']])
y = np.array([row.get('loss')
for row in data['train']])
stride = int(len(x) / (x[-1] - x[0]) / 3.0) # 3 per epoch
if x[-1] - x[0] > 1.0 and stride > 5:
x_binned = np.array([x[i] for i in range(0, len(x), stride)][:-1])
y_binned = np.stack([y[i:i + stride] for i in range(0, len(x), stride)][:-1])
y_mean = np.mean(y_binned, axis=1)
y_min = np.min(y_binned, axis=1)
y_max = np.max(y_binned, axis=1)
ax.fill_between(x_binned, y_min, y_max, alpha=0.2)
ax.plot(x_binned, y_mean, label=label)
else:
y_mean = [0, 0]
ax.plot(x, y, label=label)
ax.set_xlabel('epoch')
ax.set_ylabel('training loss')
# ax.set_ylim(-5, 6)
# if min(y_mean) > -0.1:
# ax.set_yscale('log', nonpositive='clip')
ax.legend()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('log_file', nargs='+',
help='path to log file')
parser.add_argument('--label', nargs='+',
help='labels in the same order as files')
parser.add_argument('-o', '--output', default=None,
help='output prefix (default is log_file + .)')
args = parser.parse_args()
if args.output is None:
args.output = args.log_file[-1] + '.'
plots(args.log_file, args.output, args.label)
if __name__ == '__main__':
main()
|
py | 1a3252e9c69ae4a090beac5e3f571f77b8417780 | from dash import html, dcc
from trading_tool.db import create_connection
from trading_tool.client import TEST_CLIENT
from views.header import make_header
from views.backtesting import make_backtesting_container_1, make_backtesting_container_2
from views.profile import make_profile_description
from views.footer import make_footer
conn = create_connection("trading_tool.db")
def make_layout():
overview_tab = dcc.Tab(
label="Overview",
value="overview-tab",
className="my-tab",
selected_className="my-tab-selected",
children=[make_profile_description(TEST_CLIENT)],
)
backtesting_tab = dcc.Tab(
label="Backtesting",
value="backtesting-tab",
className="my-tab",
selected_className="my-tab-selected",
children=[make_backtesting_container_1(), make_backtesting_container_2()],
)
# body
layout = html.Div(
[
# header
make_header(),
# horizontal line
html.Hr(),
# tabs
dcc.Tabs(
value="overview-tab",
className="my-tab-container",
children=[overview_tab, backtesting_tab],
),
# footer
make_footer(),
],
id="layout",
)
layout = dcc.Loading(children=layout)
return layout
|
py | 1a32531901b60e5c6b9eb9f5a291824fd28a646b | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class CBScoreMixin(object):
@property
def cqe_performance_score(self):
"""
Compute perf score from corresponding overheads using CQE formula.
:return: performance score, [0.0, 1.0]
"""
# Computed according to the CQE scoring manual
# https://cgc.darpa.mil/CQE_Scoring.pdf
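        # Worked example (illustrative numbers only): overheads of size=0.2,
        # memory=0.10, time=0.05 give perf_factor = 1 + max(0.05, 0.10, 0.05)
        # = 1.10, which scores (1.10 - 0.1) ** -4 = 1.0; a perf_factor of 1.3
        # would score (1.3 - 0.1) ** -4 ~= 0.48.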
perf_score = None
perf_factor = 1 + max(0.25 * self.size_overhead,
self.memory_overhead,
self.time_overhead)
if 0 <= perf_factor < 1.10:
perf_score = 1
elif 1.10 <= perf_factor < 1.62:
perf_score = (perf_factor - 0.1) ** -4
elif 1.62 <= perf_factor < 2:
perf_score = (-1 * 0.493 * perf_factor) + 0.986
else:
perf_score = 0
return perf_score
@property
def cqe_functionality_score(self):
"""
Compute functionality score from functionality factor using CQE formula.
:return: functionality score [0.0, 1.0]
"""
func_factor = self.success
func_score = 0.0
if func_factor == 1:
func_score = 1.0
elif 0.40 <= func_factor < 1:
func_score = (2 - func_factor) ** (-4)
elif 0 < func_factor < 0.40:
func_score = 0.381 * func_factor
else:
func_score = 0.0
return float(func_score)
@property
def availability(self):
return min(self.cqe_performance_score, self.cqe_functionality_score)
@property
def cb_score(self):
return self.availability * self.security
|
py | 1a325354f907fbd555f1737aae45665c954865f3 | # Code listing #27
# Note: This contains a second fix only for the find_optimal_route_to_my_office_from_home function
# Since this is a fixed module, and its second version, we will call it metrictest_fix2.py.
import random
def find_optimal_route_to_my_office_from_home(start_time,
expected_time,
favorite_route='SBS1K',
favorite_option='bus'):
""" Find optimal route for me to go from home to office.
First two inputs should be datetime instances.
"""
# Convert to minutes
tdiff = (expected_time - start_time).total_seconds()/60.0
options = {range(0, 30): 'car',
range(30, 45): ('car', 'metro'),
range(45, 60): ('bus:335E', 'bus:connector')}
if tdiff < 80:
# Pick the range it falls into
for drange in options:
if tdiff in drange:
                return options[drange]
# Might as well go by normal bus
return random.choice(('bus:330', 'bus:331', ':'.join((favorite_option,
favorite_route))))
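# Hypothetical usage, not part of the original listing: leaving home at 9:00
# and expecting to arrive by 9:50 gives a 50 minute window, which falls in
# range(45, 60):
#
#     >>> from datetime import datetime
#     >>> find_optimal_route_to_my_office_from_home(datetime(2022, 1, 3, 9, 0),
#     ...                                           datetime(2022, 1, 3, 9, 50))
#     ('bus:335E', 'bus:connector')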
|
py | 1a325424908b56d782ac06dd9f634276ce5ec186 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ActiveMaster definition."""
from config_bootstrap import Master
class V8FYI(Master.Master3):
base_app_url = 'https://v8-status.appspot.com'
tree_status_url = base_app_url + '/status'
store_revisions_url = base_app_url + '/revisions'
last_good_url = base_app_url + '/lkgr'
project_name = 'V8 FYI'
master_port_id = 12
project_url = 'http://v8.googlecode.com'
buildbot_url = 'http://build.chromium.org/p/client.v8.fyi/'
service_account_file = 'service-account-v8.json'
pubsub_service_account_file = 'service-account-luci-milo.json'
pubsub_topic = 'projects/luci-milo/topics/public-buildbot'
name = 'client.v8.fyi'
|
py | 1a3254e7aa988c1711a068e05404f07edc0277db | # coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the tf_cnn_benchmarks tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from absl import app
from absl import flags as absl_flags
from cnn_quantization.tf_cnn_benchmarks import all_reduce_benchmark_test
from cnn_quantization.tf_cnn_benchmarks import allreduce_test
from cnn_quantization.tf_cnn_benchmarks import benchmark_cnn_distributed_test
from cnn_quantization.tf_cnn_benchmarks import benchmark_cnn_test
from cnn_quantization.tf_cnn_benchmarks import cnn_util_test
from cnn_quantization.tf_cnn_benchmarks import variable_mgr_util_test
from cnn_quantization.tf_cnn_benchmarks.models import nasnet_test
# Ideally, we wouldn't need this option, and run both distributed tests and non-
# distributed tests. But, TensorFlow allocates all the GPU memory by default, so
# the non-distributed tests allocate all the GPU memory. The distributed tests
# spawn processes that run TensorFlow, and cannot run if all the GPU memory is
# already allocated. If a non-distributed test is run and then a distributed
# test is run in the same process, the distributed test will fail because there
# is no GPU memory left for the spawned processes to allocate.
absl_flags.DEFINE_boolean('run_distributed_tests', False,
'If True, run the distributed tests. If False, the'
'non-distributed tests.')
absl_flags.DEFINE_boolean('full_tests', False,
'If True, all distributed or non-distributed tests '
'are run, which can take hours. If False, only a '
'subset of tests will be run. This subset runs much '
'faster and tests almost all the functionality as '
'the full set of tests, so it is recommended to keep '
'this option set to False.')
FLAGS = absl_flags.FLAGS
def main(_):
loader = unittest.defaultTestLoader
if FLAGS.full_tests:
suite = unittest.TestSuite([
loader.loadTestsFromModule(allreduce_test),
loader.loadTestsFromModule(cnn_util_test),
loader.loadTestsFromModule(variable_mgr_util_test),
loader.loadTestsFromModule(benchmark_cnn_test),
loader.loadTestsFromModule(all_reduce_benchmark_test),
loader.loadTestsFromModule(nasnet_test),
])
dist_suite = unittest.TestSuite([
loader.loadTestsFromModule(benchmark_cnn_distributed_test),
])
else:
suite = unittest.TestSuite([
loader.loadTestsFromModule(allreduce_test),
loader.loadTestsFromModule(cnn_util_test),
loader.loadTestsFromModule(all_reduce_benchmark_test),
loader.loadTestsFromModule(variable_mgr_util_test),
loader.loadTestsFromTestCase(benchmark_cnn_test.TestAlexnetModel),
loader.loadTestsFromTestCase(benchmark_cnn_test.TfCnnBenchmarksTest),
loader.loadTestsFromTestCase(benchmark_cnn_test.VariableUpdateTest),
loader.loadTestsFromTestCase(
benchmark_cnn_test.VariableMgrLocalReplicatedTest),
])
dist_suite = unittest.TestSuite([
loader.loadTestsFromNames([
'benchmark_cnn_distributed_test.DistributedVariableUpdateTest'
'.testVarUpdateDefault',
'benchmark_cnn_distributed_test.TfCnnBenchmarksDistributedTest'
'.testParameterServer',
]),
])
if FLAGS.run_distributed_tests:
print('Running distributed tests')
result = unittest.TextTestRunner(verbosity=2).run(dist_suite)
else:
print('Running non-distributed tests')
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
app.run(main)
|
py | 1a3254f3adfcc8a8c6c469023a93b184f23c8539 | import os
import sys
import codecs
import pkg_resources
from setuptools import setup, find_packages
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
# __version__ = "1.x.x"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
def parse_requirements(filename):
""" load requirements from a pip requirements file. (replacing from pip.req import parse_requirements)"""
lineiter = (line.strip() for line in open(filename))
reqs = [line for line in lineiter if line and not line.startswith("#")]
if sys.platform == "win32":
reqs.append('pywin32')
if sys.version_info[:2] <= (3, 6) and \
"opencv-contrib-python" not in [d.project_name for d in pkg_resources.working_set]:
# If py<=3.6 and opencv-contrib-python has not been installed, install version==3.2.0.7
reqs.remove("opencv-contrib-python")
reqs.append("opencv-contrib-python==3.2.0.7")
if sys.version_info.major == 2:
# facebook-wda only supports py3
reqs.remove("facebook-wda>=1.3.3")
return reqs
setup(
name='airtest',
version=get_version("airtest/utils/version.py"),
author='Netease Games',
author_email='[email protected]',
description='UI Test Automation Framework for Games and Apps on Android/iOS/Windows/Linux',
    long_description='UI Test Automation Framework for Games and Apps on Android/iOS/Windows, presented by NetEase Games',
url='https://github.com/AirtestProject/Airtest',
license='Apache License 2.0',
keywords=['automation', 'automated-test', 'game', 'android', 'ios', 'windows', 'linux'],
packages=find_packages(exclude=['cover', 'playground', 'tests', 'dist']),
package_data={
'android_deps': ["*.apk", "airtest/core/android/static"],
'html_statics': ["airtest/report"],
'ios_deps': ["airtest/core/ios/iproxy"],
},
include_package_data=True,
install_requires=parse_requirements('requirements.txt'),
extras_require={
'tests': [
'nose',
],
'docs': [
'sphinx',
'recommonmark',
'sphinx_rtd_theme',
'mock',
]},
entry_points="""
[console_scripts]
airtest = airtest.cli.__main__:main
""",
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
py | 1a325510cce8366336fd668fd594e13f23ca23ad | """
WSGI config for divine_flower_29602 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'divine_flower_29602.settings')
application = get_wsgi_application()
|
py | 1a325533d6517b221e25efc66e133e52954e434e | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsNotAuthenticated(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
return request.user.is_anonymous
class UserObjectOwner(BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return bool(request.user.id == obj.id)
class IsJobSeeker(BasePermission):
def has_permission(self, request, view):
if request.method in SAFE_METHODS:
return True
return bool(request.user.is_authenticated and request.user.is_job_seeker)
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return bool(request.user.is_authenticated and request.user.is_job_seeker and request.user == obj.user)
class IsEmployer(BasePermission):
def has_permission(self, request, view):
return bool(request.user.is_authenticated and request.user.is_employer)
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return bool(request.user.is_authenticated and request.user.is_employer and request.user == obj.user)
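# Illustrative wiring only (the view name below is an assumption, not part of this
# module), showing how these permission classes are typically attached to DRF views
# via the standard `permission_classes` attribute:
#
#   from rest_framework import generics
#
#   class JobSeekerProfileView(generics.RetrieveUpdateAPIView):
#       permission_classes = [IsJobSeeker]
#       ...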
|
py | 1a32560ab223832b11312f9a58849e6ceda7ada3 | # 354, https://leetcode.com/problems/russian-doll-envelopes/description/
# Sol-1 normal dp with time complexity: O(n^2), TLE
# no optimization
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
if not envelopes or len(envelopes) == 0:
return 0
# dp with time complexity: O(n^2)
# sort envelopes
envelopes.sort(key=lambda x:(x[0], x[1]))
dp = [1 for _ in range(len(envelopes))]
for i in range(1, len(envelopes)):
for j in range(i):
if envelopes[i][0] > envelopes[j][0] and envelopes[i][1] > envelopes[j][1]:
dp[i] = max(dp[i], dp[j] + 1)
return max(dp)
# dp with optimization based on 300) longest increasing sub-sequence
# time complexity: O(nlgn), Accepted, by huijiang
# good explanation: https://leetcode.com/problems/russian-doll-envelopes/discuss/82751/
# O(Nlog(N))-python-solution-explained
# e =[[5,4],[6,4],[6,7],[2,3]]
# e.sort(key=lambda x: (x[0], -x[1]))
# [[2, 3], [5, 4], [6, 7], [6, 4]]
# Since the width is increasing, we only need to consider height.
# [3, 4] cannot contain [3, 3], so for equal widths we sort heights in descending order;
# otherwise the order [3, 3], [3, 4] would wrongly be counted as an increasing pair.
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
envelopes.sort(key=lambda x:(x[0],-x[1]))
res = [0] * len(envelopes)
size = 0
for envelop in envelopes:
i, j = 0, size
while i != j:
m = (i + j) // 2
if envelop[1] > res[m]:
i = m + 1
else:
j = m
res[i] = envelop[1]
size = max(size, i + 1)
return size
s = Solution()
print(s.maxEnvelopes([[5,4],[6,4],[6,7],[2,3]]))
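# The call above prints 3 ([2,3] -> [5,4] -> [6,7]).
# For comparison, a minimal sketch (not part of the original solutions) of the same
# O(n log n) idea using the standard-library bisect module; the function name is
# illustrative only.
import bisect
def max_envelopes_bisect(envelopes):
    # sort by width ascending, height descending so equal widths never chain
    envelopes = sorted(envelopes, key=lambda x: (x[0], -x[1]))
    tails = []  # tails[i] = smallest tail height of an increasing chain of length i + 1
    for _, h in envelopes:
        i = bisect.bisect_left(tails, h)
        if i == len(tails):
            tails.append(h)
        else:
            tails[i] = h
    return len(tails)
print(max_envelopes_bisect([[5,4],[6,4],[6,7],[2,3]]))  # also prints 3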
|
py | 1a3256eb82bea129dc6b964b59bcf1d8426db0ab | import os
import board
import displayio
from adafruit_display_text.label import Label
from adafruit_bitmap_font import bitmap_font
# the current working directory (where this file is)
cwd = ("/"+__file__).rsplit('/', 1)[0]
fonts = [file for file in os.listdir(cwd+"/fonts/")
if (file.endswith(".bdf") and not file.startswith("._"))]
for i, filename in enumerate(fonts):
fonts[i] = cwd+"/fonts/"+filename
print(fonts)
##########################################################################
THE_FONT = fonts[0]
DISPLAY_STRING = "A multi-line-\nexample of\n font bounding!"
WRAP_CHARS = 40
##########################################################################
# Make the display context
splash = displayio.Group()
board.DISPLAY.show(splash)
# Make a background color fill
color_bitmap = displayio.Bitmap(320, 240, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFFFFFF
bg_sprite = displayio.TileGrid(color_bitmap,
pixel_shader=color_palette,
position=(0, 0))
splash.append(bg_sprite)
# Load the font
font = bitmap_font.load_font(THE_FONT)
font.load_glyphs(DISPLAY_STRING.encode('utf-8'))
print(DISPLAY_STRING)
text = Label(font, text=DISPLAY_STRING)
text.x = 20
text.y = 100
text.color = 0x0
# Make a background color fill
dims = text.bounding_box
print(dims)
textbg_bitmap = displayio.Bitmap(dims[2], dims[3], 1)
textbg_palette = displayio.Palette(1)
textbg_palette[0] = 0xFF0000
textbg_sprite = displayio.TileGrid(textbg_bitmap,
pixel_shader=textbg_palette,
position=(text.x+dims[0], text.y+dims[1]))
splash.append(textbg_sprite)
splash.append(text)
board.DISPLAY.refresh_soon()
board.DISPLAY.wait_for_frame()
while True:
pass
|
py | 1a3257954b0b0213cb1b9af59e0217c3f9ce9634 | """Ensures that the config for the package is handled correctly."""
import io
import pytest
import unittest.mock as mock
fake_io_file1 = io.StringIO('{"DEFAULT_DATABASE_ROOT": "./molscore_data"}')
fake_io_file2 = io.StringIO('')
fake_io_file2.close = lambda: None
def test_local_variables():
    """Ensure the config variables are initialized."""
    import molscore.config
assert type(molscore.config._initial_config) == dict,\
"Config should be loaded as dict"
assert type(molscore.config.VALID_CONFIG_PARAMETERS) == list,\
"Config param options should be list of str."
return
def test__check_config():
    """Another layer of protection for ensuring configs are handled."""
    import molscore.config
# good dict, valid config params
# nothing should happen, eg nothing raised
good = {'DEFAULT_DATABASE_ROOT': './'}
molscore.config._check_config(good)
# bad dict, incorrect value
bad = {'DEFAULT_DATABASE_ROOT': 5}
with pytest.raises(TypeError):
molscore.config._check_config(bad)
# bad dict, folder does not exist
bad = {'DEFAULT_DATABASE_ROOT': './not_a_real_folder_hopefully/my_data'}
with pytest.raises(ValueError):
molscore.config._check_config(bad)
return
@mock.patch('molscore._set_globals')
@mock.patch('molscore.config.open', side_effect=[fake_io_file1, fake_io_file2])
def test_update(mocked_open, mocked_global_setter, working_test_dir):
    """Update config without actually doing so."""
    import molscore.config
molscore.config.update('DEFAULT_DATABASE_ROOT',
f'{working_test_dir}/new_data')
assert mocked_global_setter.called,\
"""Did not update global variables"""
assert fake_io_file2.getvalue() == '{"DEFAULT_DATABASE_ROOT": "'+str(working_test_dir)+'/new_data"}',\
"Did not save to file the new variable"
return
|
py | 1a3257e20830dc121016367e7865e30011b7fb66 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2017 Svenzva Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Author: Maxwell Svetlik. Code snippets used and modified where indicated.
"""
import rospy
import rospkg
import actionlib
import yaml
import rospkg
from collections import defaultdict, deque
from threading import Thread
from mx_driver import dynamixel_io
from mx_driver.dynamixel_const import *
from svenzva_drivers.joint_trajectory_action_controller import *
from svenzva_drivers.revel_cartesian_controller import *
from svenzva_drivers.revel_arm_services import *
from svenzva_drivers.revel_gripper_server import *
from svenzva_drivers.svenzva_compliance_controller import *
from std_msgs.msg import Bool
from trajectory_msgs.msg import JointTrajectoryPoint
from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal, FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from svenzva_msgs.msg import MotorState, MotorStateList, SvenzvaJointAction, SvenzvaJointResult
class SvenzvaDriver:
#adapted from controller_manager.py [https://github.com/SvenzvaRobotics/mx_dynamixel], 3/17/17
def __init__(self,
port_name='/dev/ttyUSB0',
port_namespace='revel',
baud_rate='115200',
min_motor_id=1,
max_motor_id=7,
update_rate=10,
diagnostics_rate=0,
readback_echo=False):
rospy.init_node('svenzva_driver', anonymous=False)
self.port_name = port_name
self.port_namespace = port_namespace
self.baud_rate = baud_rate
self.min_motor_id = min_motor_id
self.max_motor_id = max_motor_id
self.update_rate = rospy.get_param('~update_rate', update_rate)
self.diagnostics_rate = diagnostics_rate
self.readback_echo = readback_echo
self.actual_rate = update_rate
self.error_counts = {'non_fatal': 0, 'checksum': 0, 'dropped': 0}
self.current_state = MotorStateList()
self.num_ping_retries = 5
self.traj_client = None
self.motor_states_pub = rospy.Publisher('%s/motor_states' % self.port_namespace, MotorStateList, queue_size=1)
rospy.on_shutdown(self.disconnect)
self.connect(port_name, baud_rate, False)
self.__find_motors()
self.initialze_motor_states()
self.start_modules()
#adapted from serial_proxy.py [https://github.com/SvenzvaRobotics/mx_dynamixel], 3/17/17
def connect(self, port_name, baud_rate, readback_echo):
try:
self.dxl_io = dynamixel_io.DynamixelIO(port_name, baud_rate, readback_echo)
except dynamixel_io.SerialOpenError, e:
rospy.logfatal(e.message)
sys.exit(1)
if self.update_rate > 0: Thread(target=self.__update_motor_states).start()
def disconnect(self):
return
"""
Check if all motors are reachable on the serial port
"""
#adapted from serial_proxy.py [https://github.com/SvenzvaRobotics/mx_dynamixel], 3/17/17
def __find_motors(self):
        rospy.loginfo('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))
self.motors = []
self.motor_static_info = {}
for motor_id in range(self.min_motor_id, self.max_motor_id + 1):
for trial in range(self.num_ping_retries):
try:
result = self.dxl_io.ping(motor_id)
except Exception as ex:
rospy.logerr('Exception thrown while pinging motor %d - %s' % (motor_id, ex))
continue
if result:
self.motors.append(motor_id)
break
if not self.motors:
rospy.logfatal('%s: No motors found.' % self.port_namespace)
self.dxl_io.close()
sys.exit(1)
counts = defaultdict(int)
status_str = '%s: Found %d motors - ' % (self.port_namespace, len(self.motors))
rospy.loginfo('%s, actuator initialization complete.' % status_str[:-2])
#adapted from serial_proxy.py [https://github.com/SvenzvaRobotics/mx_dynamixel], 3/17/17
def __update_motor_states(self):
num_events = 50
debug_polling_rate = False
rates = deque([float(self.update_rate)]*num_events, maxlen=num_events)
last_time = rospy.Time.now()
gr = [4,7,7,3,4,1,1]
rate = rospy.Rate(self.update_rate)
id_list = range(self.min_motor_id, self.max_motor_id+1)
rad_per_tick = 6.2831853 / 4096.0
conseq_drops = 0
while not rospy.is_shutdown():
motor_states = []
try:
status_ar = self.dxl_io.get_sync_feedback(id_list)
conseq_drops = 0
for index, state in enumerate(status_ar):
if state:
#convert to radians, and resolve multiplicative of gear ratio
state['goal'] = self.raw_to_rad(state['goal'] / gr[index])
state['position'] = self.raw_to_rad(state['position'] / gr[index])
#linear model: -9.539325804e-18 + 1.0837745x
state['load'] = (state['load'] )
state['speed'] = self.spd_raw_to_rad(state['speed'] / gr[index])
motor_states.append(MotorState(**state))
if dynamixel_io.exception: raise dynamixel_io.exception
self.error_counts['dropped'] = 0
except dynamixel_io.FatalErrorCodeError, fece:
rospy.logerr(fece)
except dynamixel_io.NonfatalErrorCodeError, nfece:
self.error_counts['non_fatal'] += 1
rospy.logdebug(nfece)
except dynamixel_io.ChecksumError, cse:
self.error_counts['checksum'] += 1
rospy.logdebug(cse)
except dynamixel_io.DroppedPacketError, dpe:
self.error_counts['dropped'] += 1
conseq_drops += 1
rospy.loginfo(dpe.message)
except OSError, ose:
if ose.errno != errno.EAGAIN:
rospy.logfatal(errno.errorcode[ose.errno])
rospy.signal_shutdown(errno.errorcode[ose.errno])
#DroppedPackets can happen due to congestion, or due to loss of connectivity.
#The latter will cause 100% drop rate
if self.error_counts['dropped'] > 10:
rospy.logerr("Lost connectivitity to servo motors.")
rospy.logerr("Shutting down driver.")
rospy.signal_shutdown("Arm connection lost.")
if motor_states:
msl = MotorStateList()
msl.motor_states = motor_states
self.motor_states_pub.publish(msl)
self.current_state = msl
# calculate actual update rate
if debug_polling_rate:
current_time = rospy.Time.now()
rates.append(1.0 / (current_time - last_time).to_sec())
self.actual_rate = round(sum(rates)/num_events, 2)
last_time = current_time
rospy.loginfo("Actual poling rate: %f", self.actual_rate)
rate.sleep()
"""
This enables velocity control mode.
Necessary for cartesian movement for remote controls
"""
def velocity_mode(self):
tup_list_dis = tuple(((1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)))
self.dxl_io.sync_set_torque_enabled(tup_list_dis)
tup_list_op = tuple(((1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,0)))
self.dxl_io.sync_set_operation_mode(tup_list_op)
tup_list_en = tuple(((1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1)))
self.dxl_io.sync_set_torque_enabled(tup_list_en)
"""
This enables the teaching mode of the Revel. Teaching mode senses outside forces and assists movement in the direction
of the felt force.
"""
def teaching_mode(self):
tup_list_dis = tuple(((1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)))
self.dxl_io.sync_set_torque_enabled(tup_list_dis)
tup_list_op = tuple(((1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)))
self.dxl_io.sync_set_operation_mode(tup_list_op)
tup_list_en = tuple(((1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1)))
self.dxl_io.sync_set_torque_enabled(tup_list_en)
self.compliance_controller = SvenzvaComplianceController(self.port_namespace, self.dxl_io, True)
rospy.sleep(0.1)
Thread(target=self.compliance_controller.start).start()
"""
Sets motor mode based on parameter file
"""
def set_user_defined_mode(self, params):
tup_list_dis = tuple(((1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)))
self.dxl_io.sync_set_torque_enabled(tup_list_dis)
tup_list_op = []
for i in range(self.min_motor_id, self.max_motor_id + 1):
tup_list_op.append((i, params[i]["mode"]))
self.dxl_io.sync_set_operation_mode(tup_list_op)
tup_list_en = tuple(((1,1),(2,1),(3,1),(4,1),(5,1),(6,1),(7,1)))
self.dxl_io.sync_set_torque_enabled(tup_list_en)
def start_modules(self):
open_close_gripper = rospy.get_param('~cycle_gripper_on_start', False)
jtac = JointTrajectoryActionController(self.port_namespace, self.dxl_io, self.current_state)
rospy.sleep(1.0)
jtac.start()
self.traj_client = actionlib.SimpleActionClient('/revel/follow_joint_trajectory', FollowJointTrajectoryAction)
self.traj_client.wait_for_server()
self.fkine_action = actionlib.SimpleActionServer("svenzva_joint_action", SvenzvaJointAction, self.fkine_action, auto_start = False)
self.fkine_action.start()
arm_utils = RevelArmServices(self.port_namespace, self.dxl_io, self.max_motor_id)
gripper_server = RevelGripperActionServer(self.port_namespace, self.dxl_io)
gripper_server.start()
mode = rospy.get_param('~mode', "user_defined")
if mode == 'velocity':
cart_server = RevelCartesianController(self.port_namespace, self.dxl_io)
rospy.loginfo("Started Cartesian controller")
compliance_demonstration = False
if compliance_demonstration:
rospy.loginfo("Starting with experimental dynamic compliance.")
self.compliance_controller = SvenzvaComplianceController(self.port_namespace, self.dxl_io,False)
rospy.sleep(0.1)
Thread(target=self.compliance_controller.start).start()
#Thread(target=self.compliance_controller.update_state).start()
"""
Initialize internal motor parameters that are reset when powered down.
Enables torque mode.
Uses settings in ../config/control_params.yaml
"""
#NOTE: Due to dynamixel limitations, initial encoder values must be [-4096, 4096]
#otherwise, the motor_states will be inaccurate
def initialze_motor_states(self):
rospack = rospkg.RosPack()
path = rospack.get_path('svenzva_drivers')
config_file = rospy.get_param('~param_file', 'control_params.yaml')
params = ''
with open( path+"/config/"+config_file, 'r') as stream:
try:
params = yaml.load(stream)
except yaml.YAMLError as exc:
print(exc)
rospy.logerr("Unable to open control_params.yaml. Exiting driver.")
exit()
mode = rospy.get_param('~mode', "user_defined")
teaching_mode = mode == "gravity"
vel_mode = mode == "velocity"
if teaching_mode:
self.teaching_mode()
return
elif vel_mode:
self.velocity_mode()
else:
#for nearly atomic context switch, use sync functions
self.set_user_defined_mode(params)
for i in range(self.min_motor_id, self.max_motor_id + 1):
self.dxl_io.set_position_p_gain(i, params[i]['p'])
self.dxl_io.set_position_i_gain(i, params[i]['i'])
self.dxl_io.set_position_d_gain(i, params[i]['d'])
self.dxl_io.set_acceleration_profile(i, params[i]['acceleration'])
self.dxl_io.set_velocity_profile(i, params[i]['velocity'])
#set current / torque limit for gripper
self.dxl_io.set_goal_current(7, 0)
self.dxl_io.set_current_limit(7, 1900)
"""
TODO
To increase reliability of packet transmission and reduce the number of packets required to fetch
motor status, set the indirect addresses on each motor.
This is REQUIRED to be called before starting any status based callbacks.
"""
"""
def set_indirect_address(self):
bulk write ( INDIR_ADDR_1, (1, MX_PRESENT_CURRENT), (2, MX_PRESENT_CURRENT), ... )
bulk write ( INDIR_ADDR_1 + 2, (1, MX_PRESENT_CURRENT+1), (2, MX_PRESENT_CURRENT+1), ...)
...
"""
"""
Given an array of joint positions (in radians), send request to individual servos
"""
def fkine_action(self, data):
goal = FollowJointTrajectoryGoal()
goal.trajectory.joint_names = ['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
point = JointTrajectoryPoint()
point.positions = data.positions
#Since this is asynchronous, the time from 2 points is 0 and the action will return immediately
point.time_from_start = rospy.Duration(0.1)
goal.trajectory.points.append(point)
self.traj_client.send_goal_and_wait(goal)
res = SvenzvaJointResult()
res.is_done = True
self.fkine_action.set_succeeded(res)
@staticmethod
def rad_to_raw(angle):
#encoder ticks = resolution / radian range
return int(round( angle * 4096.0 / 6.2831853 ))
@staticmethod
def raw_to_rad(raw):
#radians_per = radians_range / resolution
return raw * 6.2831853 / 4096.0
@staticmethod
def spd_rad_to_raw(vel):
return max(1, int(round(vel / (RPM_TO_RADSEC * RPM_PER_TICK))))
@staticmethod
def spd_raw_to_rad(vel):
return vel * RPM_PER_TICK * RPM_TO_RADSEC
if __name__ == '__main__':
try:
sd = SvenzvaDriver()
rospy.spin()
sd.disconnect()
except rospy.ROSInterruptException:
pass
|
py | 1a3258cb9308502f20253b8bc0d247550e01c60e | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # stops aggressive error message printing
import tensorflow as tf
import MLDashboard.MLDashboardBackend as MLDashboardBackend
import MLDashboard.MLCallbacksBackend as MLCallbacksBackend
from MLDashboard.MLCommunicationBackend import Message, MessageMode
import time
from MLDashboard.Examples.InteractiveDashboardDemo import get_model
class myCustomCallback(MLCallbacksBackend.DashboardCallbacks):
def __init__(self, updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config):
super().__init__(updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config)
def custom_on_test_begin(self, logs):
print("We are beginning the evaluation step.")
def run(testmode = False):
print("Starting custom callbacks demo...")
print("Setting up dashboard...")
#Create dashboard and return communication tools (this starts the process)
dashboardjsonfile = os.path.dirname(__file__) + '/dashboarddemo.json'
dashboardProcess, updatelist, returnlist = MLDashboardBackend.createDashboard(dashboardjsonfile,
openatend=not testmode)
print("Loading data...")
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print("Formatting data...")
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
print("Sampling data...")
# Limit the train data to 10000 samples
x_train = x_train[:10000]
y_train = y_train[:10000]
# Limit test data to 1000 samples
x_test = x_test[:1000]
y_test = y_test[:1000]
print("Creating model...")
model = get_model()
print("Creating custom callbacks...")
#Callbacks require update and return list for communicating with dashboard
#Model and datasets are useful for sending that data to certain modules
config = MLCallbacksBackend.CallbackConfig()
labels = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
callback = myCustomCallback(updatelist, returnlist, model, x_train, y_train, x_test, y_test, labels, config)
print("Starting training...")
trainingstarttime = time.time()
model.fit(x_train, y_train, epochs=20, callbacks=[callback])
print("Training finished in: ", round(time.time() - trainingstarttime, 3), " seconds.")
print("Evaluating model...")
model.evaluate(x_test, y_test, batch_size=128, callbacks=[callback])
updatelist.append(Message(MessageMode.End, {}))
print("Exiting cleanly...")
dashboardProcess.join()
print("Dashboard exited.")
#This handles any extra data that the dashboard sent, such as save commands
callback.HandleRemaingCommands()
if __name__ == '__main__':
run() |
py | 1a3258cf7badd255a9307d5f3956237fbeb82bff | from django.conf.urls import url
from . import views
app_name = 'artists'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
url(r'^edit/(?P<pk>\d+)/$', views.UpdateArtist.as_view(), name='edit'),
url(r'^create/$', views.CreateArtist.as_view(), name='create'),
url(r'^delete/(?P<pk>\d+)$', views.DeleteArtist.as_view(), name='delete')
] |
py | 1a3258e0941dadaf2588d5402b530e2fd8e57ba3 | # import warnings
import numpy as np
from types import SimpleNamespace
import warnings
import matplotlib.pyplot as plt
from power_planner import graphs
# EXAMPLE DATA
instance = np.random.rand(1, 100, 100)
instance_corr = np.zeros((100, 100))
# corridor: 1 is feasible region, 0 is forbidden
# pad at the border, necessary for weighted_graph processing (np.roll function)
instance_corr[6:-6, 6:-6] = 1
instance_corr[:]
cfg = SimpleNamespace(
**{
# angle weight doesn't matter
"ANGLE_WEIGHT": 0,
# maximum angle -> needed to define half donut, can stay like that
"MAX_ANGLE": 1.57079,
"MAX_ANGLE_LG": 1.57079,
# scale can stay 1 as well, probably not used
"scale": 1,
# you need to set this, the pixel-wise minimum and maximum distance
# between pylons
"PYLON_DIST_MAX": 5.0,
"PYLON_DIST_MIN": 3.0,
# if you have only one category:
"class_weights": [1],
"layer_classes": ["resistance"],
# you need to set this, the start and destination points
"dest_inds": np.array([93, 90]),
"start_inds": np.array([7, 9])
}
)
graph = graphs.WeightedKSP(instance, instance_corr)
# single shortest path (building the graph)
path, path_cost, cost_sum = graph.single_sp(**vars(cfg))
print("output path:", path)
graph.get_shortest_path_tree()
# to output k paths
ksp = graph.find_ksp(5, overlap=0.5)
# ksp ist a list of form:
# [[path1, path_costs1, cost_sum1], [path2, path_costs2, cost_sum2], ...]
ksp_output_paths = [k[0] for k in ksp]
print(ksp_output_paths)
plt.figure(figsize=(10, 10))
plt.imshow(np.tile(np.expand_dims(instance[0], 2), 3))
for path in ksp_output_paths:
path = np.asarray(path)
# switched 0 and 1 because scatter and imshow have switched axes
plt.scatter(path[:, 1], path[:, 0], s=50)
plt.savefig("test_ksp.png")
|
py | 1a3258e1990032c5d88243b126e425693f0dec39 | class Solution:
def mySqrt(self, x):
sqrt = 0
        num = -1  # candidate integer square root, incremented until num * num exceeds x
while sqrt <= x:
num += 1
sqrt = num * num
return num - 1 |
py | 1a3259ae8a661d2d7a9a4526c61abd7243d2c696 | from django.http import JsonResponse
from django.views import generic
from .models import Person
class PersonDetail(generic.DetailView):
model = Person
context_object_name = 'person'
template_name = 'people/person-detail.html.dj'
class PersonListByName(generic.ListView):
context_object_name = 'people'
template_name = 'people/person-list-by-name.html.dj'
def get(self, request, *args, **kwargs):
response = super().get(request, *args, **kwargs)
response['X-Frame-Options'] = 'ALLOWALL'
return response
def get_queryset(self):
return Person.objects.filter(name=self.kwargs['name'])
class PersonListName(generic.ListView):
model = Person
allow_empty = False
def get(self, request, *args, **kwargs):
names = self.get_queryset().values_list('name', flat=True)
return JsonResponse({
'data': list(names),
})
|
py | 1a325b35efbdf80a076af007f2f4849e9d2bfae0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Python Script for Figure 2b
-------------------------------------------------------------------------------
Article Title: A Deep Learning-Based Model of Global Terrestrial Evaporation
Author: Akash Koppa
Affiliation: Hydro-Climate Extremes Lab (H-CEL), Ghent University, Belgium
Contact: Akash Koppa ([email protected])
-------------------------------------------------------------------------------
"""
## import required libraries
import pandas as pd
import os as os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mp
import matplotlib.lines as li
import seaborn as sb
## user defined configuration
inpdir = "<< Specify path to input data here >>"
reffil = {"ref": os.path.join(inpdir, "e_observed_sites.h5")}
modfil = {"mo1": os.path.join(inpdir, "e_process_sites.h5"),
"mo2": os.path.join(inpdir, "e_process_sites.h5")}
modmap = {"ref": "FLUXNET",
"mo1": "GLEAMv35b",
"mo2": "GLEAMHybrid"}
figmap = {"ref": "FLUXNET",
"mo1": "Process-Based Model",
"mo2": "Hybrid Model"}
marmap = {"ref": "FluxMarker",
"mo1": "GLEAMv35bMarker",
"mo2": "GLEAMHybridMarker"}
sizmap = {"ref": "FluxSize",
"mo1": "GLEAMv35bSize",
"mo2": "GLEAMHybridSize"}
sitfil = {"siteda": os.path.join(inpdir, "sites.h5")}
## main code
# read in the site data
sitdat = pd.read_hdf(sitfil["siteda"])
# read in the reference FLUXNET data
refdat = pd.read_hdf(reffil["ref"])
refdat[refdat < 0.0] = 0.0
# loop through the models and calculate the correlation for every site
corall = sitdat
stdall = sitdat
rmsall = sitdat
kgeall = sitdat
for modtmp in modfil.keys():
moddat = pd.read_hdf(modfil[modtmp])
moddat[moddat < 0.0] = 0.0
cortmp = []
stdtmp = []
rmstmp = []
kgetmp = []
# loop through the sites and calculate correlation and std
for sittmp in sitdat.index:
refsit = refdat[sittmp]
refsit.name = "ref"
modsit = moddat[sittmp]
modsit.name = modtmp
datsit = pd.concat([refsit, modsit], axis = 1)
datsit = datsit.dropna(how = "any")
datcor = datsit["ref"].corr(datsit[modtmp], method = "spearman")
modstd = datsit[modtmp].std()
datrms = ((datsit[modtmp] - datsit["ref"])**2).mean() ** 0.5
        # Kling-Gupta efficiency: KGE = 1 - sqrt((r - 1)^2 + (sigma_mod/sigma_obs - 1)^2 + (mu_mod/mu_obs - 1)^2)
corrat = (datcor - 1)**2
stdrat = ((modstd/datsit["ref"].std()) - 1)**2
menrat = ((datsit[modtmp].mean()/datsit["ref"].mean()) - 1)**2
kgeval = 1 - np.sqrt(corrat + stdrat + menrat)
# append
kgetmp.append(kgeval)
cortmp.append(datcor)
stdtmp.append(modstd)
rmstmp.append(datrms)
# create a pandas series from the correlation and standard deviation data
cortm1 = pd.Series(cortmp, index = sitdat.index, name = modmap[modtmp])
stdtm1 = pd.Series(stdtmp, index = sitdat.index, name = modmap[modtmp])
rmstm1 = pd.Series(rmstmp, index = sitdat.index, name = modmap[modtmp])
kgetm1 = pd.Series(kgetmp, index = sitdat.index, name = modmap[modtmp])
# append the data to the final data frames
corall = pd.concat([corall, cortm1], axis = 1)
stdall = pd.concat([stdall, stdtm1], axis = 1)
rmsall = pd.concat([rmsall, rmstm1], axis = 1)
kgeall = pd.concat([kgeall, kgetm1], axis = 1)
# replace all infinite values with nan
stdall = stdall.replace(float('inf'), np.nan)
corall = corall.replace(float('inf'), np.nan)
rmsall = rmsall.replace(float('inf'), np.nan)
# melt all datasets
kgevio = kgeall[["svortv", "GLEAMHybrid","GLEAMv35b"]]
kgevio = kgevio.rename(columns = {"svortv": "Vegetation Type",
"GLEAMHybrid": "Hybrid Model",
"GLEAMv35b": "Process-Based Model"})
kgevio = kgevio.melt(id_vars = "Vegetation Type")
kgevio = kgevio.rename(columns = {"value": "Kling-Gupta Efficiency"})
kgevio.loc[kgevio["Kling-Gupta Efficiency"] < -1.5, "Kling-Gupta Efficiency"] = np.nan
# plot the violin plots
mm = 0.0393701
sb.set_theme(style = "darkgrid")
sb.set_style("ticks")
figure = mp.pyplot.figure(figsize = (89*mm, 89*mm))
figaxi = figure.add_subplot(1, 1, 1)
figaxi.set_title("Evaporation ($E$)", fontsize = 8)
figaxi = sb.violinplot(x = "Vegetation Type",
y = "Kling-Gupta Efficiency",
hue = "variable",
split = "true",
data = kgevio,
inner = "quartile",
palette = "Set2",
fontsize = 7,
linewidth = 1.0,
edgecolor = "black",
order = ["Short", "Tall"])
plt.legend(loc = "lower left", edgecolor = "black", fontsize = 7)
yticks = figaxi.get_yticks()
yticks[yticks == -0.5] = -0.41
figaxi.set_yticks(yticks)
figaxi.set_ylim(-2.0)
figaxi.set_xlabel(figaxi.get_xlabel(), fontsize = 8)
figaxi.set_ylabel(figaxi.get_ylabel(), fontsize = 8)
figaxi.tick_params(axis='both', which='major', labelsize=7)
plt.axhline(-0.41, color = "red",
linestyle = "solid",
linewidth = 1.0)
figure.tight_layout()
plt.savefig("<< Specify output path for the figure here >>")
|
py | 1a325b6117a0d2d635aa93b0a82e0203991c3582 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ActionsList(Model):
"""List all the actions.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar next_link: URL to fetch the next set of actions.
:vartype next_link: str
:param value: Required. Array of actions.
:type value: list[~securityinsights.models.ActionResponse]
"""
_validation = {
'next_link': {'readonly': True},
'value': {'required': True},
}
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[ActionResponse]'},
}
def __init__(self, *, value, **kwargs) -> None:
super(ActionsList, self).__init__(**kwargs)
self.next_link = None
self.value = value
|
py | 1a325b79ad2bab1b12e9b8d21ee2b5fc391eca85 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 00:38:17 2019
@author: yifan
"""
'''
Energy evaluation function given a super cell
'''
import os
import json
import pickle
import random
import numpy as np
from ase import Atom, Atoms
from ase.build import surface
from ase.data import covalent_radii
from ase.io import read, write
from ase.visualize import view
from numpy.linalg import norm
from sklearn.metrics import mean_squared_error
from itertools import combinations
import lattice_functions as lf
from set_ce_lattice import dz, mother
import matplotlib
# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
font = {'family': 'normal', 'size': 15}
matplotlib.rc('font', **font)
import platform
HomePath = os.path.expanduser('~')
ProjectPath = os.path.join(HomePath, 'Documents', 'GitHub', 'Pdn-CO-Stability')
if platform.system() == 'Linux':
ProjectPath = '/work/ccei_biomass/users/wangyf/cluster_project/CE_opt'
path = os.path.join(ProjectPath, 'Pdn-CE')
#%%
'''
Useful functions
'''
'''
ind - individual, a one-hot encoding array consisting of 0s and 1s
ind_index - config or occupied node index list consisting of integer numbers
'''
def occupancy():
'''
    Create an occupancy value for a node in the configuration: either 0 or 1
'''
occ = random.randint(0, 1)
return occ
def one_hot_to_index(individual):
'''
Convert an individual from one hot encoding to a list index
'''
ind_index = list(np.nonzero(individual)[0])
return ind_index
def index_to_one_hot(ind_index, n_nodes):
'''
Convert an individual from a list index to one hot encoding
'''
individual = np.zeros(n_nodes, dtype=int)
individual[np.array(ind_index)] = 1
return individual
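# Quick illustration of the two encodings above (not in the original script):
# with n_nodes = 5, index_to_one_hot([0, 3], 5) returns array([1, 0, 0, 1, 0]) and
# one_hot_to_index(np.array([1, 0, 0, 1, 0])) returns [0, 3].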
def check_Pd_Pd_distance(ind_index, mother):
'''
Takes in configuration and return False if atoms are closer than nearest neighbors
'''
acceptance_flag = True
combos = list(combinations(ind_index, 2))
ncombo = len(combos)
for i in range(ncombo):
pt1 = mother[combos[i][0]]
pt2 = mother[combos[i][1]]
distance = lf.two_points_D(pt1, pt2)
if distance < 1.0:
acceptance_flag = False
break
return acceptance_flag
def check_Pd_Pd_neighboring(occ_node_index, ind_index, mother):
'''
Takes in a node index and mother
return if the node is near an existing node
'''
acceptance_flag = True
pt1 = mother[occ_node_index[0]]
min_distance = np.min([lf.two_points_D(pt1, pt2) for pt2 in mother[ind_index] if not np.all(pt2 == pt1)])
# print(min_distance)
if not min_distance == 1.0:
acceptance_flag = False
return acceptance_flag
def swap_occ_empty(ind):
'''
Core function of the random walk
Swap an occupied site and an empty site
takes in one hot numpy array - ind
return the new configuration and the chosen node
'''
x_new = ind.copy()
occ_indices = np.where(x_new == 1)[0]
chosen_occ_i = np.random.choice(occ_indices, 1)
x_new[chosen_occ_i] = 0
empty_indices = np.where(x_new == 0)[0]
chosen_empty_i = np.random.choice(empty_indices, 1)
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def append_support(ind_index, mother, view_flag=False):
'''
Append the configuration onto a ceria support surface
- Inputs
- ind_index : the occupied nodes for a given configuration
- mother : the mother cell
- view_flag : show in ase GUI
'''
# Useful bond information
Pdr = covalent_radii[46]
#Or = covalent_radii[8]
PdPd = Pdr * 2
PdO = 2.1 # the average PdO length take from CONTCAR files
def ceria():
a = 5.49 # Lattice constant
CeO2 = Atoms('Ce4O8', scaled_positions=[(0., 0., 0.),
(0., 0.5, 0.5),
(0.5, 0., 0.5),
(0.5, 0.5, 0.),
(0.75, 0.25, 0.25),
(0.25, 0.75, 0.75),
(0.75, 0.75, 0.75),
(0.25, 0.25, 0.25),
(0.25, 0.25, 0.75),
(0.75, 0.75, 0.25),
(0.25, 0.75, 0.25),
(0.75, 0.25, 0.75)],
cell=[a, a, a],
pbc=True)
#(1,1,1) is the slab type. There are 2 unit cells along z direction
slab = surface(CeO2, (1, 1, 1), 2)
# Repeating the slab 5 unit cells in x and 5 unit cell in y directions
# At the end the ceria slab is 10 by 10
# the Pd supercell mother is also 10 by 10
slab = slab.repeat((5, 5, 1))
slab.center(vacuum=10.0, axis=2)
# clave the top layer O atoms
del slab[[atom.index for atom in slab if atom.z > 15]]
return slab
support = ceria()
# set origin value by looking at the ase GUI, pick one oxygen atom
origin_index = 17
origin = support[origin_index].position.copy()
origin[2] = origin[2] + PdO
# superpose the Pd lattice onto ceria lattice
mother_with_support = origin + (mother - mother[0]) * PdPd
# select the occupied nodes
Pdpos = mother_with_support[ind_index]
# append Pd atoms to the support
nPd = len(Pdpos)
for i in range(nPd):
support.append(Atom('Pd', position=Pdpos[i]))
'''
Append an atom in the vaccum at the top corner
for plotting purpose just becase POV is dumb
'''
dumb_x = 0 # support.cell[0][0] + support.cell[0][1]
dumb_y = 0 # support.cell[1][0] + support.cell[1][1]
dumb_z = support.cell[2][2] - 1
dumb_pos = np.array([dumb_x, dumb_y, dumb_z])
support.append(Atom('He', position=dumb_pos))
if view_flag:
view(support)
return support, mother_with_support
def check_floating_atoms(ind, mother):
'''
Check if the configuration has any floating atoms in the layer above base layer
    If floating_flag = true, the configuration is considered infeasible,
    If floating_flag = false, the configuration can be accepted
    Input the individual one-hot coding and the mother coordinates
'''
# Convert to config list
config = one_hot_to_index(ind)
# Collect the atoms above the base layer
config_layer = lf.cal_layers(mother, dz, config)
config_base_above = list(np.array(config)[np.where(config_layer > 1 )])
# Check the CN of atoms above the base layer
Graphs = lf.initialize_graph_object(mother, dz, NN1 = 1)
Gm = Graphs.Gm
cn_list = []
for ci in config_base_above:
cni = len([i for i in list(Gm.neighbors(ci)) if i in config])
cn_list.append(cni)
# Isolated node list, CN < 2
iso_list = list(np.array(config_base_above)[np.where(np.array(cn_list) < 2)])
floating_flag = (len(iso_list) > 0)
return floating_flag
#%%
class Pdn():
def __init__(self, model_file, mother=mother, super_cell_flag=False):
'''
loading the regression results
'''
self.mother = mother
# The flag to inluce 1NN and edges shorter than 1NN
NN1 = 1
[self.Gcv, self.J, self.intercept, self.RMSE_test_atom, self.RMSE_test_site] = pickle.load(open(model_file, "rb"))
self.super_cell_flag = super_cell_flag
# Initialize graph object
self.Graphs = lf.initialize_graph_object(self.mother, dz, NN1 = 1)
# Initialize calculation object
empty = 'grey'
filled = 'r'
occ = [empty, filled]
self.Cal = lf.calculations(occ)
self.Gm = self.Graphs.Gm
def save_super_clusters(self):
'''
        save the significant clusters in the super cell to a json file
called 'clusters_super_nonzero.json'
'''
with open('clusters_super_cell.json') as f:
Gcv_super = json.load(f)['Gcv']
Gcv_super_nonzero = []
Gcv_model_nonrepeat = [Gi[0] for Gi in self.Gcv] # take the first clusters in each list
for Gi_super in Gcv_super:
for Gi_model_nonrepeat in Gcv_model_nonrepeat: # check if the first one is in Gcv_super
if Gi_model_nonrepeat in Gi_super:
Gcv_super_nonzero.append(Gi_super)
# save to a json file
Gcv_super_nonzero_dict = {'Gcv': Gcv_super_nonzero}
with open(os.path.join(path, 'clusters_super_nonzero.json'), 'w') as outfile:
json.dump(Gcv_super_nonzero_dict, outfile)
def load_super_cluster(self):
'''
load 'cluster_super_cell.json'
'''
with open(os.path.join(path, 'clusters_super_nonzero.json')) as f:
self.Gcv_super = json.load(f)['Gcv']
self.Gcv = self.Gcv_super # substitue the original Gcv
def load_config(self, ind_index):
'''
load the configuration into self.Graph.Gsv
'''
if self.super_cell_flag:
self.load_super_cluster()
self.Graphs.get_configs([ind_index])
def predict_E(self, ind_index):
'''
Predict Energy of the cluster only, take in ind index
'''
self.load_config(ind_index)
pi_pred = self.Cal.get_pi_matrix_l(self.Graphs.Gsv, self.Gcv)
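        # cluster expansion energy: E = sum over clusters of (correlation pi_alpha * interaction J_alpha) + intercept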
E_pred = float(np.dot(pi_pred, self.J) + self.intercept)
# return Graphs
return E_pred, pi_pred
def swap_occ_empty_fast(self, ind):
'''
Core function of the random walk
Swap an occupied site and a NEARBY (must be 1NN) empty site
takes in one hot numpy array - ind
'''
x_new = ind.copy()
occ_indices = list(np.where(x_new == 1)[0])
self.load_config(occ_indices)
config_G = self.Graphs.Gsv[0]
NN1_list = []
for node_i in occ_indices:
NN1_list += list(config_G.neighbors(node_i))
NN1_list = list(set(NN1_list))
NN1_list_empty = [i for i in NN1_list if i not in occ_indices]
chosen_occ_i = np.random.choice(occ_indices, 1)
chosen_empty_i = np.random.choice(NN1_list_empty, 1)
if not chosen_occ_i == chosen_empty_i:
x_new[chosen_occ_i] = 0
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def swap_occ_empty_reverse(self, ind):
'''
Core function of the random walk
Swap an occupied site to an empty site on the base
takes in one hot numpy array - ind
'''
x_new = ind.copy()
occ_indices = list(np.where(x_new == 1)[0])
base_indices = np.where(self.mother[:,2] == dz)[0]
base_indices_empty = list(np.where(x_new[base_indices] == 0)[0])
chosen_occ_i = np.random.choice(occ_indices, 1)
chosen_empty_i = np.random.choice(base_indices_empty, 1)
if not chosen_occ_i == chosen_empty_i:
x_new[chosen_occ_i] = 0
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
def swap_iso_neighbors(self, ind, alpha1=1.0, alpha2=0.25):
'''
New version
Core function of the random walk
if there is isolated nodes:
Randomly put n isolated nodes into NN1 nodes of the existing nodes
if there is non-isolated nodes:
Shuffle the occupied nodes to NN1 nodes of the existing nodes
takes in one hot numpy array - ind
'''
x_new = ind.copy()
config = one_hot_to_index(x_new) # convert to config
NN1_list = [] # the NN1 nodes to config
cn_list = [] # the cn number for each node
for ci in config:
NN1_neighbors_i = [i for i in list(self.Gm.neighbors(ci))]
cni = len([i for i in list(self.Gm.neighbors(ci)) if i in config])
cn_list.append(cni)
NN1_list += NN1_neighbors_i
# Unique NN1 nodes
NN1_list = list(set(NN1_list))
# All possible empty NN1 nodes
NN1_list_empty = [i for i in NN1_list if i not in config]
# Get both NN1 and NN2 nodes
NN2_list = []
for ci in NN1_list:
NN2_neighbors_i = [i for i in list(self.Gm.neighbors(ci))]
NN2_list += NN2_neighbors_i
# Unique NN1 nodes
NN2_list = list(set(NN2_list + NN1_list))
# All possible empty NN1 nodes
NN2_list_empty = [i for i in NN2_list if i not in config]
# All isolated nodes with coorination number < 2
iso_list = list(np.array(config)[np.where(np.array(cn_list) < 2)])
# Given a alpha, determine the number of nodes involved in exchange
m = int(np.floor(min(len(iso_list), len(NN1_list_empty)) * alpha1))
if m > 0: # Randomly put n isolated nodes into NN1 nodes of the existing nodes
chosen_occ_i = np.unique(np.random.choice(iso_list, m, replace=False))
x_new[chosen_occ_i] = 0
chosen_empty_i = np.unique(np.random.choice(NN1_list_empty, m, replace= False))
x_new[chosen_empty_i] = 1
if m == 0: # Shuffle the occupied nodes to NN1 nodes of the existing nodes and choose n from it
# the number of occupied nodes
n = len(config)
n_possible = [n * alpha2, len(NN2_list_empty)]
if min(n_possible) > 1:
nswap = int(np.floor(min(n_possible)))
else: nswap = 1
#print('\t Swap {} atoms'.format(nswap))
chosen_occ_i = np.unique(np.random.choice(config, nswap, replace = False))
x_new[chosen_occ_i] = 0
chosen_empty_i = np.unique(np.random.choice(NN2_list_empty, nswap, replace= False))
x_new[chosen_empty_i] = 1
return x_new, chosen_empty_i, chosen_occ_i
|
py | 1a325c4c2100609aa71bb3a7274ccaad474cf130 | #
# Copyright 2019 Xilinx, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inaccel.coral as inaccel
import numpy as np
import time
BinomialTreeInputDataTypeDouble = np.dtype([('S', np.double), ('K', np.double), ('T', np.double), ('rf', np.double), ('V', np.double), ('q', np.double), ('N', np.int32), ('packed', np.int32, 3)])
class BinomialTree:
    MAX_OPTION_CALCULATIONS = 1024
    BinomialTreeEuropeanPut = np.int32(1)
    BinomialTreeEuropeanCall = np.int32(2)
    BinomialTreeAmericanPut = np.int32(3)
    BinomialTreeAmericanCall = np.int32(4)
def __init__(self):
with inaccel.allocator:
self.inputBuffer = np.ndarray(self.MAX_OPTION_CALCULATIONS, dtype = BinomialTreeInputDataTypeDouble)
self.outputBuffer = np.ndarray(self.MAX_OPTION_CALCULATIONS, dtype = np.double)
def run(self, optionType):
self.m_runStartTime = int(round(time.time() * 1000000))
numOptions = np.int32(self.inputBuffer.size)
startIndex = np.int32(0)
if ((numOptions % 8) != 0):
raise RuntimeError("[XLNX] BinomialTree::run - number of options to calculate should be a multiple of 8")
req = inaccel.request("com.xilinx.vitis.quantitativeFinance.binomialTree.engine")
req.arg(self.inputBuffer).arg(self.outputBuffer).arg(optionType).arg(numOptions).arg(startIndex)
inaccel.submit(req).result()
self.m_runEndTime = int(round(time.time() * 1000000))
def lastruntime(self):
duration = self.m_runEndTime - self.m_runStartTime
return duration
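# Hypothetical usage sketch (the option values are illustrative and an InAccel runtime
# with the Xilinx binomial-tree accelerator must be available, so it is left commented out):
# bt = BinomialTree()
# bt.inputBuffer[0] = (110.0, 100.0, 1.0, 0.05, 0.2, 0.0, 1024, (0, 0, 0))
# bt.run(BinomialTree.BinomialTreeEuropeanCall)
# print(bt.outputBuffer[0], bt.lastruntime(), "us")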
|
py | 1a325df7b837db35a33aaca1d39d2568f2651a56 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineExtensionsOperations(object):
"""VirtualMachineExtensionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
extension_parameters, # type: "models.VirtualMachineExtension"
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineExtension"
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'vmExtensionName': self._serialize.url("vm_extension_name", vm_extension_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(extension_parameters, 'VirtualMachineExtension')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
extension_parameters, # type: "models.VirtualMachineExtension"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VirtualMachineExtension"]
"""The operation to create or update the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be created or
updated.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Create Virtual Machine Extension
operation.
:type extension_parameters: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtension
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
extension_parameters, # type: "models.VirtualMachineExtensionUpdate"
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineExtension"
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'vmExtensionName': self._serialize.url("vm_extension_name", vm_extension_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(extension_parameters, 'VirtualMachineExtensionUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
extension_parameters, # type: "models.VirtualMachineExtensionUpdate"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.VirtualMachineExtension"]
"""The operation to update the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be updated.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension.
:type vm_extension_name: str
:param extension_parameters: Parameters supplied to the Update Virtual Machine Extension
operation.
:type extension_parameters: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtensionUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineExtension or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtension]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
extension_parameters=extension_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
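# Usage sketch (editorial note, illustrative names). begin_update takes a
# VirtualMachineExtensionUpdate payload; the poller's continuation token can be saved
# and handed back through the continuation_token keyword to resume polling elsewhere.
#
#     update = models.VirtualMachineExtensionUpdate(auto_upgrade_minor_version=True)
#     poller = compute_client.virtual_machine_extensions.begin_update(
#         "my-rg", "my-vm", "CustomScriptExtension", update)
#     token = poller.continuation_token()  # persist, then pass continuation_token=token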
def _delete_initial(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'vmExtensionName': self._serialize.url("vm_extension_name", vm_extension_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine where the extension should be deleted.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension.
:type vm_extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
vm_extension_name=vm_extension_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
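# Usage sketch (editorial note, illustrative names). begin_delete returns
# LROPoller[None]; result() yields None once the 200/202/204 response has been polled
# to completion, and wait() can be used instead when the result is not needed.
#
#     compute_client.virtual_machine_extensions.begin_delete(
#         "my-rg", "my-vm", "CustomScriptExtension").result()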
def get(
self,
resource_group_name, # type: str
vm_name, # type: str
vm_extension_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineExtension"
"""The operation to get the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension.
:type vm_name: str
:param vm_extension_name: The name of the virtual machine extension.
:type vm_extension_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtension, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtension
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'vmExtensionName': self._serialize.url("vm_extension_name", vm_extension_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}'} # type: ignore
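# Usage sketch (editorial note, illustrative names). get is a plain synchronous call.
# The expand value shown is an assumption about what the service accepts; the client
# itself only forwards it as the $expand query parameter.
#
#     ext = compute_client.virtual_machine_extensions.get(
#         "my-rg", "my-vm", "CustomScriptExtension", expand="instanceView")
#     print(ext.provisioning_state)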
def list(
self,
resource_group_name, # type: str
vm_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineExtensionsListResult"
"""The operation to get all extensions of a Virtual Machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine containing the extension.
:type vm_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineExtensionsListResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineExtensionsListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineExtensionsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmName': self._serialize.url("vm_name", vm_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineExtensionsListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions'} # type: ignore
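# Usage sketch (editorial note, illustrative names). Unlike paged list operations,
# this returns a single VirtualMachineExtensionsListResult whose value attribute holds
# the extensions attached to the VM.
#
#     result = compute_client.virtual_machine_extensions.list("my-rg", "my-vm")
#     names = [ext.name for ext in (result.value or [])]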
|
py | 1a325ece0d995a843a64ca246e535616005dc57e | # Generated by Django 2.1.7 on 2019-03-08 23:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=256)),
],
),
]
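# Editorial note (not part of the generated migration): apply it with
# `python manage.py migrate`. The password column is a plain CharField, so the
# application is expected to store a hash rather than clear text, e.g.
#
#     from django.contrib.auth.hashers import make_password
#     User.objects.create(first_name="Ada", last_name="Lovelace",
#                         email="ada@example.com",
#                         password=make_password("plain-text-secret"))
#
# (User refers to the model created by this migration; its import path depends on the
# app, which is not shown in this file.)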
|