ext | sha | content |
---|---|---|
py | 7dffa9f144249c9fd11316cc0413c0c4a2342be9 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import time
import tenacity
from typing import Tuple, Optional
from airflow.settings import pod_mutation_hook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
from datetime import datetime as dt
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes import watch, client
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from airflow import AirflowException
from requests.exceptions import BaseHTTPError
from .kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
class PodStatus:
PENDING = 'pending'
RUNNING = 'running'
FAILED = 'failed'
SUCCEEDED = 'succeeded'
class PodLauncher(LoggingMixin):
def __init__(self, kube_client=None, in_cluster=True, cluster_context=None,
extract_xcom=False):
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster,
cluster_context=cluster_context)
self._watch = watch.Watch()
self.extract_xcom = extract_xcom
def run_pod_async(self, pod: V1Pod, **kwargs):
pod_mutation_hook(pod)
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self._client.create_namespaced_pod(body=sanitized_pod,
namespace=pod.metadata.namespace, **kwargs)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception('Exception when attempting '
'to create Namespaced Pod: %s', json_pod)
raise e
return resp
def delete_pod(self, pod: V1Pod):
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions())
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def run_pod(
self,
pod: V1Pod,
startup_timeout: int = 120,
get_logs: bool = True) -> Tuple[State, Optional[str]]:
"""
Launches the pod synchronously and waits for completion.
        :param pod: Pod to launch
        :param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
        :param get_logs: whether to query k8s for logs
        :return: Tuple of the final pod state and, if XCom extraction is enabled, the parsed result
"""
resp = self.run_pod_async(pod)
curr_time = dt.now()
if resp.status.start_time is None:
while self.pod_not_started(pod):
delta = dt.now() - curr_time
if delta.seconds >= startup_timeout:
raise AirflowException("Pod took too long to start")
time.sleep(1)
self.log.debug('Pod not yet started')
return self._monitor_pod(pod, get_logs)
def _monitor_pod(self, pod: V1Pod, get_logs: bool) -> Tuple[State, Optional[str]]:
if get_logs:
logs = self.read_pod_logs(pod)
for line in logs:
self.log.info(line)
result = None
if self.extract_xcom:
while self.base_container_is_running(pod):
self.log.info('Container %s has state %s', pod.metadata.name, State.RUNNING)
time.sleep(2)
result = self._extract_xcom(pod)
self.log.info(result)
result = json.loads(result)
while self.pod_is_running(pod):
self.log.info('Pod %s has state %s', pod.metadata.name, State.RUNNING)
time.sleep(2)
return self._task_status(self.read_pod(pod)), result
def _task_status(self, event):
self.log.info(
'Event: %s had an event of type %s',
event.metadata.name, event.status.phase)
status = self.process_status(event.metadata.name, event.status.phase)
return status
def pod_not_started(self, pod: V1Pod):
state = self._task_status(self.read_pod(pod))
return state == State.QUEUED
def pod_is_running(self, pod: V1Pod):
state = self._task_status(self.read_pod(pod))
return state != State.SUCCESS and state != State.FAILED
def base_container_is_running(self, pod: V1Pod):
event = self.read_pod(pod)
status = next(iter(filter(lambda s: s.name == 'base',
event.status.container_statuses)), None)
if not status:
return False
return status.state.running is not None
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(),
reraise=True
)
def read_pod_logs(self, pod: V1Pod):
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container='base',
follow=True,
tail_lines=10,
_preload_content=False
)
except BaseHTTPError as e:
raise AirflowException(
'There was an error reading the kubernetes API: {}'.format(e)
)
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(),
reraise=True
)
def read_pod(self, pod: V1Pod):
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except BaseHTTPError as e:
raise AirflowException(
'There was an error reading the kubernetes API: {}'.format(e)
)
def _extract_xcom(self, pod: V1Pod):
resp = kubernetes_stream(self._client.connect_get_namespaced_pod_exec,
pod.metadata.name, pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=['/bin/sh'], stdin=True, stdout=True,
stderr=True, tty=False,
_preload_content=False)
try:
result = self._exec_pod_command(
resp, 'cat {}/return.json'.format(PodDefaults.XCOM_MOUNT_PATH))
self._exec_pod_command(resp, 'kill -s SIGINT 1')
finally:
resp.close()
if result is None:
raise AirflowException('Failed to extract xcom from pod: {}'.format(pod.metadata.name))
return result
def _exec_pod_command(self, resp, command):
if resp.is_open():
self.log.info('Running command... %s\n', command)
resp.write_stdin(command + '\n')
while resp.is_open():
resp.update(timeout=1)
if resp.peek_stdout():
return resp.read_stdout()
if resp.peek_stderr():
self.log.info(resp.read_stderr())
break
def process_status(self, job_id, status):
status = status.lower()
if status == PodStatus.PENDING:
return State.QUEUED
elif status == PodStatus.FAILED:
self.log.info('Event with job id %s Failed', job_id)
return State.FAILED
elif status == PodStatus.SUCCEEDED:
self.log.info('Event with job id %s Succeeded', job_id)
return State.SUCCESS
elif status == PodStatus.RUNNING:
return State.RUNNING
else:
self.log.info('Event: Invalid state %s on job %s', status, job_id)
return State.FAILED
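if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): assumes a reachable
    # Kubernetes cluster/kubeconfig and an initialized Airflow settings module; the
    # pod spec below is illustrative, and the main container must be named 'base'
    # for log reading and the status checks above to work.
    example_pod = V1Pod(
        metadata=client.V1ObjectMeta(name='example-pod', namespace='default'),
        spec=client.V1PodSpec(
            restart_policy='Never',
            containers=[client.V1Container(name='base', image='ubuntu:20.04',
                                           command=['bash', '-cx', 'echo hello'])]))
    launcher = PodLauncher(in_cluster=False)
    final_state, _ = launcher.run_pod(example_pod, startup_timeout=120, get_logs=True)
    print('Pod finished with state: %s' % final_state)
    launcher.delete_pod(example_pod)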
|
py | 7dffacc75e92a8f57f4f419ef3841846ceb10ab2 | import sys
import time
import threading
from utils import console
class VIA():
SR = 4
SET_CLEAR = 128
def __init__(self, start_addr, mpu):
self.mpu = mpu
self.VIA_SR = start_addr + 0x0a # shift register
self.VIA_IFR = start_addr + 0x0d # interrupt flags register
self.VIA_IER = start_addr + 0x0e # interrupt enable register
self.SRThread = False
self.escape = False
self.quit = False
self.dbFlag = False
self.name = 'VIA'
# init
self.reset()
self.install_interrupts()
def check_debug(self, flag=None):
        if flag is not None:
self.dbFlag = flag
return self.dbFlag
def install_interrupts(self):
def getc(address):
char = console.getch_noblock(sys.stdin)
if char:
byte = ord(char)
if self.escape:
self.escape = False
if byte == 0x51 or byte == 0x71: # handles <ESC>Q or <ESC>q
byte = 0
self.quit = True
elif byte == 0x44 or byte == 0x64: # handles <ESC>D or <ESC>d
byte = 0
self.dbFlag = True
else:
if byte == 0x1b:
self.escape = True
byte = 0
else:
self.mpu.memory[self.VIA_IFR] &= 0xfb
else:
byte = 0
return byte
def SR_enable(address, value):
if value & self.SET_CLEAR:
# enable interrupts
if value & self.SR and not self.SRThread:
t = threading.Thread(target=SR_thread, daemon = True)
self.SRThread = True
t.start()
else:
# disable interrupts
if value & self.SR and self.SRThread:
self.SRThread = False
def SR_thread():
            while self.SRThread:
                # delay needed to allow processing of interrupt prior to setting it again
                # TODO: would be nice to eliminate this with a flag or something
                time.sleep(.05)
if (self.mpu.p & self.mpu.INTERRUPT == 0) and self.mpu.IRQ_pin:
if console.kbhit():
self.mpu.memory[self.VIA_IFR] |= 0x04
self.mpu.IRQ_pin = 0
count_irq = 0 # we need a short delay here
while count_irq < 100:
count_irq += 1
self.mpu.memory.subscribe_to_write([self.VIA_IER], SR_enable)
self.mpu.memory.subscribe_to_read([self.VIA_SR], getc)
def reset(self):
self.mpu.memory[self.VIA_IER] = 0
self.mpu.memory[self.VIA_IFR] = 0
#def irq(self):
#return (IFR6 and IER6) or (IFR5 and IER5) or (IFR4 and IER4) or (IFR3 and IER3) or (IFR2 and IER2) or (IFR1 and IER1) or (IFR0 and IER0)
#return (self.mpu.memory[self.VIA_IFR] and self.SR) and ((self.mpu.memory[self.VIA_IER] and self.SR))
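# Hedged usage sketch (not part of the original module): VIA expects an `mpu` object
# exposing `memory` (with subscribe_to_read/subscribe_to_write), the status register
# `p`, an `INTERRUPT` mask bit and an `IRQ_pin` attribute, as used above. Wiring is
# typically just:
#
#   via = VIA(0x7f50, mpu)   # base address is illustrative
#
# after which a write of SET_CLEAR | SR to VIA_IER starts the shift-register
# keyboard-interrupt thread.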
|
py | 7dffaccd7e0ceaab691aaf679d16462ae94bc339 | import numpy as np
import torch
##################################
from espnet.nets.transducer_decoder_interface import Hypothesis
from dataclasses import asdict
##################################
class SegmentStreamingE2E(object):
"""SegmentStreamingE2E constructor.
:param E2E e2e: E2E ASR object
:param recog_args: arguments for "recognize" method of E2E
"""
def __init__(self, e2e, recog_args, beam_search, rnnlm=None):
self._e2e = e2e
self._recog_args = recog_args
self._char_list = e2e.char_list
self._rnnlm = rnnlm
self._e2e.eval()
self._blank_idx_in_char_list = -1
for idx in range(len(self._char_list)):
if self._char_list[idx] == self._e2e.blank:
self._blank_idx_in_char_list = idx
break
self._subsampling_factor = np.prod(e2e.subsample)
self._activates = 0
self._blank_dur = 0
self._previous_input = []
self._previous_encoder_recurrent_state = None
self._encoder_states = []
self._ctc_posteriors = []
################################################
self.beam_search = beam_search
self.beam = min(beam_search.beam_size, beam_search.vocab_size)
self.beam_k = min(self.beam, (beam_search.vocab_size - 1))
self.dec_state = self.beam_search.decoder.init_state(1)
self.kept_hyps = [Hypothesis(score=0.0, yseq=[beam_search.blank], dec_state=self.dec_state)]
self.cache = {}
self._encoder_output = []
################################################
assert (
self._recog_args.batchsize <= 1
), "SegmentStreamingE2E works only with batch size <= 1"
assert (
"b" not in self._e2e.etype
), "SegmentStreamingE2E works only with uni-directional encoders"
def accept_input_for_tt(self, x):
"""Call this method each time a new batch of input is available."""
h = self._e2e.subsample_frames(x)
self._previous_input.append(h)
hyp = None
if self._activates == 0:
if "custom" in self._e2e.etype:
h = self._e2e.encode_custom(h)
else:
h = self._e2e.encode_rnn(h)
self._encoder_states.append(h)
nbest_hyps = self.beam_search(h, self.beam, self.beam_k, self.dec_state, self.kept_hyps, self.cache)
z = nbest_hyps[0].yseq
            # activate once the decoder emits any non-blank symbol
            if any(sym != self._blank_idx_in_char_list for sym in z):
self._activates = 1
else:
# h = torch.cat(self._previous_input, dim=0).view(
# -1, self._previous_input[0].size(1)
# )
if "custom" in self._e2e.etype:
h = self._e2e.encode_custom(h)
else:
h = self._e2e.encode_rnn(h)
self._encoder_states.append(h)
h = torch.cat(self._encoder_states, dim=0).view(
-1, self._encoder_states[0].size(1)
)
print(h.size())
hyp = self.beam_search(h, self.beam, self.beam_k, self.dec_state, self.kept_hyps, self.cache)
        return [asdict(n) for n in hyp] if hyp is not None else hyp |
py | 7dffadf036b5cd2909afc90558b7203cf49002b3 | #MenuTitle: Highest & Lowest Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Finds tallest and lowest glyphs / Y coordinates.
"""
maxY = None
minY = None
highest = None
lowest = None
font = Glyphs.font
for glyph in font.glyphs:
for layer in glyph.layers:
        if maxY is None or layer.bounds.origin.y + layer.bounds.size.height > maxY:
            maxY = layer.bounds.origin.y + layer.bounds.size.height
            highest = layer
        if minY is None or layer.bounds.origin.y < minY:
            minY = layer.bounds.origin.y
            lowest = layer
print( 'highest: %s' % maxY )
print( 'lowest: %s' % minY )
font.newTab( [highest, lowest] )
|
py | 7dffaebae2041d1cf9c1c7f8b0c6eebf98194fc8 | from matplotlib.colors import LogNorm
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import matplotlib as mpl
mpl.use('Agg')
def plot_history(out, history, metric='loss', title=None, width=8, height=6):
title = title or 'model {}'.format(metric)
val_metric = 'val_{}'.format(metric)
plt.figure(figsize=(width, height))
plt.plot(history.history[metric], marker='o')
plt.plot(history.history[val_metric], marker='d')
plt.title(title)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')
png = '{}.plot.{}.png'.format(out, metric)
plt.savefig(png, bbox_inches='tight')
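# Hedged usage sketch (not part of the original module): `history` is expected to be
# a Keras History object, i.e. something exposing a `history` dict keyed by metric
# names such as 'loss' and 'val_loss'. Typical call, with illustrative names:
#
#   history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=20)
#   plot_history('run1', history, metric='loss')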
def plot_scatter(data, classes, out, width=10, height=8):
cmap = plt.cm.get_cmap('gist_rainbow')
plt.figure(figsize=(width, height))
plt.scatter(data[:, 0], data[:, 1], c=classes, cmap=cmap, lw=0.5, edgecolor='black', alpha=0.7)
plt.colorbar()
png = '{}.png'.format(out)
plt.savefig(png, bbox_inches='tight')
def plot_error(y_true, y_pred, batch, file_ext, file_pre='output_dir', subsample=1000):
if batch % 10:
return
total = len(y_true)
if subsample and subsample < total:
usecols = np.random.choice(total, size=subsample, replace=False)
y_true = y_true[usecols]
y_pred = y_pred[usecols]
y_true = y_true * 100
y_pred = y_pred * 100
diffs = y_pred - y_true
bins = np.linspace(-200, 200, 100)
if batch == 0:
y_shuf = np.random.permutation(y_true)
plt.hist(y_shuf - y_true, bins, alpha=0.5, label='Random')
# plt.hist(diffs, bins, alpha=0.35-batch/100., label='Epoch {}'.format(batch+1))
plt.hist(diffs, bins, alpha=0.3, label='Epoch {}'.format(batch + 1))
plt.title("Histogram of errors in percentage growth")
plt.legend(loc='upper right')
plt.savefig(file_pre + '.histogram' + file_ext + '.b' + str(batch) + '.png')
plt.close()
# Plot measured vs. predicted values
fig, ax = plt.subplots()
plt.grid('on')
ax.scatter(y_true, y_pred, color='red', s=10)
ax.plot([y_true.min(), y_true.max()],
[y_true.min(), y_true.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.savefig(file_pre + '.diff' + file_ext + '.b' + str(batch) + '.png')
plt.close()
# UTILS for UQ / CALIBRATION VISUALIZATION
def plot_density_observed_vs_predicted(Ytest, Ypred, pred_name=None, figprefix=None):
"""Functionality to plot a 2D histogram of the distribution of observed (ground truth)
values vs. predicted values. The plot generated is stored in a png file.
Parameters
----------
Ytest : numpy array
Array with (true) observed values
Ypred : numpy array
Array with predicted values.
    pred_name : string
      Name of data column or quantity predicted (e.g. growth, AUC, etc.)
figprefix : string
String to prefix the filename to store the figure generated.
A '_density_predictions.png' string will be appended to the
figprefix given.
"""
xbins = 51
fig = plt.figure(figsize=(24, 18)) # (30,16)
ax = plt.gca()
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
ax.plot([Ytest.min(), Ytest.max()], [Ytest.min(), Ytest.max()], 'r--', lw=4.)
plt.hist2d(Ytest, Ypred, bins=xbins, norm=LogNorm())
cb = plt.colorbar()
ax.set_xlabel('Observed ' + pred_name, fontsize=38, labelpad=15.)
ax.set_ylabel('Mean ' + pred_name + ' Predicted', fontsize=38, labelpad=15.)
ax.axis([Ytest.min() * 0.98, Ytest.max() * 1.02, Ytest.min() * 0.98, Ytest.max() * 1.02])
plt.setp(ax.get_xticklabels(), fontsize=32)
plt.setp(ax.get_yticklabels(), fontsize=32)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=28)
plt.grid(True)
plt.savefig(figprefix + '_density_predictions.png')
plt.close()
print('Generated plot: ', figprefix + '_density_predictions.png')
def plot_2d_density_sigma_vs_error(sigma, yerror, method=None, figprefix=None):
"""Functionality to plot a 2D histogram of the distribution of
the standard deviations computed for the predictions vs. the
computed errors (i.e. values of observed - predicted).
The plot generated is stored in a png file.
Parameters
----------
sigma : numpy array
Array with standard deviations computed.
yerror : numpy array
Array with errors computed (observed - predicted).
    method : string
      Method used to compute the standard deviations (i.e. dropout,
      heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_density_sigma_error.png' string will be appended to the
figprefix given.
"""
xbins = 51
ybins = 31
fig = plt.figure(figsize=(24, 12)) # (30,16)
ax = plt.gca()
plt.rc('xtick', labelsize=16) # fontsize of the tick labels
plt.hist2d(sigma, yerror, bins=[xbins, ybins], norm=LogNorm())
cb = plt.colorbar()
ax.set_xlabel('Sigma (' + method + ')', fontsize=38, labelpad=15.)
ax.set_ylabel('Observed - Mean Predicted', fontsize=38, labelpad=15.)
ax.axis([sigma.min() * 0.98, sigma.max() * 1.02, -yerror.max(), yerror.max()])
plt.setp(ax.get_xticklabels(), fontsize=28)
plt.setp(ax.get_yticklabels(), fontsize=28)
cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=22)
plt.grid(True)
plt.savefig(figprefix + '_density_sigma_error.png')
plt.close()
print('Generated plot: ', figprefix + '_density_sigma_error.png')
def plot_histogram_error_per_sigma(sigma, yerror, method=None, figprefix=None):
"""Functionality to plot a 1D histogram of the distribution of
computed errors (i.e. values of observed - predicted) observed
for specific values of standard deviations computed. The range of
standard deviations computed is split in xbins values and the
1D histograms of error distributions for the smallest six
standard deviations are plotted.
The plot generated is stored in a png file.
Parameters
----------
sigma : numpy array
Array with standard deviations computed.
yerror : numpy array
Array with errors computed (observed - predicted).
    method : string
      Method used to compute the standard deviations (i.e. dropout,
      heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_histogram_error_per_sigma.png' string will be appended to
the figprefix given.
"""
xbins = 21
ybins = 31
H, xedges, yedges, img = plt.hist2d(sigma, yerror, # normed=True,
bins=[xbins, ybins])
fig = plt.figure(figsize=(14, 16))
legend = []
for ii in range(6): # (H.shape[0]):
        if ii != 1:
plt.plot(yedges[0:H.shape[1]], H[ii, :] / np.sum(H[ii, :]), marker='o',
markersize=12, lw=6.)
legend.append(str((xedges[ii] + xedges[ii + 1]) / 2))
plt.legend(legend, fontsize=16)
ax = plt.gca()
plt.title('Error Dist. per Sigma for ' + method, fontsize=40)
ax.set_xlabel('Observed - Mean Predicted', fontsize=38, labelpad=15.)
ax.set_ylabel('Density', fontsize=38, labelpad=15.)
plt.setp(ax.get_xticklabels(), fontsize=28)
plt.setp(ax.get_yticklabels(), fontsize=28)
plt.grid(True)
plt.savefig(figprefix + '_histogram_error_per_sigma.png')
plt.close()
print('Generated plot: ', figprefix + '_histogram_error_per_sigma.png')
def plot_calibration_and_errors(mean_sigma, sigma_start_index, sigma_end_index,
min_sigma, max_sigma,
error_thresholds,
error_thresholds_smooth,
err_err,
s_interpolate,
coverage_percentile,
method=None, figprefix=None,
steps=False):
"""Functionality to plot empirical calibration curves
estimated by binning the statistics of computed
standard deviations and errors.
Parameters
----------
mean_sigma : numpy array
Array with the mean standard deviations computed per bin.
sigma_start_index : non-negative integer
Index of the mean_sigma array that defines the start of
the valid empirical calibration interval (i.e. index to
the smallest std for which a meaningful error is obtained).
sigma_end_index : non-negative integer
Index of the mean_sigma array that defines the end of
the valid empirical calibration interval (i.e. index to
the largest std for which a meaningful error is obtained).
min_sigma : numpy array
Array with the minimum standard deviations computed per bin.
max_sigma : numpy array
Array with the maximum standard deviations computed per bin.
error_thresholds : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin.
error_thresholds_smooth : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin after a smoothed operation is applied
to the frequently noisy bin-based estimations.
err_err : numpy array
Vertical error bars (usually one standard deviation for a binomial
distribution estimated by bin) for the error calibration
computed empirically.
s_interpolate : scipy.interpolate python object
A python object from scipy.interpolate that computes a
univariate spline (InterpolatedUnivariateSpline) constructed
to express the mapping from standard deviation to error. This
spline is generated during the computational empirical
calibration procedure.
coverage_percentile : float
Value used for the coverage in the percentile estimation
of the observed error.
    method : string
      Method used to compute the standard deviations (i.e. dropout,
      heteroscedastic, etc.).
figprefix : string
String to prefix the filename to store the figure generated.
A '_empirical_calibration.png' string will be appended to
the figprefix given.
steps : boolean
Besides the complete empirical calibration (including raw
statistics, error bars and smoothing), also generates partial
plots with only the raw bin statistics (step1) and with only
the raw bin statistics and the smoothing interpolation (step2).
"""
xp23 = np.linspace(mean_sigma[sigma_start_index], mean_sigma[sigma_end_index], 200)
yp23 = s_interpolate(xp23)
p_cov = coverage_percentile
if steps:
# Plot raw bin statistics
fig = plt.figure(figsize=(18, 12))
ax = plt.gca()
ax.errorbar(mean_sigma, error_thresholds,
yerr=err_err,
xerr=[mean_sigma - min_sigma, max_sigma - mean_sigma],
fmt='o', ecolor='k', capthick=2, ms=8)
plt.xlabel('Sigma Predicted (' + method + ')', fontsize=24.)
plt.ylabel(str(p_cov) + '% Coverage for ABS Observed - Mean Predicted', fontsize=24.)
plt.title('Calibration', fontsize=28)
ax.axis([0, np.max(max_sigma) * 1.1, np.min(error_thresholds) * 0.9, np.max(yp23) * 1.2])
plt.grid()
plt.setp(ax.get_xticklabels(), fontsize=22)
plt.setp(ax.get_yticklabels(), fontsize=22)
plt.savefig(figprefix + '_empirical_calibration_step1.png')
plt.close()
print('Generated plot: ', figprefix + '_empirical_calibration_step1.png')
# Plot raw bin statistics and smoothing
fig = plt.figure(figsize=(18, 12))
ax = plt.gca()
ax.plot(mean_sigma, error_thresholds_smooth, 'g^', ms=12)
ax.errorbar(mean_sigma, error_thresholds,
yerr=err_err,
xerr=[mean_sigma - min_sigma, max_sigma - mean_sigma],
fmt='o', ecolor='k', capthick=2, ms=8)
plt.xlabel('Sigma Predicted (' + method + ')', fontsize=24.)
plt.ylabel(str(p_cov) + '% Coverage for ABS Observed - Mean Predicted', fontsize=24.)
plt.title('Calibration', fontsize=28)
ax.axis([0, np.max(max_sigma) * 1.1, np.min(error_thresholds) * 0.9, np.max(yp23) * 1.2])
plt.grid()
plt.setp(ax.get_xticklabels(), fontsize=22)
plt.setp(ax.get_yticklabels(), fontsize=22)
plt.savefig(figprefix + '_empirical_calibration_step2.png')
plt.close()
print('Generated plot: ', figprefix + '_empirical_calibration_step2.png')
# Plot raw bin statistics, smoothing and empirical calibration
fig = plt.figure(figsize=(18, 12))
ax = plt.gca()
ax.plot(xp23, yp23, 'rx', ms=20)
ax.plot(mean_sigma, error_thresholds_smooth, 'g^', ms=12)
ax.errorbar(mean_sigma, error_thresholds,
yerr=err_err,
xerr=[mean_sigma - min_sigma, max_sigma - mean_sigma],
fmt='o', ecolor='k', capthick=2, ms=8)
plt.xlabel('Sigma Predicted (' + method + ')', fontsize=24.)
plt.ylabel(str(p_cov) + '% Coverage for ABS Observed - Mean Predicted', fontsize=24.)
plt.title('Calibration', fontsize=28)
ax.axis([0, np.max(max_sigma) * 1.1, np.min(error_thresholds) * 0.9, np.max(yp23) * 1.2])
plt.grid()
plt.setp(ax.get_xticklabels(), fontsize=22)
plt.setp(ax.get_yticklabels(), fontsize=22)
plt.savefig(figprefix + '_empirical_calibration.png')
plt.close()
print('Generated plot: ', figprefix + '_empirical_calibration.png')
def plot_percentile_predictions(Ypred, Ypred_Lp, Ypred_Hp, percentile_list, pred_name=None, figprefix=None):
"""Functionality to plot the mean of the percentiles predicted.
The plot generated is stored in a png file.
Parameters
----------
Ypred : numpy array
Array with mid percentile predicted values.
Ypred_Lp : numpy array
Array with low percentile predicted values.
Ypred_Hp : numpy array
Array with high percentile predicted values.
percentile_list : string list
List of percentiles predicted (e.g. '10p', '90p', etc.)
    pred_name : string
      Name of data column or quantity predicted (e.g. growth, AUC, etc.)
figprefix : string
String to prefix the filename to store the figure generated.
A '_density_predictions.png' string will be appended to the
figprefix given.
"""
index_ = np.argsort(Ypred)
fig = plt.figure(figsize=(24, 18))
plt.scatter(range(index_.shape[0]), Ypred[index_])
plt.scatter(range(index_.shape[0]), Ypred_Lp[index_])
plt.scatter(range(index_.shape[0]), Ypred_Hp[index_])
plt.legend(percentile_list, fontsize=20)
plt.xlabel('Index', fontsize=18.)
plt.ylabel(pred_name, fontsize=18.)
plt.title('Predicted ' + pred_name + ' Percentiles', fontsize=28)
plt.grid()
ax = plt.gca()
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
plt.savefig(figprefix + '_percentile_predictions.png')
plt.close()
print('Generated plot: ', figprefix + '_percentile_predictions.png')
# plot training and validation metrics together and generate one chart per metrics
def plot_metrics(history, title=None, skip_ep=0, outdir='.', add_lr=False):
""" Plots keras training curves history.
Args:
skip_ep: number of epochs to skip when plotting metrics
add_lr: add curve of learning rate progression over epochs
"""
def capitalize_metric(met):
return ' '.join(s.capitalize() for s in met.split('_'))
all_metrics = list(history.history.keys())
pr_metrics = ['_'.join(m.split('_')[1:]) for m in all_metrics if 'val' in m]
epochs = np.asarray(history.epoch) + 1
if len(epochs) <= skip_ep:
skip_ep = 0
eps = epochs[skip_ep:]
hh = history.history
for p, m in enumerate(pr_metrics):
metric_name = m
metric_name_val = 'val_' + m
y_tr = hh[metric_name][skip_ep:]
y_vl = hh[metric_name_val][skip_ep:]
ymin = min(set(y_tr).union(y_vl))
ymax = max(set(y_tr).union(y_vl))
lim = (ymax - ymin) * 0.1
ymin, ymax = ymin - lim, ymax + lim
# Start figure
fig, ax1 = plt.subplots()
# Plot metrics
ax1.plot(eps, y_tr, color='b', marker='.', linestyle='-', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name))
ax1.plot(eps, y_vl, color='r', marker='.', linestyle='--', linewidth=1, alpha=0.6, label=capitalize_metric(metric_name_val))
ax1.set_xlabel('Epoch')
ax1.set_ylabel(capitalize_metric(metric_name))
ax1.set_xlim([min(eps) - 1, max(eps) + 1])
ax1.set_ylim([ymin, ymax])
ax1.tick_params('y', colors='k')
# Add learning rate
if (add_lr is True) and ('lr' in hh):
ax2 = ax1.twinx()
ax2.plot(eps, hh['lr'][skip_ep:], color='g', marker='.', linestyle=':', linewidth=1,
alpha=0.6, markersize=5, label='LR')
ax2.set_ylabel('Learning rate', color='g', fontsize=12)
ax2.set_yscale('log')
ax2.tick_params('y', colors='g')
ax1.grid(True)
legend = ax1.legend(loc='best', prop={'size': 10})
frame = legend.get_frame()
frame.set_facecolor('0.95')
if title is not None:
plt.title(title)
figpath = Path(outdir) / (metric_name + '.png')
plt.savefig(figpath, bbox_inches='tight')
plt.close()
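if __name__ == '__main__':
    # Hedged, self-contained smoke test (not part of the original module): exercises
    # plot_scatter with synthetic data; the output filename prefix is illustrative.
    rng = np.random.RandomState(0)
    demo_data = rng.normal(size=(200, 2))
    demo_classes = rng.randint(0, 4, size=200)
    plot_scatter(demo_data, demo_classes, out='demo_scatter', width=6, height=5)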
|
py | 7dffaee7434ece21e8c81c8341b8a4614b7f9b84 | """
main.py -- contains classes that model scrabble game
"""
import itertools
import multiprocessing
import operator
import random
from . import config
from . import helpers
def get_move_set_generator(new_game, reference_game, move_list):
legal_move_set = get_legal_move_set(new_game, reference_game)
player_to_move_id = new_game.move_number % len(new_game.player_rack_list)
player_score_list = reference_game.player_score_list_list[
player_to_move_id
]
player_move_number = new_game.move_number // len(new_game.player_rack_list)
target_score = player_score_list[player_move_number]
next_move_set = set(
frozenset((tile.letter, location) for tile, location in move_set)
for score, move_set in legal_move_set
if score == target_score
)
for next_move in next_move_set:
new_game_copy = copy_game(new_game)
move_list_copy = move_list[:]
player_to_move_id = (
new_game_copy.move_number % len(new_game_copy.player_rack_list)
)
next_move_str = ''.join(letter for letter, location in next_move)
new_game_copy.cheat_create_rack_word(next_move_str, player_to_move_id)
new_game_copy.next_player_move(next_move, False)
move_list_copy.append(next_move)
if new_game_copy.move_number == reference_game.move_number:
if helpers.boards_are_equivalent(reference_game.board,
new_game_copy.board):
yield move_list_copy
else:
yield from get_move_set_generator(new_game_copy,
reference_game,
move_list_copy)
def get_location_best_move_helper(argument_list):
return get_location_best_move(*argument_list)
def get_location_best_move(game, location, word_list):
player_to_move_id = game.move_number % len(game.player_rack_list)
high_score = 0
best_move = None
for word in word_list:
for is_vertical in [True, False]:
temp_game = copy_game(game)
if temp_game.place_word(word, location, is_vertical, False):
letter_location_set = (
helpers.get_word_letter_location_set(
word,
location,
is_vertical
)
)
location_set = set(location
for _, location in letter_location_set)
if helpers.all_created_words_are_english(temp_game.board,
location_set):
player_score_list = (
temp_game.player_score_list_list[player_to_move_id]
)
word_score = player_score_list[-1]
if word_score > high_score:
best_move = (location, word, is_vertical)
high_score = word_score
return high_score, best_move
def get_legal_move_set(new_game, reference_game):
all_possible_moves_set = helpers.get_all_possible_moves_set(new_game,
reference_game)
legal_move_set = set()
for move_set in all_possible_moves_set:
if helpers.move_is_legal(new_game.board,
new_game.move_number,
move_set):
temp_board = copy_board(new_game.board)
for tile, location in move_set:
temp_board[location] = tile
legal_move_set.add(
(helpers.score_move(move_set, temp_board), move_set)
)
return legal_move_set
def copy_board(input_board):
input_square_dict = input_board.board_square_dict
new_board = ScrabbleBoard()
new_square_dict = new_board.board_square_dict
for location, square in input_square_dict.items():
if square.tile:
new_board_square = new_square_dict[location]
new_board_square.letter_multiplier = square.letter_multiplier
new_board_square.word_multiplier = square.word_multiplier
new_board_square.tile = ScrabbleTile(
square.tile.letter
)
return new_board
def copy_game(input_game):
new_game = ScrabbleGame(len(input_game.player_rack_list))
new_game.board = copy_board(input_game.board)
new_game.move_number = input_game.move_number
new_game.player_score_list_list = [
input_player_score_list[:]
for input_player_score_list in input_game.player_score_list_list
]
new_player_rack_list = []
for player_rack in input_game.player_rack_list:
new_rack = []
for tile in player_rack:
new_rack.append(ScrabbleTile(tile.letter))
new_player_rack_list.append(new_rack)
new_game.player_rack_list = new_player_rack_list
return new_game
def get_move_set_notation(move_set, reference_game):
new_game = ScrabbleGame(len(reference_game.player_rack_list))
word_notation_list_list = [
[] for _ in range(len(reference_game.player_rack_list))
]
for move in move_set:
player_to_move_id = (
new_game.move_number % len(new_game.player_rack_list)
)
move_location_set = set(location for _, location in move)
rack_word = ''.join([letter for letter, _ in move])
new_game.cheat_create_rack_word(rack_word, player_to_move_id)
player_words_notation_list = (
word_notation_list_list[player_to_move_id]
)
notation_word_list = []
new_game.next_player_move(move, False)
word_set = helpers.get_word_set(new_game.board, move_location_set)
for word_location_set in word_set:
if word_location_set:
notation_word_list.append(
helpers.get_move_word(word_location_set,
move_location_set,
new_game)
)
player_words_notation_list.append(notation_word_list)
return word_notation_list_list
def get_new_tile_bag():
return [ScrabbleTile(letter=letter)
for letter, magnitude in config.LETTER_DISTRIBUTION_DICT.items()
for _ in range(magnitude)]
def read_input_file(input_filename):
board_character_array, player_score_list_list = helpers.load_file(
input_filename
)
num_players = len(player_score_list_list)
game = ScrabbleGame(num_players)
game.player_score_list_list = player_score_list_list
game.player_rack_list = [[] for _ in range(num_players)]
game.tile_bag = []
game.move_number = sum(1 for player_score_list in player_score_list_list
for _ in player_score_list)
for row_number, row in enumerate(board_character_array, 1):
for column_number, letter in enumerate(row):
if letter:
column_letter = chr(ord('a') + column_number)
this_location = (column_letter, row_number)
game.board[this_location] = ScrabbleTile(letter)
return game
def initialize_new_board_square_dict():
initial_board_square_dict = {}
for column in config.BOARD_CODE_DICT:
for row in range(1, config.BOARD_NUM_ROWS + 1):
location = (column, row)
word_multiplier = config.WORD_SCORE_MULT_LOCATION_DICT.get(
location,
1
)
letter_multiplier = config.LETTER_SCORE_MULT_LOCATION_DICT.get(
location,
1
)
initial_board_square_dict[location] = BoardSquare(
tile=None,
word_multiplier=word_multiplier,
letter_multiplier=letter_multiplier
)
return initial_board_square_dict
def recover_game(input_filename):
reference_game = read_input_file(input_filename)
new_game = ScrabbleGame(
len(reference_game.player_rack_list)
)
move_set_generator = get_move_set_generator(new_game,
reference_game,
[])
move_set_list = list(move_set_generator)
notated_move_set_list = [
get_move_set_notation(move_set, reference_game)
for move_set in move_set_list
]
return notated_move_set_list
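# Hedged usage sketch (not part of the original module): recover_game() reads a saved
# board/score file (format defined by helpers.load_file) and reconstructs every move
# sequence that could have produced it. Assuming the package is importable, e.g.:
#
#   from scrabble import main
#   for notated_move_set in main.recover_game('sample_input.txt'):  # illustrative path
#       print(notated_move_set)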
class ScrabbleTile(object):
def __init__(self, letter):
self.letter = letter
self.point_value = config.LETTER_POINT_VALUES_DICT[letter]
def __repr__(self):
return self.letter
class BoardSquare(object):
def __init__(self, tile, letter_multiplier, word_multiplier):
self.tile = tile
self.letter_multiplier = letter_multiplier
self.word_multiplier = word_multiplier
def __repr__(self):
if self.tile:
return str(self.tile)
else:
return config.BLANK_SQUARE_CHARACTER
class ScrabbleBoard(object):
def __init__(self):
self.board_square_dict = initialize_new_board_square_dict()
center_row = (config.BOARD_NUM_ROWS // 2) + 1
center_column = chr(
(config.BOARD_NUM_COLUMNS // 2) + config.LOWER_COLUMN_INT_BOUND
)
self.start_square_location = (center_column, center_row)
def __getitem__(self, key):
return self.board_square_dict.get(key).tile
def __setitem__(self, key, value):
self.board_square_dict[key].tile = value
def __repr__(self):
square_letter_gen = (
str(square)
for location, square in sorted(self.board_square_dict.items())
)
board_array_first_row = (
[' ', ' '] + sorted(list(config.BOARD_CODE_DICT.keys()))
)
board_array = [board_array_first_row] # Column labels
board_array.extend(
[' ' for _ in range(config.BOARD_NUM_COLUMNS+1)]
for _ in range(config.BOARD_NUM_ROWS)
)
for i in range(1, config.BOARD_NUM_ROWS + 1):
board_array[i][0] = str(i) # Row labels
if i < 10:
board_array[i][0] += ' ' # Pad single digit numbers with space
for j in range(1, config.BOARD_NUM_COLUMNS + 1):
board_array[j][i] = next(square_letter_gen) # swap x and y
center_row_num = (config.BOARD_NUM_ROWS // 2) + 1
center_column_num = (config.BOARD_NUM_COLUMNS // 2) + 1
start_char = config.START_SQUARE_CHARACTER
blank_char = config.BLANK_SQUARE_CHARACTER
if board_array[center_column_num][center_row_num] == blank_char:
board_array[center_column_num][center_row_num] = start_char
return_line_list = [''.join(row) for row in board_array]
return_str = '\n'.join(return_line_list)
return return_str
class ScrabbleGame(object):
def __init__(self, num_players):
self.tile_bag = get_new_tile_bag()
self.player_rack_list = self._get_new_player_rack_list(num_players)
self.board = ScrabbleBoard()
self.player_score_list_list = [[] for _ in range(num_players)]
self.move_number = 0
def __repr__(self):
player_score_str_list = []
for i, player_score_list in enumerate(self.player_score_list_list, 1):
player_score_str_list.append(
'Player {player_number}: {score}'.format(
player_number=i,
score=sum(player_score_list)
)
)
player_scores_str = '\n'.join(player_score_str_list)
player_to_move_id, _ = helpers.get_current_player_data(
self.move_number,
self.player_rack_list
)
return (
'{board}\n'
'{player_rack_list}\n'
'Moves played: {move_number}\n'
'Player {player_to_move}\'s move\n'
'{tiles_remaining} tiles remain in bag\n'
'{player_scores_str}'
).format(
board=str(self.board),
player_rack_list=self.player_rack_list,
move_number=self.move_number,
player_to_move=player_to_move_id + 1,
tiles_remaining=len(self.tile_bag),
player_scores_str=player_scores_str
)
def exchange(self, letter_list):
if (len(self.tile_bag) < config.PLAYER_RACK_SIZE or
len(letter_list) > config.PLAYER_RACK_SIZE):
return False
else:
_, player_rack = helpers.get_current_player_data(
self.move_number,
self.player_rack_list
)
player_letter_list = [tile.letter for tile in player_rack]
if helpers.move_is_sublist(letter_list, player_letter_list):
self._perform_bag_exchange(letter_list, player_rack)
self.move_number += 1
return True
else:
return False
def place_word(self, word, start_location, is_vertical_move,
allow_challenge=True):
letter_location_set = helpers.get_word_letter_location_set(
word,
start_location,
is_vertical_move
)
return self.next_player_move(letter_location_set, allow_challenge)
def next_player_move(self, letter_location_set, allow_challenge=True):
player_to_move_id, player_rack = helpers.get_current_player_data(
self.move_number,
self.player_rack_list
)
is_legal_move = helpers.move_is_legal(self.board,
self.move_number,
letter_location_set,
player_rack)
if is_legal_move:
if allow_challenge and helpers.move_successfully_challenged():
letter_location_set = set()
for move_letter, board_location in letter_location_set:
tile_index = helpers.get_rack_tile_index(player_rack,
move_letter)
tile_obj = player_rack.pop(tile_index)
self.board[board_location] = tile_obj
move_score = helpers.score_move(letter_location_set, self.board)
self.player_score_list_list[player_to_move_id].append(move_score)
self._refill_player_rack(player_rack)
self._cancel_bonus_squares(letter_location_set)
if len(player_rack) == 0 and len(self.tile_bag) == 0: # Final move
self.conclude_game(empty_rack_id=player_to_move_id)
self.move_number += 1
return True
else:
return False
def cheat_create_rack_word(self, word, player_id):
player_rack = self.player_rack_list[player_id]
for character in word:
tile = ScrabbleTile(letter=character)
player_rack.append(tile)
def conclude_game(self, empty_rack_id=None):
        if empty_rack_id is not None:
last_move_score_list = helpers.score_playing_out(
self.player_rack_list,
empty_rack_id
)
for i, last_move_score in enumerate(last_move_score_list):
self.player_score_list_list[i].append(last_move_score)
player_score_total_list = [
sum(player_score_list)
for player_score_list in self.player_score_list_list
]
winning_player_id, winning_player_score = max(
enumerate(player_score_total_list, 1),
key=operator.itemgetter(1)
)
print(
'Game Over! Player {} wins with a score of {}'.format(
winning_player_id,
winning_player_score
)
)
def get_best_move(self):
player_to_move_id = self.move_number % len(self.player_rack_list)
player_rack = self.player_rack_list[player_to_move_id]
player_letter_list = [tile.letter for tile in player_rack]
word_list = []
for i in range(1, config.PLAYER_RACK_SIZE + 1):
for this_list in itertools.permutations(player_letter_list, i):
this_word = ''.join(this_list)
word_list.append(this_word)
input_arguments_list = [
(self, location, word_list)
for location in sorted(self.board.board_square_dict)
]
process_pool = multiprocessing.Pool(config.NUM_PROCESSING_CORES)
result_list = process_pool.map(get_location_best_move_helper,
input_arguments_list)
return max(result_list)
def _get_new_player_rack_list(self, num_players):
player_rack_list = []
for _ in range(num_players):
this_rack = []
for _ in range(config.PLAYER_RACK_SIZE):
this_tile = self._draw_random_tile()
this_rack.append(this_tile)
player_rack_list.append(this_rack)
return player_rack_list
def _draw_random_tile(self):
random_index = random.randrange(0, len(self.tile_bag))
selected_tile = self.tile_bag.pop(random_index)
return selected_tile
def _refill_player_rack(self, player_rack):
while len(player_rack) < config.PLAYER_RACK_SIZE:
if self.tile_bag:
tile = self._draw_random_tile()
player_rack.append(tile)
else:
break
def _cancel_bonus_squares(self, letter_location_set):
for _, location in letter_location_set:
square = self.board.board_square_dict[location]
square.letter_multiplier = 1
square.word_multiplier = 1
def _perform_bag_exchange(self, letter_list, player_rack):
exchange_tile_list = []
for letter in letter_list:
for tile in player_rack:
if tile.letter == letter:
exchange_tile_list.append(tile)
player_rack.remove(tile)
for _ in range(len(letter_list)):
new_tile = self._draw_random_tile()
player_rack.append(new_tile)
self.tile_bag.extend(exchange_tile_list)
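if __name__ == '__main__':
    # Hedged interactive sketch (not part of the original module); because of the
    # relative imports above it must be run as a module, e.g. `python -m <package>.main`.
    # Start a two-player game, cheat a word onto player 1's rack and try to place it
    # on the start square; the ('h', 8) location is illustrative and depends on config.
    game = ScrabbleGame(2)
    game.cheat_create_rack_word('WORD', 0)
    game.place_word('WORD', ('h', 8), is_vertical_move=False, allow_challenge=False)
    print(game)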
|
py | 7dffaf8b6ba975f2e57cbc1abf8022925f5213c3 | import os
import cv2
import sys
import caffe
import utils
import argparse
import tempfile
import subprocess
import numpy as np
import _init_paths
import matplotlib.pyplot as plt
from utils import load_position_map
from test import get_probabilities_with_position
from custom_layers.dom_tree import DOMTree
import custom_layers.web_data_utils as data_utils
#----------------
# PREPARE PHANTOMJS DOWNLOAD PAGE (URL, TMPFILENAME) -> IMAGE.JPEG, DOM-TREE.JSON
# CALL PHANTOMJS WITH FILENAME
#-----------------
# NETWORK SETTINGS
N_FEATURES = 128
Y_SIZE = 1280
X_SIZE = 1280
SPATIAL_SHAPE = (Y_SIZE, X_SIZE)
TEXT_MAP_SCALE = 0.125
GAUSS_VAR = 80
def download_page(url):
print "Downloading:",url
temp_dir = tempfile.mkdtemp()
result = subprocess.check_output(["phantomjs", "download_page.js",url,temp_dir])
return temp_dir
def load_position_maps(position_map_path):
    #--- LOAD SMOOTHED POSITION MAPS
position_maps = []
for i in range(4):
path = os.path.join(position_map_path,str(i)+'.pkl')
position_maps.append(load_position_map(path,sigma=80))
return position_maps
def load_image_blob(image_path):
# load image
im = cv2.imread(image_path)
size_x = min(im.shape[1],X_SIZE)
size_y = min(im.shape[0],Y_SIZE)
# Crop
im_croped = np.zeros((Y_SIZE,X_SIZE,3),dtype=np.uint8)
im_croped[:size_y,:size_x,:] = im[:size_y,:size_x,:]
# TODO - this part is tensor_list_to_blob -> move to CUSTOM LAYER utils
n_channels = im.shape[2]
im_blob = np.zeros((1, Y_SIZE, X_SIZE, n_channels), dtype=np.float32)
im_blob[0, 0:im_croped.shape[0], 0:im_croped.shape[1], :] = im_croped
im_blob = im_blob.transpose((0, 3, 1, 2))
return im_blob
def load_text_blob(leaf_nodes):
# get text nodes
text_nodes = data_utils.get_text_nodes(leaf_nodes,N_FEATURES)
# get text maps
text_maps = data_utils.get_text_maps(text_nodes, N_FEATURES, SPATIAL_SHAPE, TEXT_MAP_SCALE)
# TODO - this part is tensor_list_to_blob -> move to CUSTOM LAYER utils
n_channels = text_maps.shape[2]
text_blob = np.zeros((1, text_maps.shape[0], text_maps.shape[1], n_channels), dtype=np.float32)
text_blob[0, 0:text_maps.shape[0], 0:text_maps.shape[1], :] = text_maps
text_blob = text_blob.transpose((0, 3, 1, 2))
return text_blob
def load_boxes_blob(leaf_nodes, max_x, max_y):
# get input boxes
boxes = np.array([leaf['position'] for leaf in leaf_nodes],dtype = np.float32)
# remove boxes outside the considered area
keep_indices = np.logical_and.reduce(((boxes[:,0]>=0), (boxes[:,1]>=0),(boxes[:,2]<=max_x), (boxes[:,3]<=max_y)))
boxes = boxes[keep_indices,:]
boxes_this_image = np.hstack((np.zeros((boxes.shape[0], 1)), boxes))
return boxes_this_image
def net_forward(model, weights, im_blob, text_blob, boxes_blob):
#LOAD NET
net = caffe.Net(model, weights, caffe.TEST)
# SET DATA
net.blobs['im_data'].reshape(*(im_blob.shape))
net.blobs['txt_data'].reshape(*(text_blob.shape))
net.blobs['boxes'].reshape(*(boxes_blob.shape))
# NET FORWARD
net.forward(im_data=im_blob.astype(np.float32, copy=False),txt_data=text_blob.astype(np.float32, copy=False),
boxes=boxes_blob.astype(np.float32, copy=False))
return net
def show(net, position_maps):
# colors for particular classes
colors = ['r','g','b']
# get image
image = net.blobs['im_data'].data
image = np.squeeze(image[0,:,:,:])
image = image/255.0
image = np.transpose(image, (1,2,0))
image = image[:,:,(2,1,0)]
plt.imshow(image)
# get predictions with boxes
predicted = net.blobs['prob'].data[:,0:4,0,0]
boxes = net.blobs['boxes'].data[:,1:5].astype(int)
# get probabilities with position likelihoods
probs = get_probabilities_with_position(boxes, predicted, position_maps)
# compute maximum
box_class = np.argmax(probs,axis=1)
max_boxes = np.argmax(probs,axis=0)
# draw result
for cls in range(1,4):
ind = max_boxes[cls]
print probs[ind]
pred_box = boxes[ind,:]
rect = plt.Rectangle((pred_box[0], pred_box[1]), pred_box[2] - pred_box[0],
pred_box[3] - pred_box[1], fill=True, alpha=0.5,facecolor=colors[cls-1],
edgecolor=colors[cls-1], linewidth=3)
plt.gca().add_patch(rect)
plt.show()
if __name__ == "__main__":
#--- Get params
parser = argparse.ArgumentParser()
parser.add_argument('--url', type=str, help='URL to classify', required=True)
parser.add_argument('--model', type=str, default='../models/inference.prototxt', help='Model definition in prototxt')
parser.add_argument('--weights', type=str, default='../models/weights/snapshot_split_1_10000.caffemodel', help='Initialize with pretrained model weights')
    parser.add_argument('--position_maps_dir', type=str, default='../models/likelihoods/', help='Directory with the position likelihood maps')
args = parser.parse_args()
#-- Load params
url = args.url
model = args.model
weights = args.weights
position_map_path = args.position_maps_dir
# DOWNLOAD PAGE
try:
download_dir = download_page(url)
except subprocess.CalledProcessError:
print "Download was not succesfull"
sys.exit(1)
screenshot_path = os.path.join(download_dir,"screenshot.jpeg")
dom_path = os.path.join(download_dir,"dom.json")
# LOAD POSITION LIKELIHOODS
position_maps = load_position_maps(position_map_path)
# LOAD IMAGE BLOB
im_blob = load_image_blob(screenshot_path)
# LOAD TEXT BLOB AND BOXES BLOB
dom = DOMTree(dom_path)
leaf_nodes = dom.getPositionedLeafNodes()
text_blob = load_text_blob(leaf_nodes)
boxes_blob = load_boxes_blob(leaf_nodes,im_blob.shape[3],im_blob.shape[2])
# NET FORWARD
net = net_forward(model, weights, im_blob, text_blob, boxes_blob)
show(net, position_maps)
|
py | 7dffb127581cf1f436c7c3e90aa366ac90265136 | import argparse
import os
from typing import List, Tuple
import torch
from comet_ml import Experiment
from easydict import EasyDict as edict
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from src.constants import SAVED_META_INFO_PATH, SAVED_MODELS_BASE_PATH
from src.data_loader.data_set import Data_Set
from src.experiments.evaluation_utils import evaluate
from src.models.callbacks.upload_comet_logs import UploadCometLogs
from src.models.semisupervised.denoised_heatmap_head_model import DenoisedHeatmapHead
from src.models.semisupervised.denoised_supervised_head_model import (
DenoisedSupervisedHead,
)
from src.models.semisupervised.heatmap_head_model import HeatmapHead
from src.models.semisupervised.supervised_head_model import SupervisedHead
from src.models.supervised.baseline_model import BaselineModel
from src.models.supervised.denoised_baseline import DenoisedBaselineModel
from src.models.supervised.denoised_heatmap_model import DenoisedHeatmapmodel
from src.models.supervised.heatmap_model import HeatmapPoseModel
from src.models.unsupervised.pairwise_heatmap_model import PairwiseHeatmapModel
from src.models.unsupervised.pairwise_model import PairwiseModel
from src.models.unsupervised.simclr_heatmap_model import SimCLRHeatmap
from src.models.unsupervised.simclr_model import SimCLR
from src.models.utils import get_latest_checkpoint
from src.models.callbacks.model_checkpoint import UpdatedModelCheckpoint
from src.utils import get_console_logger
from torch.utils.data import WeightedRandomSampler
from torch.utils.data.dataset import ConcatDataset
def get_general_args(
description: str = "Script for training baseline supervised model",
) -> argparse.Namespace:
"""Function to parse the arguments given as input to a general experiment.
    Only parses augmentation flags and data parameters like training ratio, num_workers,
    batch size, epochs.
Returns:
argparse.Namespace: Parsed arguments as namespace.
"""
parser = argparse.ArgumentParser(description=description)
# Augmentation flags
parser.add_argument(
"--color_drop", action="store_true", help="To enable random color drop"
)
parser.add_argument(
"--color_jitter", action="store_true", help="To enable random jitter"
)
parser.add_argument("--crop", action="store_true", help="To enable cropping")
parser.add_argument(
"--cut_out", action="store_true", help="To enable random cur out"
)
parser.add_argument("--flip", action="store_true", help="To enable random flipping")
parser.add_argument(
"--gaussian_blur", action="store_true", help="To enable gaussina blur"
)
parser.add_argument(
"--rotate", action="store_true", help="To rotate samples randomly"
)
parser.add_argument(
"--random_crop", action="store_true", help="To enable random cropping"
)
parser.add_argument("--resize", action="store_true", help="To enable resizing")
parser.add_argument(
"--sobel_filter", action="store_true", help="To enable sobel filtering"
)
parser.add_argument(
"--gaussian_noise", action="store_true", help="To add gaussian noise."
)
parser.add_argument("-tag", action="append", help="Tag for comet", default=[])
# Training and data loader params.
parser.add_argument("-batch_size", type=int, help="Batch size")
parser.add_argument("-epochs", type=int, help="Number of epochs")
parser.add_argument("-seed", type=int, help="To add seed")
parser.add_argument(
"-num_workers", type=int, help="Number of workers for Dataloader."
)
parser.add_argument(
"-train_ratio", type=float, help="Ratio of train:validation split."
)
parser.add_argument(
"-accumulate_grad_batches",
type=int,
help="Number of batches to accumulate gradient.",
)
parser.add_argument("-lr", type=float, help="learning rate", default=None)
parser.add_argument(
"-optimizer",
type=str,
help="Select optimizer",
default=None,
choices=["LARS", "adam"],
)
parser.add_argument(
"--denoiser", action="store_true", help="To enable denoising", default=False
)
parser.add_argument(
"--heatmap", action="store_true", help="To enable heatmap model", default=False
)
parser.add_argument(
"-sources",
action="append",
help="Data sources to use.",
default=[],
choices=["freihand", "interhand", "mpii", "youtube"],
)
parser.add_argument(
"-log_interval",
type=str,
help="To enable denoising",
default="epoch",
choices=["step", "epoch"],
)
parser.add_argument(
"-experiment_key",
type=str,
help="Experiment key of pretrained encoder",
default=None,
)
parser.add_argument(
"-checkpoint", type=str, help="checkpoint name to restore.", default=""
)
parser.add_argument(
"-meta_file",
type=str,
help="File to save the name of the experiment.",
default=None,
)
parser.add_argument(
"-experiment_name", type=str, help="experiment name for logging", default=""
)
parser.add_argument(
"-save_period",
type=int,
help="interval at which experiments should be saved",
default=1,
)
parser.add_argument(
"-save_top_k", type=int, help="Top snapshots to save", default=3
)
parser.add_argument(
"--encoder_trainable",
action="store_true",
help="To enable encoder training in SSL",
default=False,
)
parser.add_argument(
"-resnet_size",
type=str,
help="Resnet size",
default="18",
choices=["18", "34", "50", "101", "152"],
)
parser.add_argument(
"-lr_max_epochs", type=int, help="Top snapshots to save", default=None
)
parser.add_argument("-warmup_epochs", type=int, help="Warmup epochs", default=10)
parser.add_argument(
"--use_palm",
action="store_true",
help="To regress plam instead of wrist.",
default=False,
)
args = parser.parse_args()
return args
def get_hybrid1_args(
description: str = "Script for training hybrid1 model",
) -> argparse.Namespace:
"""Function to parse the arguments given as input to a hybrid1 experiment.
Returns:
argparse.Namespace: Parsed arguments as namespace.
"""
parser = argparse.ArgumentParser(description=description)
# Augmentation flags
parser.add_argument(
"-contrastive",
action="append",
help="Add augmentations for contrastive sample.",
choices=["rotate", "crop", "color_jitter"],
default=[],
)
parser.add_argument(
"-pairwise",
action="append",
help="Add augmentations for pairwise sample.",
choices=["rotate", "crop", "color_jitter"],
default=[],
)
parser.add_argument("-batch_size", type=int, help="Batch size")
parser.add_argument("-tag", action="append", help="Tag for comet", default=[])
parser.add_argument("-epochs", type=int, help="Number of epochs")
parser.add_argument("-seed", type=int, help="To add seed")
parser.add_argument(
"-num_workers", type=int, help="Number of workers for Dataloader."
)
parser.add_argument(
"-train_ratio", type=float, help="Ratio of train:validation split."
)
parser.add_argument(
"-accumulate_grad_batches",
type=int,
help="Number of batches to accumulate gradient.",
)
parser.add_argument(
"-optimizer",
type=str,
help="Select optimizer",
default=None,
choices=["LARS", "adam"],
)
parser.add_argument("-lr", type=float, help="learning rate", default=None)
parser.add_argument(
"--denoiser", action="store_true", help="To enable denoising", default=False
)
parser.add_argument(
"--heatmap", action="store_true", help="To enable heatmap model", default=False
)
parser.add_argument(
"-sources",
action="append",
help="Data sources to use.",
default=[],
choices=["freihand", "interhand", "mpii", "youtube"],
)
parser.add_argument(
"-log_interval",
type=str,
help="To enable denoising",
default="epoch",
choices=["step", "epoch"],
)
parser.add_argument(
"-meta_file",
type=str,
help="File to save the name of the experiment.",
default=None,
)
parser.add_argument(
"-save_period",
type=int,
help="interval at which experiments should be saved",
default=1,
)
parser.add_argument(
"-save_top_k", type=int, help="Top snapshots to save", default=3
)
args = parser.parse_args()
return args
def update_hybrid1_train_args(args: argparse.Namespace, train_param: edict) -> edict:
if args.pairwise is not None:
for item in args.pairwise:
train_param.pairwise.augmentation_flags[item] = True
if args.contrastive is not None:
for item in args.contrastive:
train_param.contrastive.augmentation_flags[item] = True
if args.train_ratio is not None:
train_param.train_ratio = (args.train_ratio * 100 % 100) / 100.0
if args.accumulate_grad_batches is not None:
train_param.accumulate_grad_batches = args.accumulate_grad_batches
train_param.update(
update_param(
args,
train_param,
["batch_size", "epochs", "train_ratio", "num_workers", "seed"],
)
)
return train_param
def update_train_params(args: argparse.Namespace, train_param: edict) -> edict:
"""Updates and returns the training hyper paramters as per args
Args:
args (argparse.Namespace): Arguments from get_experiement_args().
train_param (edict): Default training parameter.
Returns:
edict: Updated training parameters.
"""
if args.train_ratio is not None:
train_param.train_ratio = (args.train_ratio * 100 % 100) / 100.0
train_param.update(
update_param(
args,
train_param,
["batch_size", "epochs", "train_ratio", "num_workers", "seed", "use_palm"],
)
)
train_param.augmentation_flags = update_param(
args,
train_param.augmentation_flags,
[
"color_drop",
"color_jitter",
"crop",
"cut_out",
"flip",
"gaussian_blur",
"random_crop",
"resize",
"rotate",
"sobel_filter",
"gaussian_noise",
],
)
if args.accumulate_grad_batches is not None:
train_param.accumulate_grad_batches = args.accumulate_grad_batches
return train_param
def update_param(args: argparse.Namespace, config: edict, params: List[str]) -> edict:
"""Update the config according to the argument.
Args:
args (edict): script arguments
config (edict): configuration as read from json
params (List[str]): Name of paramters that must be edited.
Returns:
edict: Updated config.
"""
args_dict = vars(args)
for param in params:
if args_dict[param] is not None:
config[param] = args_dict[param]
return config
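# Hedged example (illustrative values): update_param only copies arguments that were
# actually supplied on the command line, leaving the config defaults untouched otherwise.
#
#   >>> cfg = edict({"batch_size": 32, "epochs": 10})
#   >>> ns = argparse.Namespace(batch_size=128, epochs=None)
#   >>> update_param(ns, cfg, ["batch_size", "epochs"])
#   {'batch_size': 128, 'epochs': 10}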
def prepare_name(prefix: str, train_param: edict, hybrid_naming: bool = False) -> str:
"""Encodes the train paramters into string for appropraite naming of experiment.
Args:
prefix (str): prefix to attach to the name example sup , simclr, ssl etc.
train_param (edict): train params used for the experiment.
Returns:
str: name of the experiment.
"""
codes = {
"color_drop": "CD",
"color_jitter": "CJ",
"crop": "C",
"cut_out": "CO",
"flip": "F",
"gaussian_blur": "GB",
"random_crop": "RC",
"resize": "Re",
"rotate": "Ro",
"sobel_filter": "SF",
"gaussian_noise": "GN",
}
if hybrid_naming:
pairwise_augmentations = "_".join(
sorted(
[
codes[key]
for key, value in train_param.pairwise.augmentation_flags.items()
if value
]
)
)
contrastive_augmentations = "_".join(
sorted(
[
codes[key]
for key, value in train_param.contrastive.augmentation_flags.items()
if value
]
)
)
return (
f"{prefix}{train_param.batch_size}_rel_{pairwise_augmentations}"
f"_con_{contrastive_augmentations}"
)
else:
augmentations = "_".join(
sorted(
[
codes[key]
for key, value in train_param.augmentation_flags.items()
if value
]
)
)
return f"{prefix}{train_param.batch_size}{augmentations}"
def save_experiment_key(
experiment_name: str, experiment_key: str, filename="default.csv"
):
"""Writes the experiemtn name and key in a file for quick reference to use the
saved models.
Args:
        experiment_name (str): Name of the experiment, from prepare_name().
experiment_key (str): comet generated experiment key.
filename (str, optional): Name of the file where the info should be appended.
Defaults to "default.csv".
"""
with open(os.path.join(SAVED_META_INFO_PATH, filename), "a") as f:
f.write(f"{experiment_name},{experiment_key}\n")
def get_nips_a1_args():
parser = argparse.ArgumentParser(
description="Experiment NIPS A1: SIMCLR ablative studies"
)
parser.add_argument(
"augmentation", type=str, default=None, help="Select augmentation to apply"
)
args = parser.parse_args()
return args
def get_nips_a2_args():
parser = argparse.ArgumentParser(
description="Experiment NIPS A2: Pairwise ablative studies"
)
parser.add_argument(
"augmentation", type=str, default=None, help="Select augmentation to apply"
)
args = parser.parse_args()
return args
def get_downstream_args():
parser = argparse.ArgumentParser(description="Downstream training experiment")
parser.add_argument("experiment_key", type=str, default=None, help="Experiment key")
parser.add_argument(
"experiment_name",
type=str,
default=None,
help="Name of the pretrained experiment",
)
parser.add_argument(
"experiment_type",
type=str,
default=None,
help="Type of experiment for tagging.",
)
parser.add_argument(
"--denoiser", action="store_true", help="To enable denoising", default=False
)
parser.add_argument(
"-num_of_checkpoints",
type=int,
help="Numberof checkpoints to fine tune",
default=-1,
)
    args = parser.parse_args()
return args
def downstream_evaluation(
model,
data: Data_Set,
num_workers: int,
batch_size: int,
logger: Experiment,
max_crop_jitter: float = 0.0,
max_rotate_angle: float = 0.0,
min_rotate_angle: float = 0.0,
seed: int = 5,
) -> Tuple[dict, dict]:
"""Returns train and validate results respectively.
Args:
model ([type]): [description]
data (Data_Set): [description]
num_workers (int): [description]
batch_size (int): [description]
logger (Experiment):
Returns:
Tuple[dict, dict]: [description]
"""
torch.manual_seed(seed)
model.eval()
if isinstance(data, ConcatDataset) and len(data.datasets) > 1:
val_weights = []
val_datasets = []
for i in range(len(data.datasets)):
val_dataset = data.datasets[i]
val_dataset.config.augmentation_params.max_angle = max_rotate_angle
val_dataset.config.augmentation_params.min_angle = min_rotate_angle
val_dataset.config.augmentation_flags.random_crop = False
val_dataset.config.augmentation_params.crop_box_jitter = [
0.0,
max_crop_jitter,
]
val_dataset.augmenter = val_dataset.get_sample_augmenter(
val_dataset.config.augmentation_params,
val_dataset.config.augmentation_flags,
)
val_datasets.append(val_dataset)
val_datasets[-1].is_training(False)
val_weights += [1.0 / len(val_datasets[-1])] * len(val_datasets[-1])
data = ConcatDataset(val_datasets)
validate_results = evaluate(
model,
data,
num_workers=num_workers,
batch_size=batch_size,
sampler=WeightedRandomSampler(
weights=val_weights, num_samples=len(val_weights), replacement=True
),
)
else:
data = data.datasets[0] if isinstance(data, ConcatDataset) else data
data.is_training(False)
data.config.augmentation_params.max_angle = max_rotate_angle
data.config.augmentation_params.min_angle = min_rotate_angle
data.config.augmentation_params.crop_box_jitter = [0.0, max_crop_jitter]
data.config.augmentation_flags.random_crop = False
data.augmenter = data.get_sample_augmenter(
data.config.augmentation_params, data.config.augmentation_flags
)
validate_results = evaluate(
model, data, num_workers=num_workers, batch_size=batch_size
)
with logger.experiment.validate():
logger.experiment.log_metrics(validate_results)
# return validate_results
def restore_model(model, experiment_key: str, checkpoint: str = ""):
"""Restores the experiment with the most recent checkpoint.
Args:
experiment_key (str): experiment key
"""
saved_state_dict = torch.load(get_latest_checkpoint(experiment_key, checkpoint))[
"state_dict"
]
print(f"REstroing {get_latest_checkpoint(experiment_key, checkpoint)}")
model.load_state_dict(saved_state_dict)
return model
def get_checkpoints(experiment_key: str, number: int = 3) -> List[str]:
"""Returns last 'n' checkpoints.
Args:
experiment_key (str): [description]
number (int, optional): [description]. Defaults to 3.
Returns:
List[str]: Name of last n checkpoints.
"""
return sorted(
os.listdir(os.path.join(SAVED_MODELS_BASE_PATH, experiment_key, "checkpoints"))
)[::-1][:number]
def get_model(experiment_type: str, heatmap_flag: bool, denoiser_flag: bool):
if experiment_type == "supervised":
if heatmap_flag and not denoiser_flag:
return HeatmapPoseModel
elif heatmap_flag and denoiser_flag:
return DenoisedHeatmapmodel
elif denoiser_flag:
return DenoisedBaselineModel
else:
return BaselineModel
elif experiment_type == "simclr":
if heatmap_flag:
return SimCLRHeatmap
else:
return SimCLR
elif experiment_type == "pairwise":
if heatmap_flag:
return PairwiseHeatmapModel
else:
return PairwiseModel
elif experiment_type == "semisupervised":
if heatmap_flag and not denoiser_flag:
return HeatmapHead
elif heatmap_flag and denoiser_flag:
return DenoisedHeatmapHead
elif denoiser_flag:
return DenoisedSupervisedHead
else:
return SupervisedHead
def get_callbacks(
logging_interval: str,
experiment_type: str,
save_top_k: int = 1,
period: int = 1,
monitor: str = "checkpoint_saving_loss",
):
upload_comet_logs = UploadCometLogs(
logging_interval, get_console_logger("callback"), experiment_type
)
lr_monitor = LearningRateMonitor(logging_interval=logging_interval)
# saving the best model as per the validation loss.
checkpoint_callback = UpdatedModelCheckpoint(
save_top_k=save_top_k, period=period, monitor=monitor
)
return {
"callbacks": [lr_monitor, upload_comet_logs],
"checkpoint_callback": checkpoint_callback,
}
def update_model_params(model_param: edict, args, data_length: int, train_param: edict):
model_param = update_param(
args,
model_param,
["optimizer", "lr", "resnet_size", "lr_max_epochs", "warmup_epochs"],
)
model_param.num_samples = data_length
model_param.batch_size = train_param.batch_size
model_param.num_of_mini_batch = train_param.accumulate_grad_batches
return model_param
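# Illustrative sketch only (not used elsewhere in this file): how get_model and
# get_callbacks are meant to be combined. The assumption, not shown in this
# module, is that the returned dict is unpacked into pytorch_lightning.Trainer
# keyword arguments (callbacks, checkpoint_callback).
def _example_assemble_training_pieces():
    model_cls = get_model("supervised", heatmap_flag=True, denoiser_flag=False)
    trainer_kwargs = get_callbacks(
        logging_interval="epoch", experiment_type="supervised", save_top_k=3
    )
    return model_cls, trainer_kwargs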
|
py | 7dffb26e5b8a0017c7a01f26b1fafbc086045b87 | """Sphinx documentation """
import os
from os.path import abspath, dirname
import sys
SETUP_PATH = abspath(dirname(dirname(__file__)))
sys.path.insert(0, SETUP_PATH)
from setup import PACKAGE_INFO # noqa: E402
SPHINX_INFO = PACKAGE_INFO["command_options"]["build_sphinx"]
if os.environ.get("READTHEDOCS"):
# Prepare environment for ReadTheDocs
from subprocess import Popen # nosec
current_dir = os.getcwd()
os.chdir(SETUP_PATH)
try:
Popen( # nosec
(sys.executable, "-m", "pip", "install", "-e", ".[all]")
).communicate()
finally:
os.chdir(current_dir)
project = SPHINX_INFO["project"][1]
copyright = SPHINX_INFO["copyright"][1]
author = PACKAGE_INFO["author"]
version = SPHINX_INFO["version"][1]
release = SPHINX_INFO["release"][1]
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
]
source_suffix = ".rst"
master_doc = "index"
language = "en"
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
pygments_style = "default"
html_theme = "sphinx_rtd_theme"
htmlhelp_basename = f"{project}doc"
latex_elements = {} # type: ignore
latex_documents = [
(master_doc, f"{project}.tex", f"{project} Documentation", author, "manual")
]
man_pages = [
(master_doc, PACKAGE_INFO["name"], f"{project} Documentation", [author], 1)
]
texinfo_documents = [
(
master_doc,
project,
f"{project} Documentation",
author,
project,
PACKAGE_INFO["description"],
"Miscellaneous",
)
]
|
py | 7dffb2c8aad8c3dbc3891fa87427ddb18097160c | from __future__ import print_function
import numpy
import sys
def main():
w0,w1,w2,alpha=.1,.1,.1,.1
twodarr=[[]]
fl=open("slpand.txt","r")
num_lines = sum(1 for line in open("slpand.txt","r"))
print("-------initial Parameters------\n")
print("W0= %f \t W1= %f \t W2= %f \t Alpha= %f \n" %(w0,w1,w2,alpha))
print("")
count=1
    line=fl.readline()  # skipped the first input line (header)
trnsample=num_lines-1
#print(trnsample)
h = numpy.empty(trnsample,float)
    while count<=trnsample:  # read exactly the training samples (header already skipped)
line=fl.readline()
inputline=line.strip()
mylist=inputline.split(",") #access from 2nd line
count1=count-1
twodarr.insert(count1,mylist)
# print()
count=count+1
fl.close()
MAXITR=10 #Max iterations
trnstep=1
while trnstep<=MAXITR:
print("******* %dth Training Step(iteration) *******\n"%trnstep)
sample=1
while sample<= trnsample: #Hypothesis and Weight values calculation
h[sample-1]=(w0*float(twodarr[sample-1][0]))+(w1*float(twodarr[sample-1][1]))+(w2*float(twodarr[sample-1][2]))
if h[sample-1]>=0:#apply Activation Function
h[sample-1]=1
else:
h[sample-1]=0
w0=w0+alpha*(float(twodarr[sample-1][3])-h[sample-1])*float(twodarr[sample-1][0])
w1=w1+alpha*(float(twodarr[sample-1][3])-h[sample-1])*float(twodarr[sample-1][1])
w2=w2+alpha*(float(twodarr[sample-1][3])-h[sample-1])*float(twodarr[sample-1][2])
print("For %dth training sample:"%sample)
print("h(x) is %f "%(h[sample-1]))
print("W0=%f W1=%f W2=%f"%(w0,w1,w2))
sample=sample+1
print()
#Error Function calculation
errsum=0
for i in range(1,trnsample+1,1):
errsum=errsum+pow((float(twodarr[i-1][3])-h[i-1]),2)
err_funct=0.5*errsum
print("Total Error = %f\n"%(err_funct))
if err_funct<0.5:
break
trnstep=trnstep+1
print("----------------------------------------------------------")
print("\nSINGLE LAYER PERCEPTON curve of form (Y=w0+w1x1+w2x2) using increamental gradient descent is:\n")
print("Y=(%f)+(%fx1)+(%fx2)"%(w0,w1,w2))
if __name__ == "__main__":
main() |
py | 7dffb32adfcfb4c2d745b8f3377513aa36283e36 | from enum import Enum
class Rotation(Enum):
NORTH = 0
SOUTH = 1
EAST = 2
WEST = 3 |
py | 7dffb3f50bb3751b8712c64d4f64e6b79904ef2c | """
Wrapper class on an mpv process
"""
import sys
import subprocess
import configuration.application_settings as app_settings
from backends.backend_process import BackendProcess
class MpvProcess(BackendProcess):
"""
A wrapper class for a mpv process
"""
def __execute(self, command):
"""
Executes the given command on the mpv process
"""
raise NotImplementedError
|
py | 7dffb42929e17147bf402cfd4886076819802ace | import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import json
json_out = cherrypy.config(**{'tools.json_out.on': True})
json_in = cherrypy.config(**{'tools.json_in.on': True})
class JsonTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def plain(self):
return 'hello'
@cherrypy.expose
@json_out
def json_string(self):
return 'hello'
@cherrypy.expose
@json_out
def json_list(self):
return ['a', 'b', 42]
@cherrypy.expose
@json_out
def json_dict(self):
return {'answer': 42}
@cherrypy.expose
@json_in
def json_post(self):
if cherrypy.request.json == [13, 'c']:
return 'ok'
else:
return 'nok'
@cherrypy.expose
@json_out
@cherrypy.config(**{'tools.caching.on': True})
def json_cached(self):
return 'hello there'
root = Root()
cherrypy.tree.mount(root)
def test_json_output(self):
if json is None:
self.skip("json not found ")
return
self.getPage("/plain")
self.assertBody("hello")
self.getPage("/json_string")
self.assertBody('"hello"')
self.getPage("/json_list")
self.assertBody('["a", "b", 42]')
self.getPage("/json_dict")
self.assertBody('{"answer": 42}')
def test_json_input(self):
if json is None:
self.skip("json not found ")
return
body = '[13, "c"]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertBody('ok')
body = '[13, "c"]'
headers = [('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(415, 'Expected an application/json content type')
body = '[13, -]'
headers = [('Content-Type', 'application/json'),
('Content-Length', str(len(body)))]
self.getPage("/json_post", method="POST", headers=headers, body=body)
self.assertStatus(400, 'Invalid JSON document')
def test_cached(self):
if json is None:
self.skip("json not found ")
return
self.getPage("/json_cached")
        self.assertStatus(200, '"hello there"')
        self.getPage("/json_cached")  # 2nd time to hit cache
        self.assertStatus(200, '"hello there"')
|
py | 7dffb4f7ab31beabb97de92ab811ee699d38b943 | from collections import ChainMap
import os, argparse
# 构造缺省参数:
defaults = {
'color': 'red',
'user': 'guest'
}
# Parse the command-line arguments:
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user')
parser.add_argument('-c', '--color')
namespace = parser.parse_args()
command_line_args = { k: v for k, v in vars(namespace).items() if v }
# Combine them into a ChainMap:
combined = ChainMap(command_line_args, os.environ, defaults)
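# Lookup order is left to right: command-line arguments win over environment
# variables, which win over the hard-coded defaults. For example (hypothetical
# invocation), running `python use_chainmap.py -u bob` with an environment
# variable literally named `color` set to `green` prints color=green, user=bob.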
# Print the parameters:
print('color=%s' % combined['color'])
print('user=%s' % combined['user']) |
py | 7dffb57cf316a9ae29f3e6025bde6480df123485 | """Unit test the backend interface."""
# fmt: off
# local imports
from mqclient import backend_interface
def test_RawQueue() -> None:
"""Test RawQueue."""
assert hasattr(backend_interface, "RawQueue")
def test_Message() -> None:
"""Test Message."""
m = backend_interface.Message('foo', b'abc')
assert m.msg_id == 'foo'
assert m.payload == b'abc'
|
py | 7dffb585f98f1b15cda86a36f82aed765431ca27 | #!/usr/bin/env python3
import argparse
import subprocess
import os
import json
import math
from jinja2 import Environment, FileSystemLoader, select_autoescape
DOCKER_COMPOSE_TEMPLATE = "docker-compose-local.yml.j2"
DOCKER_COMPOSE_OUTPUT_FILE = "docker-compose-local.generated.yml"
def spawn_local_game(game_config_path, secrets_path):
if not os.path.exists(game_config_path):
print("The specified game_config path does not exist")
return
if not os.path.exists(secrets_path):
print("The specified path to the secret folder does not exist")
return
with open(game_config_path, 'r') as f:
game_config = json.load(f)
with open(os.path.join(secrets_path, 'database-api', 'secret'), 'r') as f:
api_secret = f.read().strip()
num_scriptbots = int(math.ceil(len(game_config["teams"]) / 10.0))
num_teams = len(game_config['teams'])
active_services = [service['name'] for service in game_config['services'] if service['state'] == 'enabled']
jinja_env = Environment(
loader=FileSystemLoader(searchpath='./'),
autoescape=select_autoescape(['yml'])
)
teamvm_entrypoints = []
for team_id in range(1, len(game_config['teams'] ) + 1):
teamvm_entry = []
for service_id, service_name in enumerate(active_services):
exposed_port = 10000 + service_id + 1
portforward_cmd = "socat TCP-LISTEN:{},fork TCP:chall_{}_{}:6666".format(exposed_port, service_name, team_id)
teamvm_entry.append(portforward_cmd)
final_teamvm_entry = " & ".join(teamvm_entry)
teamvm_entrypoints.append(final_teamvm_entry)
template = jinja_env.get_template(DOCKER_COMPOSE_TEMPLATE)
with open(DOCKER_COMPOSE_OUTPUT_FILE, 'w') as out_f:
out_f.write(template.render(
num_scriptbots=num_scriptbots,
num_teams=num_teams,
api_secret=api_secret,
# services_ids=range(1, len(active_services) + 1)
service_names=active_services,
teamvm_entrypoints=teamvm_entrypoints
))
print('''
Configuration for docker-compose successfully generated in ./{0}
Spawn the infrastructure locally with the following command:
- docker-compose -f {0} up
Destroy the infrastructure locally with the following command:
- docker-compose -f {0} down -v --remove-orphans
To start the game: http://localhost:5000/game/insert?secret={1}
'''.format(DOCKER_COMPOSE_OUTPUT_FILE, api_secret))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('game_config', type=str, help="Path to game_config.json file")
argparser.add_argument('secrets_path', type=str, help="Path to the folder generated by make_secret.sh")
args = argparser.parse_args()
spawn_local_game(args.game_config, args.secrets_path)
|
py | 7dffb5d204812cc1647325acb8696e58e00c5962 | #!BPY
"""
Name: 'Wavefront (.obj)...'
Blender: 249
Group: 'Export'
Tooltip: 'Save a Wavefront OBJ File'
"""
__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone"
__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org']
__version__ = "1.22"
__bpydoc__ = """\
This script is an exporter to OBJ file format.
Usage:
Select the objects you wish to export and run this script from "File->Export" menu.
Selecting the default options from the popup box will be good in most cases.
All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d)
will be exported as mesh data.
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Campbell J Barton 2007-2009
# - V1.22- bspline import/export added (funded by PolyDimensions GmbH)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import Blender
from Blender import Mesh, Scene, Window, sys, Image, Draw
import BPyMesh
import BPyObject
import BPySys
import BPyMessages
# Returns a tuple - path,extension.
# 'hello.obj' > ('hello', '.obj')
def splitExt(path):
dotidx = path.rfind('.')
if dotidx == -1:
return path, ''
else:
return path[:dotidx], path[dotidx:]
def fixName(name):
if name == None:
return 'None'
else:
return name.replace(' ', '_')
# A Dict of Materials
# (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
MTL_DICT = {}
def write_mtl(filename):
world = Blender.World.GetCurrent()
if world:
worldAmb = world.getAmb()
else:
worldAmb = (0,0,0) # Default value
file = open(filename, "w")
file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1])
file.write('# Material Count: %i\n' % len(MTL_DICT))
# Write material/image combinations we have used.
for key, (mtl_mat_name, mat, img) in MTL_DICT.iteritems():
# Get the Blender data for the material and the image.
		# Having an image named None will cause a bug, don't do it :)
file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
if mat:
file.write('Ns %.6f\n' % ((mat.getHardness()-1) * 1.9607843137254901) ) # Hardness, convert blenders 1-511 to MTL's
file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.amb for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.ref for c in mat.rgbCol]) ) # Diffuse
file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.spec for c in mat.specCol]) ) # Specular
file.write('Ni %.6f\n' % mat.IOR) # Refraction index
file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
# 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
if mat.getMode() & Blender.Material.Modes['SHADELESS']:
file.write('illum 0\n') # ignore lighting
elif mat.getSpec() == 0:
file.write('illum 1\n') # no specular.
else:
			file.write('illum 2\n') # light normally
else:
#write a dummy material here?
file.write('Ns 0\n')
file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd 0.8 0.8 0.8\n')
file.write('Ks 0.8 0.8 0.8\n')
file.write('d 1\n') # No alpha
			file.write('illum 2\n') # light normally
# Write images!
if img: # We have an image on the face!
file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image
		elif mat: # No face image. If we have a material, search for an MTex image.
for mtex in mat.getTextures():
if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
try:
filename = mtex.tex.image.filename.split('\\')[-1].split('/')[-1]
file.write('map_Kd %s\n' % filename) # Diffuse mapping image
break
except:
# Texture has no image though its an image type, best ignore.
pass
file.write('\n\n')
file.close()
def copy_file(source, dest):
file = open(source, 'rb')
data = file.read()
file.close()
file = open(dest, 'wb')
file.write(data)
file.close()
def copy_images(dest_dir):
if dest_dir[-1] != sys.sep:
dest_dir += sys.sep
# Get unique image names
uniqueImages = {}
for matname, mat, image in MTL_DICT.itervalues(): # Only use image name
# Get Texface images
if image:
uniqueImages[image] = image # Should use sets here. wait until Python 2.4 is default.
# Get MTex images
if mat:
for mtex in mat.getTextures():
if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
image_tex = mtex.tex.image
if image_tex:
try:
uniqueImages[image_tex] = image_tex
except:
pass
# Now copy images
copyCount = 0
for bImage in uniqueImages.itervalues():
image_path = sys.expandpath(bImage.filename)
if sys.exists(image_path):
# Make a name for the target path.
dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
			if not sys.exists(dest_image_path): # Image isn't already there
print '\tCopying "%s" > "%s"' % (image_path, dest_image_path)
copy_file(image_path, dest_image_path)
copyCount+=1
print '\tCopied %d images' % copyCount
def test_nurbs_compat(ob):
if ob.type != 'Curve':
return False
for nu in ob.data:
if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier
return True
return False
def write_nurb(file, ob, ob_mat):
tot_verts = 0
cu = ob.data
# use negative indices
Vector = Blender.Mathutils.Vector
for nu in cu:
if nu.type==0: DEG_ORDER_U = 1
else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct
if nu.type==1:
print "\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported"
continue
if nu.knotsV:
print "\tWarning, surface:", ob.name, "only poly and nurbs curves supported"
continue
if len(nu) <= DEG_ORDER_U:
print "\tWarning, orderU is lower then vert count, skipping:", ob.name
continue
pt_num = 0
do_closed = (nu.flagU & 1)
do_endpoints = (do_closed==0) and (nu.flagU & 2)
for pt in nu:
pt = Vector(pt[0], pt[1], pt[2]) * ob_mat
file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
pt_num += 1
tot_verts += pt_num
file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too
file.write('cstype bspline\n') # not ideal, hard coded
file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
curve_ls = [-(i+1) for i in xrange(pt_num)]
# 'curv' keyword
if do_closed:
if DEG_ORDER_U == 1:
pt_num += 1
curve_ls.append(-1)
else:
pt_num += DEG_ORDER_U
curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve
# 'parm' keyword
tot_parm = (DEG_ORDER_U + 1) + pt_num
tot_parm_div = float(tot_parm-1)
parm_ls = [(i/tot_parm_div) for i in xrange(tot_parm)]
if do_endpoints: # end points, force param
for i in xrange(DEG_ORDER_U+1):
parm_ls[i] = 0.0
parm_ls[-(1+i)] = 1.0
file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] ))
file.write('end\n')
return tot_verts
def write(filename, objects,\
EXPORT_TRI=False, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_NORMALS_HQ=False,\
EXPORT_UV=True, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False, EXPORT_GROUP_BY_MAT=False, EXPORT_KEEP_VERT_ORDER=False,\
EXPORT_POLYGROUPS=False, EXPORT_CURVE_AS_NURBS=True):
'''
	Basic write function. The context and options must already be set.
	This can be accessed externally
eg.
write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
'''
def veckey3d(v):
return round(v.x, 6), round(v.y, 6), round(v.z, 6)
def veckey2d(v):
return round(v.x, 6), round(v.y, 6)
def findVertexGroupName(face, vWeightMap):
"""
		Searches the vertexDict to see which groups are assigned to a given face.
		We use a frequency system in order to sort out the name because a given vertex can
		belong to two or more groups at the same time. To find the right name for the face
		we list all the possible vertex group names with their frequency and then sort by
		frequency in descending order. The top element, the one shared by the highest number
		of vertices, is the face's group.
"""
weightDict = {}
for vert in face:
vWeights = vWeightMap[vert.index]
for vGroupName, weight in vWeights:
weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
if weightDict:
alist = [(weight,vGroupName) for vGroupName, weight in weightDict.iteritems()] # sort least to greatest amount of weight
alist.sort()
return(alist[-1][1]) # highest value last
else:
return '(null)'
print 'OBJ Export path: "%s"' % filename
temp_mesh_name = '~tmp-mesh'
time1 = sys.time()
scn = Scene.GetCurrent()
file = open(filename, "w")
# Write Header
file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ))
file.write('# www.blender3d.org\n')
# Tell the obj file what material file to use.
if EXPORT_MTL:
mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
# Get the container mesh. - used for applying modifiers and non mesh objects.
containerMesh = meshName = tempMesh = None
for meshName in Blender.NMesh.GetNames():
if meshName.startswith(temp_mesh_name):
tempMesh = Mesh.Get(meshName)
if not tempMesh.users:
containerMesh = tempMesh
if not containerMesh:
containerMesh = Mesh.New(temp_mesh_name)
if EXPORT_ROTX90:
mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x')
del meshName
del tempMesh
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
globalNormals = {}
# Get all meshes
for ob_main in objects:
for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
# Nurbs curve support
if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
if EXPORT_ROTX90:
ob_mat = ob_mat * mat_xrot90
totverts += write_nurb(file, ob, ob_mat)
continue
# end nurbs
# Will work for non meshes now! :)
# getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
if not me:
continue
if EXPORT_UV:
faceuv= me.faceUV
else:
faceuv = False
# We have a valid mesh
if EXPORT_TRI and me.faces:
# Add a dummy object to it.
has_quads = False
for f in me.faces:
if len(f) == 4:
has_quads = True
break
if has_quads:
oldmode = Mesh.Mode()
Mesh.Mode(Mesh.SelectModes['FACE'])
me.sel = True
tempob = scn.objects.new(me)
me.quadToTriangle(0) # more=0 shortest length
oldmode = Mesh.Mode(oldmode)
scn.objects.unlink(tempob)
Mesh.Mode(oldmode)
# Make our own list so it can be sorted to reduce context switching
faces = [ f for f in me.faces ]
if EXPORT_EDGES:
edges = me.edges
else:
edges = []
			if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is something to write
				continue # don't bother with this mesh.
if EXPORT_ROTX90:
me.transform(ob_mat*mat_xrot90)
else:
me.transform(ob_mat)
# High Quality Normals
if EXPORT_NORMALS and faces:
if EXPORT_NORMALS_HQ:
BPyMesh.meshCalcNormals(me)
else:
# transforming normals is incorrect
# when the matrix is scaled,
# better to recalculate them
me.calcNormals()
# # Crash Blender
#materials = me.getMaterials(1) # 1 == will return None in the list.
materials = me.materials
materialNames = []
materialItems = materials[:]
if materials:
for mat in materials:
if mat: # !=None
materialNames.append(mat.name)
else:
materialNames.append(None)
				# Can't use a list comprehension because some materials are None.
				# materialNames = map(lambda mat: mat.name, materials) # Bug Blender, doesn't account for null materials, still broken.
				# Possibly there are null materials; they will mess up indices
# but at least it will export, wait until Blender gets fixed.
materialNames.extend((16-len(materialNames)) * [None])
materialItems.extend((16-len(materialItems)) * [None])
# Sort by Material, then images
			# so we don't over context switch in the obj file.
if EXPORT_KEEP_VERT_ORDER:
pass
elif faceuv:
try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
elif len(materials) > 1:
try: faces.sort(key = lambda a: (a.mat, a.smooth))
except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
else:
# no materials
try: faces.sort(key = lambda a: a.smooth)
except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
# Set the default mat to no material and no image.
			contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
name1 = ob.name
name2 = ob.getData(1)
if name1 == name2:
obnamestring = fixName(name1)
else:
obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
if EXPORT_BLEN_OBS:
file.write('o %s\n' % obnamestring) # Write Object name
else: # if EXPORT_GROUP_BY_OB:
file.write('g %s\n' % obnamestring)
# Vert
for v in me.verts:
file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
# UV
if faceuv:
uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
uv_dict = {} # could use a set() here
for f_index, f in enumerate(faces):
for uv_index, uv in enumerate(f.uv):
uvkey = veckey2d(uv)
try:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
except:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
file.write('vt %.6f %.6f\n' % tuple(uv))
uv_unique_count = len(uv_dict)
del uv, uvkey, uv_dict, f_index, uv_index
# Only need uv_unique_count and uv_face_mapping
# NORMAL, Smooth/Non smoothed.
if EXPORT_NORMALS:
for f in faces:
if f.smooth:
for v in f:
noKey = veckey3d(v.no)
if not globalNormals.has_key( noKey ):
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
else:
# Hard, 1 normal from the face.
noKey = veckey3d(f.no)
if not globalNormals.has_key( noKey ):
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
if not faceuv:
f_image = None
if EXPORT_POLYGROUPS:
# Retrieve the list of vertex groups
vertGroupNames = me.getVertGroupNames()
currentVGroup = ''
# Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
vgroupsMap = [[] for _i in xrange(len(me.verts))]
for vertexGroupName in vertGroupNames:
for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1):
vgroupsMap[vIdx].append((vertexGroupName, vWeight))
for f_index, f in enumerate(faces):
f_v= f.v
f_smooth= f.smooth
f_mat = min(f.mat, len(materialNames)-1)
if faceuv:
f_image = f.image
f_uv= f.uv
# MAKE KEY
if faceuv and f_image: # Object is always true.
key = materialNames[f_mat], f_image.name
else:
key = materialNames[f_mat], None # No image, use None instead.
# Write the vertex group
if EXPORT_POLYGROUPS:
if vertGroupNames:
# find what vertext group the face belongs to
theVGroup = findVertexGroupName(f,vgroupsMap)
if theVGroup != currentVGroup:
currentVGroup = theVGroup
file.write('g %s\n' % theVGroup)
# CHECK FOR CONTEXT SWITCH
if key == contextMat:
				pass # Context already switched, don't do anything
else:
if key[0] == None and key[1] == None:
# Write a null material, since we know the context has changed.
if EXPORT_GROUP_BY_MAT:
file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null)
file.write('usemtl (null)\n') # mat, image
else:
mat_data= MTL_DICT.get(key)
if not mat_data:
# First add to global dict so we can export to mtl
# Then write mtl
# Make a new names from the mat and image name,
# converting any spaces to underscores with fixName.
					# If there is no image, don't bother adding it to the name
if key[1] == None:
mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
else:
mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
if EXPORT_GROUP_BY_MAT:
file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null)
file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
contextMat = key
if f_smooth != contextSmooth:
				if f_smooth: # was off, now on
file.write('s 1\n')
contextSmooth = f_smooth
				else: # was on, now off
file.write('s off\n')
contextSmooth = f_smooth
file.write('f')
if faceuv:
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi],\
globalNormals[ veckey3d(v.no) ])) # vert, uv, normal
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.no) ]
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi],\
no)) # vert, uv, normal
else: # No Normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi])) # vert, uv
face_vert_index += len(f_v)
else: # No UV's
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for v in f_v:
file.write( ' %d//%d' % (\
v.index+totverts,\
globalNormals[ veckey3d(v.no) ]))
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.no) ]
for v in f_v:
file.write( ' %d//%d' % (\
v.index+totverts,\
no))
else: # No Normals
for v in f_v:
file.write( ' %d' % (\
v.index+totverts))
file.write('\n')
# Write edges.
if EXPORT_EDGES:
LOOSE= Mesh.EdgeFlags.LOOSE
for ed in edges:
if ed.flag & LOOSE:
file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts))
			# Make the indices global rather than per mesh
totverts += len(me.verts)
if faceuv:
totuvco += uv_unique_count
me.verts= None
file.close()
# Now we have all our materials, save them
if EXPORT_MTL:
write_mtl(mtlfilename)
if EXPORT_COPY_IMAGES:
dest_dir = filename
# Remove chars until we are just the path.
while dest_dir and dest_dir[-1] not in '\\/':
dest_dir = dest_dir[:-1]
if dest_dir:
copy_images(dest_dir)
else:
print '\tError: "%s" could not be used as a base for an image path.' % filename
print "OBJ Export time: %.2f" % (sys.time() - time1)
def write_ui(filename):
if not filename.lower().endswith('.obj'):
filename += '.obj'
if not BPyMessages.Warning_SaveOver(filename):
return
global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\
EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\
EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\
EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS
EXPORT_APPLY_MODIFIERS = Draw.Create(0)
EXPORT_ROTX90 = Draw.Create(1)
EXPORT_TRI = Draw.Create(0)
EXPORT_EDGES = Draw.Create(1)
EXPORT_NORMALS = Draw.Create(0)
EXPORT_NORMALS_HQ = Draw.Create(0)
EXPORT_UV = Draw.Create(1)
EXPORT_MTL = Draw.Create(1)
EXPORT_SEL_ONLY = Draw.Create(1)
EXPORT_ALL_SCENES = Draw.Create(0)
EXPORT_ANIMATION = Draw.Create(0)
EXPORT_COPY_IMAGES = Draw.Create(0)
EXPORT_BLEN_OBS = Draw.Create(0)
EXPORT_GROUP_BY_OB = Draw.Create(0)
EXPORT_GROUP_BY_MAT = Draw.Create(0)
EXPORT_KEEP_VERT_ORDER = Draw.Create(1)
EXPORT_POLYGROUPS = Draw.Create(0)
EXPORT_CURVE_AS_NURBS = Draw.Create(1)
# Old UI
'''
# removed too many options are bad!
# Get USER Options
pup_block = [\
('Context...'),\
('Selection Only', EXPORT_SEL_ONLY, 'Only export objects in visible selection. Else export whole scene.'),\
('All Scenes', EXPORT_ALL_SCENES, 'Each scene as a separate OBJ file.'),\
('Animation', EXPORT_ANIMATION, 'Each frame as a numbered OBJ file.'),\
('Object Prefs...'),\
('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object. May break vert order for morph targets.'),\
('Rotate X90', EXPORT_ROTX90 , 'Rotate on export so Blenders UP is translated into OBJs UP'),\
('Keep Vert Order', EXPORT_KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\
('Extra Data...'),\
('Edges', EXPORT_EDGES, 'Edges not connected to faces.'),\
('Normals', EXPORT_NORMALS, 'Export vertex normal data (Ignored on import).'),\
('High Quality Normals', EXPORT_NORMALS_HQ, 'Calculate high quality normals for rendering.'),\
('UVs', EXPORT_UV, 'Export texface UV coords.'),\
('Materials', EXPORT_MTL, 'Write a separate MTL file with the OBJ.'),\
('Copy Images', EXPORT_COPY_IMAGES, 'Copy image files to the export directory, never overwrite.'),\
('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\
('Grouping...'),\
('Objects', EXPORT_BLEN_OBS, 'Export blender objects as "OBJ objects".'),\
('Object Groups', EXPORT_GROUP_BY_OB, 'Export blender objects as "OBJ Groups".'),\
('Material Groups', EXPORT_GROUP_BY_MAT, 'Group by materials.'),\
]
if not Draw.PupBlock('Export...', pup_block):
return
'''
# BEGIN ALTERNATIVE UI *******************
if True:
EVENT_NONE = 0
EVENT_EXIT = 1
EVENT_REDRAW = 2
EVENT_EXPORT = 3
GLOBALS = {}
GLOBALS['EVENT'] = EVENT_REDRAW
#GLOBALS['MOUSE'] = Window.GetMouseCoords()
GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
def obj_ui_set_event(e,v):
GLOBALS['EVENT'] = e
def do_split(e,v):
global EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, KEEP_VERT_ORDER, EXPORT_POLYGROUPS
if EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val:
EXPORT_KEEP_VERT_ORDER.val = 0
else:
EXPORT_KEEP_VERT_ORDER.val = 1
def do_vertorder(e,v):
global EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, KEEP_VERT_ORDER
if EXPORT_KEEP_VERT_ORDER.val:
EXPORT_BLEN_OBS.val = EXPORT_GROUP_BY_OB.val = EXPORT_GROUP_BY_MAT.val = EXPORT_APPLY_MODIFIERS.val = 0
else:
if not (EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val):
EXPORT_KEEP_VERT_ORDER.val = 1
def do_help(e,v):
url = __url__[0]
print 'Trying to open web browser with documentation at this address...'
print '\t' + url
try:
import webbrowser
webbrowser.open(url)
except:
print '...could not open a browser window.'
def obj_ui():
ui_x, ui_y = GLOBALS['MOUSE']
# Center based on overall pup size
ui_x -= 165
ui_y -= 140
global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\
EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\
EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\
EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS
Draw.Label('Context...', ui_x+9, ui_y+239, 220, 20)
Draw.BeginAlign()
EXPORT_SEL_ONLY = Draw.Toggle('Selection Only', EVENT_NONE, ui_x+9, ui_y+219, 110, 20, EXPORT_SEL_ONLY.val, 'Only export objects in visible selection. Else export whole scene.')
EXPORT_ALL_SCENES = Draw.Toggle('All Scenes', EVENT_NONE, ui_x+119, ui_y+219, 110, 20, EXPORT_ALL_SCENES.val, 'Each scene as a separate OBJ file.')
EXPORT_ANIMATION = Draw.Toggle('Animation', EVENT_NONE, ui_x+229, ui_y+219, 110, 20, EXPORT_ANIMATION.val, 'Each frame as a numbered OBJ file.')
Draw.EndAlign()
Draw.Label('Output Options...', ui_x+9, ui_y+189, 220, 20)
Draw.BeginAlign()
EXPORT_APPLY_MODIFIERS = Draw.Toggle('Apply Modifiers', EVENT_REDRAW, ui_x+9, ui_y+170, 110, 20, EXPORT_APPLY_MODIFIERS.val, 'Use transformed mesh data from each object. May break vert order for morph targets.', do_split)
EXPORT_ROTX90 = Draw.Toggle('Rotate X90', EVENT_NONE, ui_x+119, ui_y+170, 110, 20, EXPORT_ROTX90.val, 'Rotate on export so Blenders UP is translated into OBJs UP')
EXPORT_COPY_IMAGES = Draw.Toggle('Copy Images', EVENT_NONE, ui_x+229, ui_y+170, 110, 20, EXPORT_COPY_IMAGES.val, 'Copy image files to the export directory, never overwrite.')
Draw.EndAlign()
Draw.Label('Export...', ui_x+9, ui_y+139, 220, 20)
Draw.BeginAlign()
EXPORT_EDGES = Draw.Toggle('Edges', EVENT_NONE, ui_x+9, ui_y+120, 50, 20, EXPORT_EDGES.val, 'Edges not connected to faces.')
EXPORT_TRI = Draw.Toggle('Triangulate', EVENT_NONE, ui_x+59, ui_y+120, 70, 20, EXPORT_TRI.val, 'Triangulate quads.')
Draw.EndAlign()
Draw.BeginAlign()
EXPORT_MTL = Draw.Toggle('Materials', EVENT_NONE, ui_x+139, ui_y+120, 70, 20, EXPORT_MTL.val, 'Write a separate MTL file with the OBJ.')
EXPORT_UV = Draw.Toggle('UVs', EVENT_NONE, ui_x+209, ui_y+120, 31, 20, EXPORT_UV.val, 'Export texface UV coords.')
Draw.EndAlign()
Draw.BeginAlign()
EXPORT_NORMALS = Draw.Toggle('Normals', EVENT_NONE, ui_x+250, ui_y+120, 59, 20, EXPORT_NORMALS.val, 'Export vertex normal data (Ignored on import).')
EXPORT_NORMALS_HQ = Draw.Toggle('HQ', EVENT_NONE, ui_x+309, ui_y+120, 31, 20, EXPORT_NORMALS_HQ.val, 'Calculate high quality normals for rendering.')
Draw.EndAlign()
EXPORT_POLYGROUPS = Draw.Toggle('Polygroups', EVENT_REDRAW, ui_x+9, ui_y+95, 120, 20, EXPORT_POLYGROUPS.val, 'Export vertex groups as OBJ groups (one group per face approximation).')
EXPORT_CURVE_AS_NURBS = Draw.Toggle('Nurbs', EVENT_NONE, ui_x+139, ui_y+95, 100, 20, EXPORT_CURVE_AS_NURBS.val, 'Export 3D nurbs curves and polylines as OBJ curves, (bezier not supported).')
Draw.Label('Blender Objects as OBJ:', ui_x+9, ui_y+59, 220, 20)
Draw.BeginAlign()
EXPORT_BLEN_OBS = Draw.Toggle('Objects', EVENT_REDRAW, ui_x+9, ui_y+39, 60, 20, EXPORT_BLEN_OBS.val, 'Export blender objects as "OBJ objects".', do_split)
EXPORT_GROUP_BY_OB = Draw.Toggle('Groups', EVENT_REDRAW, ui_x+69, ui_y+39, 60, 20, EXPORT_GROUP_BY_OB.val, 'Export blender objects as "OBJ Groups".', do_split)
EXPORT_GROUP_BY_MAT = Draw.Toggle('Material Groups', EVENT_REDRAW, ui_x+129, ui_y+39, 100, 20, EXPORT_GROUP_BY_MAT.val, 'Group by materials.', do_split)
Draw.EndAlign()
EXPORT_KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+239, ui_y+39, 100, 20, EXPORT_KEEP_VERT_ORDER.val, 'Keep vert and face order, disables some other options. Use for morph targets.', do_vertorder)
Draw.BeginAlign()
Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 20, 'Load the wiki page for this script', do_help)
Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 20, '', obj_ui_set_event)
Draw.PushButton('Export', EVENT_EXPORT, ui_x+229, ui_y+9, 110, 20, 'Export with these settings', obj_ui_set_event)
Draw.EndAlign()
# hack so the toggle buttons redraw. this is not nice at all
while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_EXPORT):
Draw.UIBlock(obj_ui, 0)
if GLOBALS['EVENT'] != EVENT_EXPORT:
return
# END ALTERNATIVE UI *********************
if EXPORT_KEEP_VERT_ORDER.val:
EXPORT_BLEN_OBS.val = False
EXPORT_GROUP_BY_OB.val = False
EXPORT_GROUP_BY_MAT.val = False
EXPORT_APPLY_MODIFIERS.val = False
Window.EditMode(0)
Window.WaitCursor(1)
EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
EXPORT_ROTX90 = EXPORT_ROTX90.val
EXPORT_TRI = EXPORT_TRI.val
EXPORT_EDGES = EXPORT_EDGES.val
EXPORT_NORMALS = EXPORT_NORMALS.val
EXPORT_NORMALS_HQ = EXPORT_NORMALS_HQ.val
EXPORT_UV = EXPORT_UV.val
EXPORT_MTL = EXPORT_MTL.val
EXPORT_SEL_ONLY = EXPORT_SEL_ONLY.val
EXPORT_ALL_SCENES = EXPORT_ALL_SCENES.val
EXPORT_ANIMATION = EXPORT_ANIMATION.val
EXPORT_COPY_IMAGES = EXPORT_COPY_IMAGES.val
EXPORT_BLEN_OBS = EXPORT_BLEN_OBS.val
EXPORT_GROUP_BY_OB = EXPORT_GROUP_BY_OB.val
EXPORT_GROUP_BY_MAT = EXPORT_GROUP_BY_MAT.val
EXPORT_KEEP_VERT_ORDER = EXPORT_KEEP_VERT_ORDER.val
EXPORT_POLYGROUPS = EXPORT_POLYGROUPS.val
EXPORT_CURVE_AS_NURBS = EXPORT_CURVE_AS_NURBS.val
base_name, ext = splitExt(filename)
context_name = [base_name, '', '', ext] # basename, scene_name, framenumber, extension
# Use the options to export the data using write()
# def write(filename, objects, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False, EXPORT_APPLY_MODIFIERS=True):
orig_scene = Scene.GetCurrent()
if EXPORT_ALL_SCENES:
export_scenes = Scene.Get()
else:
export_scenes = [orig_scene]
# Export all scenes.
for scn in export_scenes:
		scn.makeCurrent() # If already current, this is not slow.
context = scn.getRenderingContext()
orig_frame = Blender.Get('curframe')
if EXPORT_ALL_SCENES: # Add scene name into the context_name
			context_name[1] = '_%s' % BPySys.cleanName(scn.name) # WARNING, it's possible that this could cause a collision. We could fix it if we were feeling paranoid.
# Export an animation?
if EXPORT_ANIMATION:
scene_frames = xrange(context.startFrame(), context.endFrame()+1) # up to and including the end frame.
else:
scene_frames = [orig_frame] # Dont export an animation.
# Loop through all frames in the scene and export.
for frame in scene_frames:
if EXPORT_ANIMATION: # Add frame to the filename.
context_name[2] = '_%.6d' % frame
Blender.Set('curframe', frame)
if EXPORT_SEL_ONLY:
export_objects = scn.objects.context
else:
export_objects = scn.objects
full_path= ''.join(context_name)
# erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
# EXPORT THE FILE.
write(full_path, export_objects,\
EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS,\
EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL,\
EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS,\
EXPORT_ROTX90, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS)
Blender.Set('curframe', orig_frame)
# Restore old active scene.
orig_scene.makeCurrent()
Window.WaitCursor(0)
if __name__ == '__main__':
Window.FileSelector(write_ui, 'Export Wavefront OBJ', sys.makename(ext='.obj'))
|
py | 7dffb5f3ae68f3f5c0a8a8a9bbee40b2616c5663 | from conftest import crate_translator as translator
from utils.common import create_random_entities, TIME_INDEX_NAME, add_attr
import datetime
def test_aggr_per_second(translator):
entities = create_random_entities(num_ids_per_type=2, num_updates=17)
assert len(entities) == 34
# One update every 100 millis -> 10 updates per second.
base_index = datetime.datetime(2010, 1, 1, 8, 0, 0)
delta = datetime.timedelta(milliseconds=100)
for i, e in enumerate(entities):
t = base_index + i * delta
e[TIME_INDEX_NAME] = t
add_attr(e, 'attr_float', i)
translator.insert(entities)
# Query avg attr_float per second.
res = translator.query(attr_names=['attr_float'],
aggr_method='avg',
aggr_period='second')
assert len(res) == 2
# 34 values span across 4 seconds
expected_index = []
for i in range(4):
d = datetime.datetime(2010, 1, 1, 8, 0, i)
expected_index.append(d.isoformat(timespec='milliseconds'))
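    # The expected averages below rely on create_random_entities interleaving the
    # two entity ids (0-0 at even i, 0-1 at odd i): e.g. within the first second
    # (i = 0..9) entity 0-0 holds attr_float values 0, 2, 4, 6, 8 -> avg 4, while
    # 0-1 holds 1, 3, 5, 7, 9 -> avg 5. The last second only spans i = 30..33,
    # giving averages 31 and 32.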
assert res[0] == {
'type': '0',
'id': '0-0',
'index': expected_index,
'attr_float': {
'type': 'Number',
'values': [4, 14, 24, 31],
}
}
assert res[1] == {
'type': '0',
'id': '0-1',
'index': expected_index,
'attr_float': {
'type': 'Number',
'values': [5, 15, 25, 32],
}
}
|
py | 7dffb6dccef71e7a3843ce4024f3a7831b0c9520 | from django.urls import path
from . import views
urlpatterns=[
path('',views.default,name="default"),
path('form/',views.form,name="form"),
path('insertData/',views.insertData,name="form"),
path("dbConnection/",views.dbConnectionTest,name="dbTest"),
path("10001/",views.processSeries10001,name="10001"),
path("allData/",views.processAllDatapoints,name="allData")
] |
py | 7dffb82a71c1096490e80fb0031010a088cbcf0c | from llvmlite.binding import ffi
def create_context():
return ContextRef(ffi.lib.LLVMPY_ContextCreate())
def get_global_context():
return GlobalContextRef(ffi.lib.LLVMPY_GetGlobalContext())
class ContextRef(ffi.ObjectRef):
def __init__(self, context_ptr):
super(ContextRef, self).__init__(context_ptr)
def _dispose(self):
ffi.lib.LLVMPY_ContextDispose(self)
class GlobalContextRef(ContextRef):
def _dispose(self):
pass
ffi.lib.LLVMPY_GetGlobalContext.restype = ffi.LLVMContextRef
ffi.lib.LLVMPY_ContextCreate.restype = ffi.LLVMContextRef
ffi.lib.LLVMPY_ContextDispose.argtypes = [ffi.LLVMContextRef]
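# Illustrative sketch only: a context is created, used and then released like any
# other llvmlite ObjectRef. The close() call below assumes the usual ObjectRef
# disposal API; the global context is a singleton whose _dispose() is a no-op.
def _example_context_usage():
    ctx = create_context()
    try:
        return bool(ctx)  # hand ctx to APIs that accept a context here
    finally:
        ctx.close()  # triggers LLVMPY_ContextDispose via _dispose()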
|
py | 7dffb853c0ac210b380ca7485ef305992fca42f7 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
import os
import pkg_resources
import requests
import yaml
from oslo_log import log as logging
from armada.const import KEYWORD_GROUPS, KEYWORD_CHARTS, KEYWORD_RELEASE
from armada.handlers.manifest import Manifest
from armada.exceptions.manifest_exceptions import ManifestException
from armada.utils.validation_message import ValidationMessage
LOG = logging.getLogger(__name__)
# Creates a mapping between ``metadata.name``: ``data`` where the
# ``metadata.name`` is the ``schema`` of a manifest and the ``data`` is the
# JSON schema to be used to validate the manifest in question.
SCHEMAS = {}
def _get_schema_dir():
return pkg_resources.resource_filename('armada', 'schemas')
def _load_schemas():
"""Populates ``SCHEMAS`` with the schemas defined in package
``armada.schemas``.
"""
schema_dir = _get_schema_dir()
for schema_file in os.listdir(schema_dir):
with open(os.path.join(schema_dir, schema_file)) as f:
for schema in yaml.safe_load_all(f):
name = schema['metadata']['name']
if name in SCHEMAS:
raise RuntimeError(
'Duplicate schema specified for: %s.' % name)
SCHEMAS[name] = schema['data']
def _validate_armada_manifest(manifest):
"""Validates an Armada manifest file output by
:class:`armada.handlers.manifest.Manifest`.
This will do business logic validation after the input
files have be syntactically validated via jsonschema.
:param dict manifest: The manifest to validate.
:returns: A tuple of (bool, list[dict]) where the first value
indicates whether the validation succeeded or failed and
the second value is the validation details with a minimum
keyset of (message(str), error(bool))
:rtype: tuple.
"""
details = []
try:
armada_object = manifest.get_manifest().get('armada')
except ManifestException as me:
vmsg = ValidationMessage(
message=str(me), error=True, name='ARM001', level='Error')
LOG.error('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
return False, details
groups = armada_object.get(KEYWORD_GROUPS)
if not isinstance(groups, list):
message = '{} entry is of wrong type: {} (expected: {})'.format(
KEYWORD_GROUPS, type(groups), 'list')
vmsg = ValidationMessage(
message=message, error=True, name='ARM101', level='Error')
LOG.info('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
for group in groups:
for chart in group.get(KEYWORD_CHARTS):
chart_obj = chart.get('chart')
if KEYWORD_RELEASE not in chart_obj:
message = 'Could not find {} keyword in {}'.format(
KEYWORD_RELEASE, chart_obj.get('release'))
vmsg = ValidationMessage(
message=message, error=True, name='ARM102', level='Error')
LOG.info('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
if len([x for x in details if x.get('error', False)]) > 0:
return False, details
return True, details
def validate_armada_manifests(documents):
"""Validate each Armada manifest found in the document set.
:param documents: List of Armada documents to validate
:type documents: :func: `list[dict]`.
"""
messages = []
all_valid = True
for document in documents:
if document.get('schema', '') == 'armada/Manifest/v1':
target = document.get('metadata').get('name')
# TODO(MarshM) explore: why does this pass 'documents'?
manifest = Manifest(documents, target_manifest=target)
is_valid, details = _validate_armada_manifest(manifest)
all_valid = all_valid and is_valid
messages.extend(details)
return all_valid, messages
def validate_armada_document(document):
"""Validates a document ingested by Armada by subjecting it to JSON schema
validation.
:param dict dictionary: The document to validate.
:returns: A tuple of (bool, list[dict]) where the first value
indicates whether the validation succeeded or failed and
the second value is the validation details with a minimum
keyset of (message(str), error(bool))
:rtype: tuple.
:raises TypeError: If ``document`` is not of type ``dict``.
"""
if not isinstance(document, dict):
raise TypeError(
'The provided input "%s" must be a dictionary.' % document)
schema = document.get('schema', '<missing>')
document_name = document.get('metadata', {}).get('name', None)
details = []
LOG.debug('Validating document [%s] %s', schema, document_name)
if schema in SCHEMAS:
try:
validator = jsonschema.Draft4Validator(SCHEMAS[schema])
for error in validator.iter_errors(document.get('data')):
error_message = "Invalid document [%s] %s: %s." % \
(schema, document_name, error.message)
vmsg = ValidationMessage(
message=error_message,
error=True,
name='ARM100',
level='Error',
schema=schema,
doc_name=document_name)
LOG.info('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
except jsonschema.SchemaError as e:
error_message = ('The built-in Armada JSON schema %s is invalid. '
'Details: %s.' % (e.schema, e.message))
vmsg = ValidationMessage(
message=error_message,
error=True,
name='ARM000',
level='Error',
diagnostic='Armada is misconfigured.')
LOG.error('ValidationMessage: %s', vmsg.get_output_json())
details.append(vmsg.get_output())
else:
vmsg = ValidationMessage(
message='Unsupported document type.',
error=False,
name='ARM002',
level='Warning',
schema=schema,
doc_name=document_name,
diagnostic='Please ensure document is one of '
'the following schema types: %s' % list(SCHEMAS.keys()))
LOG.info('Unsupported document type, ignoring %s.', schema)
LOG.debug('ValidationMessage: %s', vmsg.get_output_json())
# Validation API doesn't care about this type of message, don't send
if len([x for x in details if x.get('error', False)]) > 0:
return False, details
return True, details
def validate_armada_documents(documents):
"""Validates multiple Armada documents.
:param documents: List of Armada manifests to validate.
:type documents: :func:`list[dict]`.
:returns: A tuple of bool, list[dict] where the first value is whether
the full set of documents is valid or not and the second is the
detail messages from validation
:rtype: tuple
"""
messages = []
# Track if all the documents in the set are valid
all_valid = True
for document in documents:
is_valid, details = validate_armada_document(document)
all_valid = all_valid and is_valid
messages.extend(details)
if all_valid:
valid, details = validate_armada_manifests(documents)
all_valid = all_valid and valid
messages.extend(details)
for msg in messages:
if msg.get('error', False):
LOG.error(msg.get('message', 'Unknown validation error.'))
else:
LOG.debug(msg.get('message', 'Validation succeeded.'))
return all_valid, messages
def validate_manifest_url(value):
try:
return (requests.get(value).status_code == 200)
except requests.exceptions.RequestException:
return False
# Fill the cache.
_load_schemas()
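# Illustrative sketch only (hypothetical file path): how the helpers above can be
# combined to validate a manifest file end to end.
def _example_validate_manifest_file(path="/tmp/armada-manifest.yaml"):
    with open(path) as f:
        documents = list(yaml.safe_load_all(f))
    all_valid, messages = validate_armada_documents(documents)
    for msg in messages:
        LOG.info('%s', msg.get('message'))
    return all_valid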
|
py | 7dffb996733db1d346416bbe5e05744c90fde7a5 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to visualize Chimera lattices and weighted graph problems on them.
"""
import math
import random
import networkx as nx
from networkx import draw
from dwave_networkx.drawing.distinguishable_colors import distinguishable_color_map
__all__ = ['draw_qubit_graph']
def draw_qubit_graph(G, layout, linear_biases={}, quadratic_biases={},
nodelist=None, edgelist=None, cmap=None, edge_cmap=None, vmin=None, vmax=None,
edge_vmin=None, edge_vmax=None, midpoint=None,
**kwargs):
"""Draws graph G according to layout.
If `linear_biases` and/or `quadratic_biases` are provided, these
are visualized on the plot.
Parameters
----------
G : NetworkX graph
The graph to be drawn
layout : dict
A dict of coordinates associated with each node in G. Should
be of the form {node: coordinate, ...}. Coordinates will be
treated as vectors, and should all have the same length.
linear_biases : dict (optional, default {})
A dict of biases associated with each node in G. Should be of
form {node: bias, ...}. Each bias should be numeric.
quadratic_biases : dict (optional, default {})
A dict of biases associated with each edge in G. Should be of
form {edge: bias, ...}. Each bias should be numeric. Self-loop
edges (i.e., :math:`i=j`) are treated as linear biases.
midpoint : float (optional, default None)
A float that specifies where the center of the colormap should
be. If not provided, the colormap will default to the middle of
min/max values provided.
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
"""
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
except ImportError:
raise ImportError("Matplotlib and numpy required for draw_qubit_graph()")
try:
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
_mpl_toolkit_found = False
else:
_mpl_toolkit_found = True
fig = plt.gcf()
ax = kwargs.pop('ax', None)
cax = kwargs.pop('cax', None)
if linear_biases or quadratic_biases:
# if linear biases and/or quadratic biases are provided, then color accordingly.
if ax is None:
ax = fig.add_axes([0.01, 0.01, 0.86, 0.98])
if cax is None:
if _mpl_toolkit_found:
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='2%', pad=0.05)
else:
cax = fig.add_axes([.87, 0.2, 0.02, 0.6]) # left, bottom, width, height
if nodelist is None:
nodelist = G.nodes()
if edgelist is None:
edgelist = G.edges()
if cmap is None:
cmap = plt.get_cmap('coolwarm')
if edge_cmap is None:
edge_cmap = plt.get_cmap('coolwarm')
# any edges or nodes with an unspecified bias default to 0
def edge_color(u, v):
c = 0.
if (u, v) in quadratic_biases:
c += quadratic_biases[(u, v)]
if (v, u) in quadratic_biases:
c += quadratic_biases[(v, u)]
return c
def node_color(v):
c = 0.
if v in linear_biases:
c += linear_biases[v]
if (v, v) in quadratic_biases:
c += quadratic_biases[(v, v)]
return c
node_color = [node_color(v) for v in nodelist]
edge_color = [edge_color(u, v) for u, v in edgelist]
# the range of the color map is shared for nodes/edges and is symmetric
# around 0.
vmag = max(max(abs(c) for c in node_color), max(abs(c) for c in edge_color))
if vmin is None:
vmin = -1 * vmag
if vmax is None:
vmax = vmag
if edge_vmin is None:
edge_vmin = -1 * vmag
if edge_vmax is None:
edge_vmax = vmag
if linear_biases and quadratic_biases:
global_vmin = min(edge_vmin, vmin)
global_vmax = max(edge_vmax, vmax)
if midpoint is None:
midpoint = (global_vmax + global_vmin) / 2.0
norm_map = mpl.colors.TwoSlopeNorm(midpoint, vmin=global_vmin, vmax=global_vmax)
node_color = [cmap(norm_map(node)) for node in node_color]
edge_color = [cmap(norm_map(edge)) for edge in edge_color]
mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm_map, orientation='vertical')
# if the biases are provided, then add a legend explaining the color map
elif linear_biases:
if midpoint is None:
midpoint = (vmax + vmin) / 2.0
norm_map = mpl.colors.TwoSlopeNorm(midpoint, vmin=vmin, vmax=vmax)
node_color = [cmap(norm_map(node)) for node in node_color]
mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm_map, orientation='vertical')
elif quadratic_biases:
if midpoint is None:
midpoint = (edge_vmax + edge_vmin) / 2.0
norm_map = mpl.colors.TwoSlopeNorm(midpoint, vmin=edge_vmin, vmax=edge_vmax)
edge_color = [edge_cmap(norm_map(edge)) for edge in edge_color]
mpl.colorbar.ColorbarBase(cax, cmap=edge_cmap, norm=norm_map, orientation='vertical')
kwargs['edge_color'] = edge_color
kwargs['node_color'] = node_color
else:
if ax is None:
ax = fig.add_axes([0.01, 0.01, 0.98, 0.98])
draw(G, layout, ax=ax, nodelist=nodelist, edgelist=edgelist,
cmap=cmap, edge_cmap=edge_cmap, vmin=vmin, vmax=vmax, edge_vmin=edge_vmin,
edge_vmax=edge_vmax,
**kwargs)
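# --- Added illustrative usage sketch; not part of the original module. ---
# Shows how draw_qubit_graph might be called on a tiny problem. The 4-node path
# graph, the hand-written layout and the bias values are invented for this example.
def _example_draw_qubit_graph_usage():
    import networkx as nx
    G = nx.path_graph(4)
    layout = {0: (0, 0), 1: (1, 0), 2: (2, 0), 3: (3, 0)}
    linear = {0: -1.0, 1: 0.5, 2: 0.0, 3: 1.0}              # per-node biases
    quadratic = {(0, 1): -0.5, (1, 2): 0.25, (2, 3): 0.75}  # per-edge biases
    # Nodes and edges share a zero-centred 'coolwarm' scale and a colorbar is added.
    draw_qubit_graph(G, layout, linear_biases=linear, quadratic_biases=quadratic)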
def draw_embedding(G, layout, emb, embedded_graph=None, interaction_edges=None,
chain_color=None, unused_color=(0.9,0.9,0.9,1.0), cmap=None,
show_labels=False, overlapped_embedding=False, **kwargs):
"""Draws an embedding onto the graph G, according to layout.
If interaction_edges is not None, then only display the couplers in that
    list. If embedded_graph is not None, then only display the couplers between
chains with intended couplings according to embedded_graph.
Parameters
----------
G : NetworkX graph
The graph to be drawn
layout : dict
A dict of coordinates associated with each node in G. Should
be of the form {node: coordinate, ...}. Coordinates will be
treated as vectors, and should all have the same length.
emb : dict
A dict of chains associated with each node in G. Should be
of the form {node: chain, ...}. Chains should be iterables
of qubit labels (qubits are nodes in G).
embedded_graph : NetworkX graph (optional, default None)
A graph which contains all keys of emb as nodes. If specified,
edges of G will be considered interactions if and only if they
exist between two chains of emb if their keys are connected by
an edge in embedded_graph
interaction_edges : list (optional, default None)
A list of edges which will be used as interactions.
show_labels: boolean (optional, default False)
If show_labels is True, then each chain in emb is labelled with its key.
chain_color : dict (optional, default None)
A dict of colors associated with each key in emb. Should be
of the form {node: rgba_color, ...}. Colors should be length-4
tuples of floats between 0 and 1 inclusive. If chain_color is None,
each chain will be assigned a different color.
cmap : str or matplotlib colormap (optional, default None)
A matplotlib colormap for coloring of chains. Only used if chain_color
is None.
unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not involved
in chains, and edges which are neither chain edges nor interactions.
If unused_color is None, these nodes and edges will not be shown at all.
overlapped_embedding: boolean (optional, default False)
If overlapped_embedding is True, then chains in emb may overlap (contain
the same vertices in G), and the drawing will display these overlaps as
concentric circles.
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
       function. Because node and edge colors are determined by the chain
       coloring, any provided `node_color` or `edge_color` arguments are ignored.
"""
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
except ImportError:
        raise ImportError("Matplotlib and numpy required for draw_embedding()")
if nx.utils.is_string_like(unused_color):
from matplotlib.colors import colorConverter
alpha = kwargs.get('alpha', 1.0)
unused_color = colorConverter.to_rgba(unused_color, alpha)
if chain_color is None:
import matplotlib.cm
n = max(1., len(emb) - 1.)
if cmap:
color = matplotlib.cm.get_cmap(cmap)
else:
color = distinguishable_color_map(int(n+1))
var_i = {v: i for i, v in enumerate(emb)}
chain_color = {v: color(i/n) for i, v in enumerate(emb)}
if overlapped_embedding:
bags = compute_bags(G, emb)
base_node_size = kwargs.get('node_size', 100)
node_size_dict = {v: base_node_size for v in G.nodes()}
G, emb, interaction_edges = unoverlapped_embedding(G, emb, interaction_edges)
for node, data in G.nodes(data=True):
if 'dummy' in data:
v, x = node
layout[node] = layout[v]
for v, bag in bags.items():
for i, x in enumerate(bag):
node_size_dict[(v, x)] = base_node_size * (len(bag) - i) ** 2
kwargs['node_size'] = [node_size_dict[p] for p in G.nodes()]
qlabel = {q: v for v, chain in emb.items() for q in chain}
edgelist = []
edge_color = []
background_edgelist = []
background_edge_color = []
if interaction_edges is not None:
interactions = nx.Graph()
interactions.add_edges_from(interaction_edges)
def show(p, q, u, v): return interactions.has_edge(p, q)
elif embedded_graph is not None:
def show(p, q, u, v): return embedded_graph.has_edge(u, v)
else:
def show(p, q, u, v): return True
for (p, q) in G.edges():
u = qlabel.get(p)
v = qlabel.get(q)
if u is None or v is None:
ec = unused_color
elif u == v:
ec = chain_color.get(u)
elif show(p, q, u, v):
ec = (0, 0, 0, 1)
else:
ec = unused_color
if ec == unused_color:
background_edgelist.append((p, q))
background_edge_color.append(ec)
elif ec is not None:
edgelist.append((p, q))
edge_color.append(ec)
nodelist = []
node_color = []
for p in G.nodes():
u = qlabel.get(p)
if u is None:
pc = unused_color
else:
pc = chain_color.get(u)
if pc is not None:
nodelist.append(p)
node_color.append(pc)
labels = {}
if show_labels:
if overlapped_embedding:
node_labels = {q: [] for q in bags.keys()}
node_index = {p: i for i, p in enumerate(G.nodes())}
for v in emb.keys():
v_labelled = False
chain = emb[v]
for node in chain:
(q, _) = node
if len(bags[q]) == 1:
# if there's a node that only has this label, use that
labels[q] = str(v)
v_labelled = True
break
if not v_labelled and chain:
# otherwise, pick a random node for this label
node = random.choice(list(chain))
(q, _) = node
node_labels[q].append(v)
for q, label_vars in node_labels.items():
x, y = layout[q]
# TODO: find a better way of placing labels around the outside of nodes.
# Currently, if the graph is resized, labels will appear at a strange distance from the vertices.
# To fix this, the "scale" value below, rather than being a fixed constant, should be determined using
# both the size of the nodes and the size of the coordinate space of the graph.
scale = 0.1
# spread the labels evenly around the node.
for i, v in enumerate(label_vars):
theta = 2 * math.pi * i / len(label_vars)
new_x = x + scale * math.sin(theta)
new_y = y + scale * math.cos(theta)
plt.text(new_x, new_y, str(v), color=node_color[node_index[(q, v)]], horizontalalignment='center',
verticalalignment='center')
else:
for v in emb.keys():
c = emb[v]
labels[list(c)[0]] = str(v)
# draw the background (unused) graph first
if unused_color is not None:
draw(G, layout, nodelist=nodelist, edgelist=background_edgelist,
node_color=node_color, edge_color=background_edge_color,
**kwargs)
draw(G, layout, nodelist=nodelist, edgelist=edgelist,
node_color=node_color, edge_color=edge_color, labels=labels,
**kwargs)
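# --- Added illustrative usage sketch; not part of the original module. ---
# emb maps each source variable to its chain of target nodes. The toy 6-cycle target
# graph, the circular layout and the three 2-node chains are invented for this example.
def _example_draw_embedding_usage():
    import networkx as nx
    target = nx.cycle_graph(6)
    layout = nx.circular_layout(target)
    emb = {'x': [0, 1], 'y': [2, 3], 'z': [4, 5]}
    # With no interaction_edges/embedded_graph filter, couplers between different
    # chains are all treated as interactions; chain edges get per-chain colors.
    draw_embedding(target, layout, emb, show_labels=True)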
def compute_bags(C, emb):
# Given an overlapped embedding, compute the set of source nodes embedded at every target node.
bags = {v: [] for v in C.nodes()}
for x, chain in emb.items():
for v in chain:
bags[v].append(x)
return bags
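# Added note: e.g. with emb = {'a': [0, 1], 'b': [1, 2]} and a target graph containing
# nodes 0, 1 and 2, compute_bags returns {0: ['a'], 1: ['a', 'b'], 2: ['b'], ...} --
# the list of source variables embedded at each target node (empty for unused nodes).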
def unoverlapped_embedding(G, emb, interaction_edges):
# Given an overlapped embedding, construct a new graph and embedding without overlaps
# by making copies of nodes that have multiple variables.
bags = compute_bags(G, emb)
new_G = G.copy()
new_emb = dict()
for x, chain in emb.items():
for v in chain:
new_G.add_node((v, x), dummy=True)
for (u, v) in G.subgraph(chain).edges():
new_G.add_edge((u, x), (v, x))
new_emb[x] = {(v, x) for v in chain}
for (u, v) in G.edges():
for x in bags[u]:
for y in bags[v]:
new_G.add_edge((u, x), (v, y))
if interaction_edges:
new_interaction_edges = list(interaction_edges)
for (u, v) in interaction_edges:
for x in bags[u]:
for y in bags[v]:
new_interaction_edges.append(((u, x), (v, y)))
else:
new_interaction_edges = None
return new_G, new_emb, new_interaction_edges
def draw_yield(G, layout, perfect_graph, unused_color=(0.9,0.9,0.9,1.0),
fault_color=(1.0,0.0,0.0,1.0), fault_shape='x',
fault_style='dashed', **kwargs):
"""Draws the given graph G with highlighted faults, according to layout.
Parameters
----------
G : NetworkX graph
The graph to be parsed for faults
layout : dict
A dict of coordinates associated with each node in perfect_graph. Should
be of the form {node: coordinate, ...}. Coordinates will be
treated as vectors, and should all have the same length.
perfect_graph : NetworkX graph
The graph to be drawn with highlighted faults
unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not faults.
If unused_color is None, these nodes and edges will not be shown at all.
fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0))
A color to represent nodes absent from the graph G. Colors should be
length-4 tuples of floats between 0 and 1 inclusive.
fault_shape : string, optional (default='x')
The shape of the fault nodes. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
fault_style : string, optional (default='dashed')
        Edge fault line style (solid|dashed|dotted|dashdot)
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
       function. Because node and edge colors are determined by fault
       highlighting, any provided `node_color` or `edge_color` arguments are ignored.
"""
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
except ImportError:
        raise ImportError("Matplotlib and numpy required for draw_yield()")
nodelist = G.nodes()
edgelist = G.edges()
faults_nodelist = perfect_graph.nodes() - nodelist
faults_edgelist = perfect_graph.edges() - edgelist
# To avoid matplotlib.pyplot.scatter warnings for single tuples, create
# lists of colors from given colors.
faults_node_color = [fault_color for v in faults_nodelist]
faults_edge_color = [fault_color for v in faults_edgelist]
# Draw faults with different style and shape
draw(perfect_graph, layout, nodelist=faults_nodelist, edgelist=faults_edgelist,
node_color=faults_node_color, edge_color=faults_edge_color,
style=fault_style, node_shape=fault_shape,
**kwargs )
# Draw rest of graph
if unused_color is not None:
if nodelist is None:
nodelist = G.nodes() - faults_nodelist
if edgelist is None:
edgelist = G.edges() - faults_edgelist
unused_node_color = [unused_color for v in nodelist]
unused_edge_color = [unused_color for v in edgelist]
draw(perfect_graph, layout, nodelist=nodelist, edgelist=edgelist,
node_color=unused_node_color, edge_color=unused_edge_color,
**kwargs)
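# --- Added illustrative usage sketch; not part of the original module. ---
# draw_yield compares a working graph against its "perfect" counterpart; nodes and
# edges present only in perfect_graph are drawn as faults. The 3x3 grid graph and
# the removed node below are invented for this example.
def _example_draw_yield_usage():
    import networkx as nx
    perfect = nx.grid_2d_graph(3, 3)
    working = perfect.copy()
    working.remove_node((1, 1))               # pretend the centre qubit is faulty
    layout = {v: v for v in perfect.nodes()}  # grid coordinates double as positions
    draw_yield(working, layout, perfect)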
|
py | 7dffbb41b783924706d03b44ee1be29a6a1fa40c | import ila
import pickle
PC_BITS = 32
REG_BITS = 32
MEM_ADDRESS_BITS = 32
OPCODE_BIT = 22
DST_BIT = 17
SRC0_BIT = 12
SRC1_BIT = 7
SRC2_BIT = 2
BASE_BIT = 2
class barSpec(object):
def __init__(self):
self.BAR_INIT = 0
self.BAR_ENTER = 1
self.BAR_WAIT = 2
self.BAR_EXIT = 3
self.BAR_FINISH = 4
self.BAR_COUNTER_ENTER_BITS = 32
self.BAR_COUNTER_EXIT_BITS = 32
self.BAR_STATE_BITS = 3
self.THREAD_NUM = 128
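# Added note (inferred from the decode lists in ptxGPUModel.ubar() and the ptxSample()
# reference model below): barSpec appears to encode the barrier finite-state machine
# BAR_INIT -> BAR_ENTER -> BAR_WAIT -> BAR_EXIT -> BAR_FINISH. Threads increment
# bar_counter_enter while entering; once all THREAD_NUM threads have arrived the
# machine moves to BAR_EXIT, drains bar_counter_exit back to zero, then finishes.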
reg_source_file = "test_reg_source.txt"
mem_source_file = "test_mem_source.txt"
program_file = "program_test.ptx"
reg_map_file = "reg_map"
mem_map_file = "mem_map"
reg_book_file = "reg_book"
mem_book_file = "mem_book"
reg_source_obj = open(reg_source_file, 'r')
mem_source_obj = open(mem_source_file, 'r')
[reg_state_type_name_dict, reg_state_type_length_dict] = pickle.load(reg_source_obj)
[mem_state_type_name_dict, mem_state_type_length_dict, mem_state_size_dict] = pickle.load(mem_source_obj)
reg_state_list = reg_state_type_name_dict.keys()
mem_state_list = mem_state_type_name_dict.keys()
class ptxGPUModel(object):
def __init__(self):
self.model = ila.Abstraction('GPU_ptx')
self.program_name = 'test.ptx'
self.createStates()
self.ubar()
self.assumptions()
def createStates(self):
self.createPC()
self.createRegs()
self.createMems()
self.createuBar()
self.instructionFetch()
self.instructionDecode()
#self.addInstruction()
def createPC(self):
self.pc = self.model.reg('pc', PC_BITS)
def createRegs(self):
self.scalar_registers = []
reg_book_obj = open(reg_book_file)
reg_book = pickle.load(reg_book_obj)
        #Here's something to be cleaned up in the future.
        #These 3 states are needed for ptxSim,
        #but here we have to remove them,
        #because these registers do not have the same width as the general registers.
reg_book.remove('bar_state')
reg_book.remove('bar_counter_enter')
reg_book.remove('bar_counter_exit')
for reg_name in reg_book :
self.scalar_registers.append(self.model.reg(reg_name, REG_BITS))
def createMems(self):
self.mem = self.model.mem('mem', MEM_ADDRESS_BITS, MEM_ADDRESS_BITS)
def createuBar(self):
bar_spec = barSpec()
self.bar_state = self.model.reg('bar_state', bar_spec.BAR_STATE_BITS)
#self.bar_counter_enter = self.model.reg('bar_counter_enter', bar_spec.BAR_COUNTER_ENTER_BITS)
#self.bar_counter_exit = self.model.reg('bar_counter_exit', bar_spec.BAR_COUNTER_EXIT_BITS)
#self.bar_state = self.model.getreg('bar_state')
#self.bar_counter_enter = self.model.getreg('bar_counter_enter')
#self.bar_counter_exit = self.model.getreg('bar_counter_exit')
bar_state_next = ila.choice('bar_state_next', [ila.const(bar_spec.BAR_INIT ,bar_spec.BAR_STATE_BITS), ila.const(bar_spec.BAR_ENTER, bar_spec.BAR_STATE_BITS)])
self.model.set_next('bar_state', bar_state_next)
#self.model.set_next('bar_counter_enter', self.bar_counter_enter)
#self.model.set_next('bar_counter_exit', self.bar_counter_exit)
def instructionFetch(self):
self.inst = ila.load(self.mem, ila.zero_extend(self.pc[31:2], MEM_ADDRESS_BITS))
self.opcode = self.inst[(REG_BITS - 1):OPCODE_BIT]
self.fetch_expr = self.inst
self.dest = self.inst[(OPCODE_BIT - 1):DST_BIT]
self.src1 = self.inst[(DST_BIT - 1):SRC0_BIT]
self.src2 = self.inst[(SRC0_BIT - 1):SRC1_BIT]
self.src3 = self.inst[(SRC1_BIT - 1):SRC2_BIT]
self.baseImm = ila.sign_extend(self.inst[(BASE_BIT-1): 0], PC_BITS)
self.branchPred = self.dest
self.predReg = self.indexIntoReg(self.branchPred)
self.branchImm = ila.zero_extend(self.inst[(DST_BIT - 1) : BASE_BIT], PC_BITS)
self.sreg1 = self.indexIntoReg(self.src1)
self.sreg2 = self.indexIntoReg(self.src2)
self.sreg3 = self.indexIntoReg(self.src3)
self.sregdest = self.indexIntoReg(self.dest)
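    # Added note (derived from the bit-position constants and slices above): each 32-bit
    # instruction word is decoded as opcode = bits [31:22], dest = [21:17], src1 = [16:12],
    # src2 = [11:7], src3 = [6:2], with a 2-bit sign-extended base immediate in [1:0].
    # Branches reuse dest as the predicate register selector and bits [16:2],
    # zero-extended, as the branch immediate.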
def instructionDecode(self):
instruction_map_file = 'instruction_map'
instruction_map_obj = open(instruction_map_file, 'r')
instruction_map = pickle.load(instruction_map_obj)
bar_spec = barSpec()
#ALUInstructions = [(self.opcode == instruction_map['add']), (self.opcode == instruction_map['sub']),(self.opcode == instruction_map['mul']), ((self.opcode == instruction_map['bra']) & (self.predReg != 0) & (self.baseImm != 0)), ((self.opcode == instruction_map['bra']) & (self.baseImm == 0)), (self.opcode != instruction_map['add']) & (self.opcode != instruction_map['sub']) & (self.opcode != instruction_map['mul']) & ((self.opcode != instruction_map['bra']) | (self.predReg == 0) | (self.baseImm == 0))&((self.opcode != instruction_map['bra']) | (self.baseImm != 0)) ]
#decodeList = ALUInstructions + [(self.opcode == instruction_map['bar']) & (self.bar_state == bar_spec.BAR_INIT), (self.opcode == instruction_map['bar']) & (self.bar_state == bar_spec.BAR_FINISH)]
decodeList = [(self.opcode == instruction_map['bar']) & (self.bar_state == bar_spec.BAR_INIT), (self.opcode == instruction_map['bar']) & (self.bar_state == bar_spec.BAR_FINISH)]
self.model.decode_exprs = decodeList
#self.bar_state_next = ila.ite((self.opcode == instruction_map['bar']) & (self.bar_state == bar_spec.BAR_INIT), ila.const(1, bar_spec.BAR_STATE_BITS) ila.ite(self.bar_state == bar_spec.BAR_FINISH, bar_spec.BAR_INIT, self.bar_state)) #non-synthesize bar instruction
def ubar(self):
instruction_map_file = 'instruction_map'
instruction_map_obj = open(instruction_map_file, 'r')
instruction_map = pickle.load(instruction_map_obj)
bar_spec = barSpec()
self.u_bar_model = self.model.add_microabstraction('bar_instruction', ((self.bar_state > bar_spec.BAR_INIT) & (self.bar_state < bar_spec.BAR_FINISH)))
self.bar_counter_enter = self.u_bar_model.reg('bar_counter_enter', bar_spec.BAR_COUNTER_ENTER_BITS)
self.bar_counter_exit = self.u_bar_model.reg('bar_counter_exit', bar_spec.BAR_COUNTER_EXIT_BITS)
#bar_counter_enter = self.model.getreg('bar_counter_enter')
#bar_counter_exit = self.model.getreg('bar_counter_exit')
bar_state = self.model.getreg('bar_state')
self.bar_counter_max = bar_spec.THREAD_NUM # need cleanup
#bar_state_next = ila.ite(bar_state == bar_spec.bar_enter, ila.ite(bar_counter_exit != 0, bar_spec.bar_enter, ila.ite(bar_counter_enter == (bar_counter_max - 1), bar_spec.bar_exit, bar_wait)), ila.ite(bar_state == bar_spec.bar_wait, ila.ite(bar_counter_enter == bar_counter_max, bar_spec.bar_exit, bar_spec.bar_wait), ila.ite(bar_state == bar_spec.bar_exit, bar_spec.bar_finish , bar_state)))
#bar_counter_enter_next = ila.ite(bar_state == bar_spec.bar_enter, ila.ite(bar_counter_exit != 0, bar_counter_enter, bar_counter_enter + 1), ila.ite((bar_state == bar_spec.bar_exit) & (bar_counter_exit == 1), ila.const(0x0, bar_spec.bar_counter_enter_bits), bar_counter_enter))
#bar_counter_exit_next = ila.ite((bar_state == bar_spec.bar_enter) & (counter_enter == (bar_counter_max - 1)), bar_counter_max, ila.ite(bar_state == bar_spec.bar_exit, bar_counter_exit - 1, bar_counter_exit))
bar_state_next = ila.choice('bar_state_next', [ila.const(i, bar_spec.BAR_STATE_BITS) for i in range(1,4)])
self.bar_counter_enter_next = ila.choice('bar_counter_enter_next', [self.bar_counter_enter, self.bar_counter_enter + 1, ila.const(0x0, bar_spec.BAR_COUNTER_ENTER_BITS)])
self.bar_counter_exit_next = ila.choice('bar_counter_exit_next', [self.bar_counter_exit, self.bar_counter_exit + 1, ila.const(self.bar_counter_max, bar_spec.BAR_COUNTER_EXIT_BITS)])
self.u_bar_model.set_next('bar_state', bar_state_next)
self.u_bar_model.set_next('bar_counter_enter', self.bar_counter_enter_next)
self.u_bar_model.set_next('bar_counter_exit', self.bar_counter_exit_next)
bar_decode_list = [(bar_state == bar_spec.BAR_ENTER) & (self.bar_counter_exit != 0),\
(bar_state == bar_spec.BAR_ENTER) & (self.bar_counter_exit == 0) & (self.bar_counter_enter == (self.bar_counter_max - 1)),\
(bar_state == bar_spec.BAR_ENTER) & (self.bar_counter_exit == 0) & (self.bar_counter_enter != (self.bar_counter_max - 1)),\
(bar_state == bar_spec.BAR_WAIT) & (self.bar_counter_enter != self.bar_counter_max),\
(bar_state == bar_spec.BAR_WAIT) & (self.bar_counter_enter == self.bar_counter_max),\
(bar_state == bar_spec.BAR_EXIT) & (self.bar_counter_exit != 1),\
(bar_state == bar_spec.BAR_EXIT) & (self.bar_counter_exit == 1)]
self.u_bar_model.decode_exprs = bar_decode_list
def assumptions(self):
instruction_map_file = 'instruction_map'
instruction_map_obj = open(instruction_map_file, 'r')
instruction_map = pickle.load(instruction_map_obj)
bar_spec = barSpec()
self.model.add_assumption((self.opcode == instruction_map['bar'])) #& ((self.bar_state == bar_spec.BAR_INIT) | (self.bar_state == bar_spec.BAR_FINISH) ))
#self.u_bar_model.add_assumption((self.opcode == instruction_map['bar']) & (self.bar_state > bar_spec.BAR_INIT) & (self.bar_state < bar_spec.BAR_FINISH))
def pc_nxt(self):
self.pcPlus4 = self.pc + ila.const(0b100, PC_BITS)
self.branchPC = self.pcPlus4 + self.branchImm
return ila.choice("pc_nxt", [self.pc ,self.pcPlus4, self.branchPC])
def sreg_nxt(self, regNo):
return ila.ite(self.dest == regNo, ila.choice(str(regNo) + "_nxt", [self.sreg1 + self.sreg2, self.sreg1 - self.sreg2, self.sreg1 * self.sreg2, self.sregdest]),self.scalar_registers[regNo])
# return ila.choice(str(regNo) + "_nxt", [self.sreg1 + self.sreg2, self.sreg1 - self.sreg2, self.sreg1 * self.sreg2, self.sregdest ,self.scalar_registers[regNo]])
def mem_nxt(self):
return self.mem
def indexIntoReg(self, idx):
expr = self.scalar_registers[0]
for i in range(len(self.scalar_registers)):
expr = ila.ite(idx == i, self.scalar_registers[i], expr)
return expr
def compare(self):
next_1 = self.u_bar_model.get_next('bar_state')
next_2 = self.ptxSample()
if not self.u_bar_model.areEqual(next_1, next_2):
print 'not equal'
else:
print 'equal'
'''
def ptxSample(self):
bar_spec = barSpec()
return ila.ite(self.bar_state == bar_spec.BAR_ENTER, ila.ite(self.bar_counter_exit != 0, self.bar_counter_enter, self.bar_counter_enter + 1), ila.ite((self.bar_state == bar_spec.BAR_EXIT) & (self.bar_counter_exit == 1), ila.const(0x0, bar_spec.BAR_COUNTER_ENTER_BITS), self.bar_counter_enter))
'''
#sample for uILA bar_state update.
def ptxSample(self):
bar_spec = barSpec()
return ila.ite(self.bar_state == bar_spec.BAR_ENTER, ila.ite(self.bar_counter_exit != 0, ila.const(bar_spec.BAR_ENTER, bar_spec.BAR_STATE_BITS), ila.ite(self.bar_counter_enter == (self.bar_counter_max - 1), ila.const(bar_spec.BAR_EXIT, bar_spec.BAR_STATE_BITS), ila.const(bar_spec.BAR_WAIT, bar_spec.BAR_STATE_BITS))), ila.ite(self.bar_state == bar_spec.BAR_WAIT, ila.ite(self.bar_counter_enter == self.bar_counter_max, ila.const(bar_spec.BAR_EXIT, bar_spec.BAR_STATE_BITS), ila.const(bar_spec.BAR_WAIT, bar_spec.BAR_STATE_BITS)), ila.ite(self.bar_state == bar_spec.BAR_EXIT, ila.const(bar_spec.BAR_FINISH, bar_spec.BAR_STATE_BITS), self.bar_state)))
'''
#sample for topILA bar_state update.
def ptxSample(self):
bar_spec = barSpec()
return ila.ite(self.opcode == 71, ila.ite(self.bar_state == bar_spec.BAR_FINISH, ila.const(bar_spec.BAR_INIT, bar_spec.BAR_STATE_BITS), ila.ite(self.bar_state == bar_spec.BAR_INIT, ila.const(bar_spec.BAR_ENTER, bar_spec.BAR_STATE_BITS), self.bar_state)), self.bar_state)
'''
'''
#sample for topILA pc.
def ptxSample(self):
bar_spec = barSpec()
return ila.ite(self.opcode == 71, ila.ite(self.bar_state == bar_spec.BAR_FINISH, self.pc + 4 , self.pc), self.pc + 4)
'''
'''
#sample for branch instruction
def ptxSample(self):
return ila.ite(self.opcode == 67, ila.ite(self.baseImm == 0, self.pc + ila.const(0b100, PC_BITS) + self.branchImm, ila.ite(self.predReg == 0, self.pc + ila.const(0b100, PC_BITS), self.pc + ila.const(0b100, PC_BITS) + self.branchImm)), self.pc + ila.const(0b100, PC_BITS))
'''
'''
#Only with alu intruction
def compare(self):
next_1 = self.model.get_next('%r4')
next_2 = self.ptxSample()
if not self.model.areEqual(next_1, next_2):
print 'not equal'
else:
print 'equal'
def ptxSample(self):
return ila.ite(self.dest == 0, ila.ite(self.opcode == 26, self.sreg1 + self.sreg2, ila.ite(self.opcode == 28, self.sreg1 - self.sreg2, self.sregdest)), self.scalar_registers[0])
'''
|
py | 7dffbc3255c64e13360a6e30a32d255511ebe764 | #!/usr/bin/env python3
import click
import colorama
import threading
import yaml
from greendoge.cmds.passphrase_funcs import prompt_for_passphrase, read_passphrase_from_file
from greendoge.util.default_root import DEFAULT_KEYS_ROOT_PATH
from greendoge.util.file_keyring import FileKeyring
from greendoge.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, KeyringWrapper
from cryptography.exceptions import InvalidTag
from io import TextIOWrapper
from pathlib import Path
from typing import Any, Dict, Optional
DEFAULT_KEYRING_YAML = DEFAULT_KEYS_ROOT_PATH / "keyring.yaml"
class DumpKeyring(FileKeyring): # lgtm [py/missing-call-to-init]
def __init__(self, keyring_file: Path):
self.keyring_path = keyring_file
self.payload_cache = {}
self.load_keyring_lock = threading.RLock()
# We don't call super().__init__() to avoid side-effects
def get_passphrase_prompt(keyring_file: str) -> str:
prompt = (
colorama.Fore.YELLOW
+ colorama.Style.BRIGHT
+ "(Unlock Keyring: "
+ colorama.Fore.MAGENTA
+ keyring_file
+ colorama.Style.RESET_ALL
+ colorama.Fore.YELLOW
+ colorama.Style.BRIGHT
+ ")"
+ colorama.Style.RESET_ALL
+ " Passphrase: "
) # noqa: E501
return prompt
@click.command()
@click.argument("keyring_file", nargs=1, default=DEFAULT_KEYRING_YAML)
@click.option(
"--full-payload", is_flag=True, default=False, help="Print the full keyring contents, including plaintext"
)
@click.option("--passphrase-file", type=click.File("r"), help="File or descriptor to read the passphrase from")
@click.option("--pretty-print", is_flag=True, default=False)
def dump(keyring_file, full_payload: bool, passphrase_file: Optional[TextIOWrapper], pretty_print: bool):
saved_passphrase: Optional[str] = KeyringWrapper.get_shared_instance().get_master_passphrase_from_credential_store()
passphrase: str = saved_passphrase or DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
prompt: str = get_passphrase_prompt(str(keyring_file))
data: Dict[str, Any] = {}
print(f"Attempting to dump contents of keyring file: {keyring_file}\n")
if passphrase_file is not None:
passphrase = read_passphrase_from_file(passphrase_file)
keyring = DumpKeyring(Path(keyring_file))
if full_payload:
keyring.load_outer_payload()
data = keyring.outer_payload_cache
for i in range(5):
try:
keyring.load_keyring(passphrase)
if len(data) > 0:
data["data"] = keyring.payload_cache
else:
data = keyring.payload_cache
if pretty_print:
print(yaml.dump(data))
else:
print(data)
break
except (ValueError, InvalidTag):
passphrase = prompt_for_passphrase(prompt)
except Exception as e:
print(f"Unhandled exception: {e}")
break
def dump_to_string(
keyring_file, full_payload: bool, passphrase_file: Optional[TextIOWrapper], pretty_print: bool
) -> str:
saved_passphrase: Optional[str] = KeyringWrapper.get_shared_instance().get_master_passphrase_from_credential_store()
passphrase: str = saved_passphrase or DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
prompt: str = get_passphrase_prompt(str(keyring_file))
data: Dict[str, Any] = {}
print(f"Attempting to dump contents of keyring file: {keyring_file}\n")
if passphrase_file is not None:
passphrase = read_passphrase_from_file(passphrase_file)
keyring = DumpKeyring(Path(keyring_file))
if full_payload:
keyring.load_outer_payload()
data = keyring.outer_payload_cache
s: str = ""
for i in range(5):
try:
keyring.load_keyring(passphrase)
if len(data) > 0:
data["data"] = keyring.payload_cache
else:
data = keyring.payload_cache
if pretty_print:
s = yaml.dump(data)
else:
s = str(data)
break
except (ValueError, InvalidTag):
passphrase = prompt_for_passphrase(prompt)
except Exception as e:
print(f"Unhandled exception: {e}")
break
return s
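# --- Added illustrative sketch; not part of the original module. ---
# Shows how dump_to_string() might be called programmatically, e.g. from a test.
# Using DEFAULT_KEYRING_YAML here is just an example; any keyring path works.
def _example_dump_default_keyring():
    return dump_to_string(DEFAULT_KEYRING_YAML, full_payload=False,
                          passphrase_file=None, pretty_print=True)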
def main():
colorama.init()
dump() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
main()
|
py | 7dffbce3e038193d4f1fe486c22cb7361c1280eb | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Centre National d'Etudes Spatiales (CNES)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
###################################################################################################
o o
oo oo oo o oo ,-.
o o o o o o o o o \_/
o o o o o o o o {|||D
o o oooooo o oooooo / \
o o o o o o o o `-^
o o o o oooo o o
###################################################################################################
orchestrator.tests.plugins.test_maja_sentinel2_l1_main_xml_reader -- shortdesc
orchestrator.tests.plugins.test_maja_sentinel2_l1_main_xml_reader is a description
It defines classes_and_methods
###################################################################################################
"""
import os
import unittest
from orchestrator.tests.settings import A_PRODUCT_S2_L1
from orchestrator.plugins.sentinel2.maja_sentinel2_l1_main_xml_reader import MajaSentinel2L1MainXmlReader
class TestMajaSentinel2MainXmlReader(unittest.TestCase):
"""Test class for ClassUndertestName"""
def setUp(self):
self.input_product_mtd = A_PRODUCT_S2_L1.get("main_xml_fullpath")
def test_object(self):
"""
Assert the initialization of the object do not fail
"""
self.maja_oject = MajaSentinel2L1MainXmlReader(self.input_product_mtd)
self.assertIsNotNone(self.maja_oject)
def test_granule(self):
"""
Assertion on the extracted granule name
TODO: create a general test for a general function extract_info which should return a {}
"""
self.maja_oject = MajaSentinel2L1MainXmlReader(self.input_product_mtd)
self.assertEqual(self.maja_oject.granule_id, A_PRODUCT_S2_L1.get("granule_id"))
self.assertEqual(self.maja_oject.granule_id, A_PRODUCT_S2_L1.get("granule_id"))
def test_granule_xml_file_getter(self):
"""
Test of the method granule_xml_file
"""
self.maja_oject = MajaSentinel2L1MainXmlReader(self.input_product_mtd)
self.assertEqual(os.path.basename(self.maja_oject.granule_xml_file),
A_PRODUCT_S2_L1.get("granule_xml_basename"))
if __name__ == "__main__":
unittest.main()
|
py | 7dffbe494f4e0e4c3eef651e72468e3af11e0d78 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.ops.distributions import special_math
def test_moment_matching(
samples,
number_moments,
dist,
stride=0):
"""Return z-test scores for sample moments to match analytic moments.
  Given `samples`, check that the first `number_moments` sample moments match
  the given `dist` moments by doing a z-test.
Args:
samples: Samples from target distribution.
number_moments: Python `int` describing how many sample moments to check.
dist: SciPy distribution object that provides analytic moments.
stride: Distance between samples to check for statistical properties.
A stride of 0 means to use all samples, while other strides test for
spatial correlation.
Returns:
Array of z_test scores.
"""
sample_moments = []
expected_moments = []
variance_sample_moments = []
for i in range(1, number_moments + 1):
if len(samples.shape) == 2:
strided_range = samples.flat[::(i - 1) * stride + 1]
else:
strided_range = samples[::(i - 1) * stride + 1, ...]
sample_moments.append(np.mean(strided_range**i, axis=0))
expected_moments.append(dist.moment(i))
variance_sample_moments.append(
(dist.moment(2 * i) - dist.moment(i) ** 2) / len(strided_range))
z_test_scores = []
for i in range(1, number_moments + 1):
# Assume every operation has a small numerical error.
# It takes i multiplications to calculate one i-th moment.
total_variance = (
variance_sample_moments[i - 1] +
i * np.finfo(samples.dtype).eps)
tiny = np.finfo(samples.dtype).tiny
assert np.all(total_variance > 0)
total_variance = np.where(total_variance < tiny, tiny, total_variance)
# z_test is approximately a unit normal distribution.
z_test_scores.append(abs(
(sample_moments[i - 1] - expected_moments[i - 1]) / np.sqrt(
total_variance)))
return z_test_scores
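# Added illustrative sketch (not part of the original module): one way
# test_moment_matching might be used, with a SciPy frozen distribution supplying the
# analytic moments. SciPy is an assumption here; it is not imported by this module.
def _example_moment_matching_normal():
  import scipy.stats as stats
  samples = np.random.standard_normal(10000).astype(np.float64)
  z_scores = test_moment_matching(samples, number_moments=4, dist=stats.norm())
  # A loose z-score bound; callers typically assert something like this.
  return all(z < 4.0 for z in z_scores)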
def chi_squared(x, bins):
"""Pearson's Chi-squared test."""
x = np.ravel(x)
n = len(x)
histogram, _ = np.histogram(x, bins=bins, range=(0, 1))
expected = n / float(bins)
return np.sum(np.square(histogram - expected) / expected)
def normal_cdf(x):
"""Cumulative distribution function for a standard normal distribution."""
return 0.5 + 0.5 * np.vectorize(math.erf)(x / math.sqrt(2))
def anderson_darling(x):
"""Anderson-Darling test for a standard normal distribution."""
x = np.sort(np.ravel(x))
n = len(x)
i = np.linspace(1, n, n)
z = np.sum((2 * i - 1) * np.log(normal_cdf(x)) +
(2 * (n - i) + 1) * np.log(1 - normal_cdf(x)))
return -n - z / n
def test_truncated_normal(assert_equal, assert_all_close, n, y,
mean_atol=5e-4, median_atol=8e-4, variance_rtol=1e-3):
"""Tests truncated normal distribution's statistics."""
def _normal_cdf(x):
return .5 * math.erfc(-x / math.sqrt(2))
def normal_pdf(x):
return math.exp(-(x**2) / 2.) / math.sqrt(2 * math.pi)
def probit(x):
return special_math.ndtri(x)
a = -2.
b = 2.
mu = 0.
sigma = 1.
alpha = (a - mu) / sigma
beta = (b - mu) / sigma
z = _normal_cdf(beta) - _normal_cdf(alpha)
assert_equal((y >= a).sum(), n)
assert_equal((y <= b).sum(), n)
# For more information on these calculations, see:
# Burkardt, John. "The Truncated Normal Distribution".
# Department of Scientific Computing website. Florida State University.
expected_mean = mu + (normal_pdf(alpha) - normal_pdf(beta)) / z * sigma
y = y.astype(float)
actual_mean = np.mean(y)
assert_all_close(actual_mean, expected_mean, atol=mean_atol)
expected_median = mu + probit(
(_normal_cdf(alpha) + _normal_cdf(beta)) / 2.) * sigma
actual_median = np.median(y)
assert_all_close(actual_median, expected_median, atol=median_atol)
expected_variance = sigma**2 * (1 + (
(alpha * normal_pdf(alpha) - beta * normal_pdf(beta)) / z) - (
(normal_pdf(alpha) - normal_pdf(beta)) / z)**2)
actual_variance = np.var(y)
assert_all_close(
actual_variance,
expected_variance,
rtol=variance_rtol)
|
py | 7dffc219cd7f17b329b5bb832082af2590f96ca1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
def get_free_ports(num_ports, ip='127.0.0.1'):
"""Get `num_ports` free/available ports on the interface linked to the `ip´
:param int num_ports: The number of free ports to get
:param str ip: The ip on which the ports have to be taken
    :return: a set of port numbers
"""
sock_ports = []
ports = set()
try:
for _ in range(num_ports):
sock = socket.socket()
cur = [sock, -1]
            # append the socket directly,
            # so that it will also be closed (no leaked resource)
            # in the finally block below.
sock_ports.append(cur)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((ip, 0))
cur[1] = sock.getsockname()[1]
finally:
for sock, port in sock_ports:
sock.close()
ports.add(port)
assert num_ports == len(ports)
return ports
def is_port_open(port, ip='127.0.0.1'):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
result = sock.connect_ex((ip, port))
if not result:
sock.shutdown(socket.SHUT_RDWR)
return result == 0
finally:
sock.close()
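# --- Added illustrative sketch; not part of the original module. ---
# Typical use: reserve a couple of free ports for a test server. The sockets used to
# find the ports are closed before returning, so nothing is listening on them yet.
def _example_reserve_ports():
    ports = get_free_ports(2)
    return {port: is_port_open(port) for port in ports}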
|
py | 7dffc21a354ab0aed4e358393411aef14bfa97eb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from operator import itemgetter
def calc_mean_ap(input_zip):
pos_list = [0.1*x for x in range(0, 10)]
#print pos_list
return_zip = []
mAP = 0
for it in pos_list:
precisions = [x[1] for x in input_zip if x[0]>it]
if len(precisions)<=0:
pr=0
else:
pr = max(precisions)
mAP+=pr*0.1
return mAP
#return return_zip
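# Added worked note: calc_mean_ap approximates average precision by sampling the
# recall/precision pairs at recall thresholds 0.0, 0.1, ..., 0.9; at each threshold it
# takes the best precision achieved at any strictly higher recall and weights it by 0.1.
# E.g. for prc = [(0.0, 0.0), (0.5, 1.0), (1.0, 0.6)] the thresholds 0.0-0.4 pick 1.0
# and 0.5-0.9 pick 0.6, giving 5*0.1*1.0 + 5*0.1*0.6 = 0.8.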
def calc_recall_precision(sc, y):
sc_list = zip(sc, y)
sc_list_s = sorted(sc_list, key=itemgetter(0), reverse=True)
#print sc_list_s
precision = []
recall = []
s = []
gt = []
precision.append(0.0)
recall.append(0.0)
arr_s = len(sc_list_s)
false_pos = 0
true_pos = 0
for pair in sc_list_s:
curr_score = pair[0]
curr_y = pair[1]
if curr_y==1:
true_pos+=1
else:
false_pos+=1
false_neg = 0
for p in sc_list_s:
sc = p[0]
py = p[1]
if sc<curr_score and py==1:
false_neg+=1
curr_pr = float(true_pos)/(true_pos+false_pos)
if (true_pos+false_neg)==0:
curr_rec=0.0
else:
curr_rec = float(true_pos)/(true_pos+false_neg)
precision.append(curr_pr)
recall.append(curr_rec)
prc = zip(recall, precision)
return prc
def calc_auc(input_zip):
indices = [x for x in range(0, len(input_zip)-1)]
auc = 0
for ind in indices:
w = - input_zip[ind][0] + input_zip[ind+1][0]
h1 = input_zip[ind][1]
h2 = input_zip[ind+1][1]
#print w, h1, h2
minh = min(h1, h2)
        maxh = max(h1, h2)
rectangleS = w*minh
triangleS = (w*(maxh-minh))/2
currS = rectangleS+triangleS
auc+=currS
return auc
'''
scores = [0.8, 0.5, 0.39, 0.7, 0, 0]
y = [0,1,0,0,1,1]
prc = calc_recall_precision(scores, y)
print prc
mAP = calc_mean_ap(prc)
print mAP
#print result_zip
'''
|
py | 7dffc361741aa6f2bbf04926406ea6dad8e4d22d | try:
from builtins import object
except ImportError:
pass
import warnings
import sys
from .utils import InheritedStuff
from .utils import Stuff
from functools import partial
from transitions import Machine, MachineError, State, EventData
from transitions.core import listify, _prep_ordered_arg
from unittest import TestCase, skipIf
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def on_exit_A(event):
event.model.exit_A_called = True
def on_exit_B(event):
event.model.exit_B_called = True
class TestTransitions(TestCase):
def setUp(self):
self.stuff = Stuff()
def tearDown(self):
pass
def test_init_machine_with_hella_arguments(self):
states = [
State('State1'),
'State2',
{
'name': 'State3',
'on_enter': 'hello_world'
}
]
transitions = [
{'trigger': 'advance',
'source': 'State2',
'dest': 'State3'
}
]
s = Stuff()
m = Machine(model=s, states=states, transitions=transitions, initial='State2')
s.advance()
self.assertEqual(s.message, 'Hello World!')
def test_listify(self):
self.assertEqual(listify(4), [4])
self.assertEqual(listify(None), [])
self.assertEqual(listify((4, 5)), (4, 5))
self.assertEqual(listify([1, 3]), [1, 3])
def test_property_initial(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
self.assertEqual(m.initial, 'A')
m = Machine(states=states, transitions=transitions, initial='C')
self.assertEqual(m.initial, 'C')
m = Machine(states=states, transitions=transitions)
self.assertEqual(m.initial, 'initial')
def test_transition_definitions(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk()
self.assertEqual(m.state, 'B')
# Define with list of lists
transitions = [
['walk', 'A', 'B'],
['run', 'B', 'C'],
['sprint', 'C', 'D']
]
m = Machine(states=states, transitions=transitions, initial='A')
m.to_C()
m.sprint()
self.assertEqual(m.state, 'D')
def test_transitioning(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B')
s.machine.add_transition('advance', 'B', 'C')
s.machine.add_transition('advance', 'C', 'D')
s.advance()
self.assertEqual(s.state, 'B')
self.assertFalse(s.is_A())
self.assertTrue(s.is_B())
s.advance()
self.assertEqual(s.state, 'C')
def test_pass_state_instances_instead_of_names(self):
state_A = State('A')
state_B = State('B')
states = [state_A, state_B]
m = Machine(states=states, initial=state_A)
assert m.state == 'A'
m.add_transition('advance', state_A, state_B)
m.advance()
assert m.state == 'B'
state_B2 = State('B', on_enter='this_passes')
with self.assertRaises(ValueError):
m.add_transition('advance2', state_A, state_B2)
m2 = Machine(states=states, initial=state_A.name)
assert m.initial == m2.initial
with self.assertRaises(ValueError):
Machine(states=states, initial=State('A'))
def test_conditions(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B', conditions='this_passes')
s.machine.add_transition('advance', 'B', 'C', unless=['this_fails'])
s.machine.add_transition('advance', 'C', 'D', unless=['this_fails',
'this_passes'])
s.advance()
self.assertEqual(s.state, 'B')
s.advance()
self.assertEqual(s.state, 'C')
s.advance()
self.assertEqual(s.state, 'C')
def test_conditions_with_partial(self):
def check(result):
return result
s = self.stuff
s.machine.add_transition('advance', 'A', 'B',
conditions=partial(check, True))
s.machine.add_transition('advance', 'B', 'C',
unless=[partial(check, False)])
s.machine.add_transition('advance', 'C', 'D',
unless=[partial(check, False), partial(check, True)])
s.advance()
self.assertEqual(s.state, 'B')
s.advance()
self.assertEqual(s.state, 'C')
s.advance()
self.assertEqual(s.state, 'C')
def test_multiple_add_transitions_from_state(self):
s = self.stuff
s.machine.add_transition(
'advance', 'A', 'B', conditions=['this_fails'])
s.machine.add_transition('advance', 'A', 'C')
s.advance()
self.assertEqual(s.state, 'C')
def test_use_machine_as_model(self):
states = ['A', 'B', 'C', 'D']
m = Machine(states=states, initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move_to_C', 'B', 'C')
m.move()
self.assertEqual(m.state, 'B')
def test_state_change_listeners(self):
s = self.stuff
s.machine.add_transition('advance', 'A', 'B')
s.machine.add_transition('reverse', 'B', 'A')
s.machine.on_enter_B('hello_world')
s.machine.on_exit_B('goodbye')
s.advance()
self.assertEqual(s.state, 'B')
self.assertEqual(s.message, 'Hello World!')
s.reverse()
self.assertEqual(s.state, 'A')
self.assertTrue(s.message.startswith('So long'))
def test_before_after_callback_addition(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B')
trans = m.events['move'].transitions['A'][0]
trans.add_callback('after', 'increase_level')
m.model.move()
self.assertEqual(m.model.level, 2)
def test_before_after_transition_listeners(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move', 'B', 'C')
m.before_move('increase_level')
m.model.move()
self.assertEqual(m.model.level, 2)
m.model.move()
self.assertEqual(m.model.level, 3)
def test_prepare(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B', prepare='increase_level')
m.add_transition('move', 'B', 'C', prepare='increase_level')
m.add_transition('move', 'C', 'A', prepare='increase_level', conditions='this_fails')
m.add_transition('dont_move', 'A', 'C', prepare='increase_level')
m.prepare_move('increase_level')
m.model.move()
self.assertEqual(m.model.state, 'B')
self.assertEqual(m.model.level, 3)
m.model.move()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
# State does not advance, but increase_level still runs
m.model.move()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 7)
# An invalid transition shouldn't execute the callback
try:
m.model.dont_move()
except MachineError as e:
self.assertTrue("Can't trigger event" in str(e))
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 7)
def test_state_model_change_listeners(self):
s = self.stuff
s.machine.add_transition('go_e', 'A', 'E')
s.machine.add_transition('go_f', 'E', 'F')
s.machine.on_enter_F('hello_F')
s.go_e()
self.assertEqual(s.state, 'E')
self.assertEqual(s.message, 'I am E!')
s.go_f()
self.assertEqual(s.state, 'F')
self.assertEqual(s.exit_message, 'E go home...')
assert 'I am F!' in s.message
assert 'Hello F!' in s.message
def test_inheritance(self):
states = ['A', 'B', 'C', 'D', 'E']
s = InheritedStuff(states=states, initial='A')
s.add_transition('advance', 'A', 'B', conditions='this_passes')
s.add_transition('advance', 'B', 'C')
s.add_transition('advance', 'C', 'D')
s.advance()
self.assertEqual(s.state, 'B')
self.assertFalse(s.is_A())
self.assertTrue(s.is_B())
s.advance()
self.assertEqual(s.state, 'C')
class NewMachine(Machine):
def __init__(self, *args, **kwargs):
super(NewMachine, self).__init__(*args, **kwargs)
n = NewMachine(states=states, transitions=[['advance', 'A', 'B']], initial='A')
self.assertTrue(n.is_A())
n.advance()
self.assertTrue(n.is_B())
with self.assertRaises(ValueError):
NewMachine(state=['A', 'B'])
def test_send_event_data_callbacks(self):
states = ['A', 'B', 'C', 'D', 'E']
s = Stuff()
# First pass positional and keyword args directly to the callback
m = Machine(model=s, states=states, initial='A', send_event=False,
auto_transitions=True)
m.add_transition(
trigger='advance', source='A', dest='B', before='set_message')
s.advance(message='Hallo. My name is Inigo Montoya.')
self.assertTrue(s.message.startswith('Hallo.'))
s.to_A()
s.advance('Test as positional argument')
self.assertTrue(s.message.startswith('Test as'))
# Now wrap arguments in an EventData instance
m.send_event = True
m.add_transition(
trigger='advance', source='B', dest='C', before='extract_message')
s.advance(message='You killed my father. Prepare to die.')
self.assertTrue(s.message.startswith('You'))
def test_send_event_data_conditions(self):
states = ['A', 'B', 'C', 'D']
s = Stuff()
# First pass positional and keyword args directly to the condition
m = Machine(model=s, states=states, initial='A', send_event=False)
m.add_transition(
trigger='advance', source='A', dest='B',
conditions='this_fails_by_default')
s.advance(boolean=True)
self.assertEqual(s.state, 'B')
# Now wrap arguments in an EventData instance
m.send_event = True
m.add_transition(
trigger='advance', source='B', dest='C',
conditions='extract_boolean')
s.advance(boolean=False)
self.assertEqual(s.state, 'B')
def test_auto_transitions(self):
states = ['A', {'name': 'B'}, State(name='C')]
m = Machine('self', states, initial='A', auto_transitions=True)
m.to_B()
self.assertEqual(m.state, 'B')
m.to_C()
self.assertEqual(m.state, 'C')
m.to_A()
self.assertEqual(m.state, 'A')
# Should fail if auto transitions is off...
m = Machine('self', states, initial='A', auto_transitions=False)
with self.assertRaises(AttributeError):
m.to_C()
def test_ordered_transitions(self):
states = ['beginning', 'middle', 'end']
m = Machine('self', states)
m.add_ordered_transitions()
self.assertEqual(m.state, 'initial')
m.next_state()
self.assertEqual(m.state, 'beginning')
m.next_state()
m.next_state()
self.assertEqual(m.state, 'end')
m.next_state()
self.assertEqual(m.state, 'initial')
        # Do not include initial state in loop
m = Machine('self', states)
m.add_ordered_transitions(loop_includes_initial=False)
m.to_end()
m.next_state()
self.assertEqual(m.state, 'beginning')
# Do not loop transitions
m = Machine('self', states)
m.add_ordered_transitions(loop=False)
m.to_end()
with self.assertRaises(MachineError):
m.next_state()
# Test user-determined sequence and trigger name
m = Machine('self', states, initial='beginning')
m.add_ordered_transitions(['end', 'beginning'], trigger='advance')
m.advance()
self.assertEqual(m.state, 'end')
m.advance()
self.assertEqual(m.state, 'beginning')
# Via init argument
m = Machine('self', states, initial='beginning', ordered_transitions=True)
m.next_state()
self.assertEqual(m.state, 'middle')
# Alter initial state
m = Machine('self', states, initial='middle', ordered_transitions=True)
m.next_state()
self.assertEqual(m.state, 'end')
m.next_state()
self.assertEqual(m.state, 'beginning')
def test_ordered_transition_error(self):
m = Machine(states=['A'], initial='A')
with self.assertRaises(ValueError):
m.add_ordered_transitions()
m.add_state('B')
m.add_ordered_transitions()
m.add_state('C')
with self.assertRaises(ValueError):
m.add_ordered_transitions(['C'])
def test_ignore_invalid_triggers(self):
a_state = State('A')
transitions = [['a_to_b', 'A', 'B']]
# Exception is triggered by default
b_state = State('B')
m1 = Machine('self', states=[a_state, b_state], transitions=transitions,
initial='B')
with self.assertRaises(MachineError):
m1.a_to_b()
# Exception is suppressed, so this passes
b_state = State('B', ignore_invalid_triggers=True)
m2 = Machine('self', states=[a_state, b_state], transitions=transitions,
initial='B')
m2.a_to_b()
# Set for some states but not others
new_states = ['C', 'D']
m1.add_states(new_states, ignore_invalid_triggers=True)
m1.to_D()
m1.a_to_b() # passes because exception suppressed for D
m1.to_B()
with self.assertRaises(MachineError):
m1.a_to_b()
# Set at machine level
m3 = Machine('self', states=[a_state, b_state], transitions=transitions,
initial='B', ignore_invalid_triggers=True)
m3.a_to_b()
def test_string_callbacks(self):
m = Machine(states=['A', 'B'],
before_state_change='before_state_change',
after_state_change='after_state_change', send_event=True,
initial='A', auto_transitions=True)
m.before_state_change = MagicMock()
m.after_state_change = MagicMock()
m.to_B()
self.assertTrue(m.before_state_change[0].called)
self.assertTrue(m.after_state_change[0].called)
# after_state_change should have been called with EventData
event_data = m.after_state_change[0].call_args[0][0]
self.assertIsInstance(event_data, EventData)
self.assertTrue(event_data.result)
def test_function_callbacks(self):
before_state_change = MagicMock()
after_state_change = MagicMock()
m = Machine('self', states=['A', 'B'],
before_state_change=before_state_change,
after_state_change=after_state_change, send_event=True,
initial='A', auto_transitions=True)
m.to_B()
self.assertTrue(m.before_state_change[0].called)
self.assertTrue(m.after_state_change[0].called)
def test_state_callbacks(self):
class Model:
def on_enter_A(self):
pass
def on_exit_A(self):
pass
def on_enter_B(self):
pass
def on_exit_B(self):
pass
states = [State(name='A', on_enter='on_enter_A', on_exit='on_exit_A'),
State(name='B', on_enter='on_enter_B', on_exit='on_exit_B')]
machine = Machine(Model(), states=states)
state_a = machine.get_state('A')
state_b = machine.get_state('B')
self.assertEqual(len(state_a.on_enter), 1)
self.assertEqual(len(state_a.on_exit), 1)
self.assertEqual(len(state_b.on_enter), 1)
self.assertEqual(len(state_b.on_exit), 1)
def test_state_callable_callbacks(self):
class Model:
def __init__(self):
self.exit_A_called = False
self.exit_B_called = False
def on_enter_A(self, event):
pass
def on_enter_B(self, event):
pass
states = [State(name='A', on_enter='on_enter_A', on_exit='tests.test_core.on_exit_A'),
State(name='B', on_enter='on_enter_B', on_exit=on_exit_B),
State(name='C', on_enter='tests.test_core.AAAA')]
model = Model()
machine = Machine(model, states=states, send_event=True, initial='A')
state_a = machine.get_state('A')
state_b = machine.get_state('B')
self.assertEqual(len(state_a.on_enter), 1)
self.assertEqual(len(state_a.on_exit), 1)
self.assertEqual(len(state_b.on_enter), 1)
self.assertEqual(len(state_b.on_exit), 1)
model.to_B()
self.assertTrue(model.exit_A_called)
model.to_A()
self.assertTrue(model.exit_B_called)
with self.assertRaises(AttributeError):
model.to_C()
def test_pickle(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B'},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk()
dump = pickle.dumps(m)
self.assertIsNotNone(dump)
m2 = pickle.loads(dump)
self.assertEqual(m.state, m2.state)
m2.run()
def test_pickle_model(self):
import sys
if sys.version_info < (3, 4):
import dill as pickle
else:
import pickle
self.stuff.to_B()
dump = pickle.dumps(self.stuff)
self.assertIsNotNone(dump)
model2 = pickle.loads(dump)
self.assertEqual(self.stuff.state, model2.state)
model2.to_F()
def test_queued(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
def change_state(machine):
self.assertEqual(machine.state, 'A')
if machine.has_queue:
machine.run(machine=machine)
self.assertEqual(machine.state, 'A')
else:
with self.assertRaises(MachineError):
machine.run(machine=machine)
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B', 'before': change_state},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = Machine(states=states, transitions=transitions, initial='A')
m.walk(machine=m)
self.assertEqual(m.state, 'B')
m = Machine(states=states, transitions=transitions, initial='A', queued=True)
m.walk(machine=m)
self.assertEqual(m.state, 'C')
def test_queued_errors(self):
def before_change(machine):
if machine.has_queue:
machine.to_A(machine)
machine._queued = False
def after_change(machine):
machine.to_C(machine)
def failed_transition(machine):
raise ValueError('Something was wrong')
states = ['A', 'B', 'C']
transitions = [{'trigger': 'do', 'source': '*', 'dest': 'C', 'before': failed_transition}]
m = Machine(states=states, transitions=transitions, queued=True,
before_state_change=before_change, after_state_change=after_change)
with self.assertRaises(MachineError):
m.to_B(machine=m)
with self.assertRaises(ValueError):
m.do(machine=m)
def test___getattr___and_identify_callback(self):
m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
m.add_transition('move', 'A', 'B')
m.add_transition('move', 'B', 'C')
callback = m.__getattr__('before_move')
self.assertTrue(callable(callback))
with self.assertRaises(AttributeError):
m.__getattr__('before_no_such_transition')
with self.assertRaises(AttributeError):
m.__getattr__('before_no_such_transition')
with self.assertRaises(AttributeError):
m.__getattr__('__no_such_method__')
with self.assertRaises(AttributeError):
m.__getattr__('')
type, target = m._identify_callback('on_exit_foobar')
self.assertEqual(type, 'on_exit')
self.assertEqual(target, 'foobar')
type, target = m._identify_callback('on_exitfoobar')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('notacallback_foobar')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('totallyinvalid')
self.assertEqual(type, None)
self.assertEqual(target, None)
type, target = m._identify_callback('before__foobar')
self.assertEqual(type, 'before')
self.assertEqual(target, '_foobar')
type, target = m._identify_callback('before__this__user__likes__underscores___')
self.assertEqual(type, 'before')
self.assertEqual(target, '_this__user__likes__underscores___')
type, target = m._identify_callback('before_stuff')
self.assertEqual(type, 'before')
self.assertEqual(target, 'stuff')
type, target = m._identify_callback('before_trailing_underscore_')
self.assertEqual(type, 'before')
self.assertEqual(target, 'trailing_underscore_')
type, target = m._identify_callback('before_')
self.assertIs(type, None)
self.assertIs(target, None)
type, target = m._identify_callback('__')
self.assertIs(type, None)
self.assertIs(target, None)
type, target = m._identify_callback('')
self.assertIs(type, None)
self.assertIs(target, None)
def test_state_and_transition_with_underscore(self):
m = Machine(Stuff(), states=['_A_', '_B_', '_C_'], initial='_A_')
m.add_transition('_move_', '_A_', '_B_', prepare='increase_level')
m.add_transition('_after_', '_B_', '_C_', prepare='increase_level')
m.add_transition('_on_exit_', '_C_', '_A_', prepare='increase_level', conditions='this_fails')
m.model._move_()
self.assertEqual(m.model.state, '_B_')
self.assertEqual(m.model.level, 2)
m.model._after_()
self.assertEqual(m.model.state, '_C_')
self.assertEqual(m.model.level, 3)
# State does not advance, but increase_level still runs
m.model._on_exit_()
self.assertEqual(m.model.state, '_C_')
self.assertEqual(m.model.level, 4)
def test_callback_identification(self):
m = Machine(Stuff(), states=['A', 'B', 'C', 'D', 'E', 'F'], initial='A')
m.add_transition('transition', 'A', 'B', before='increase_level')
m.add_transition('after', 'B', 'C', before='increase_level')
m.add_transition('on_exit_A', 'C', 'D', before='increase_level', conditions='this_fails')
m.add_transition('check', 'C', 'E', before='increase_level')
m.add_transition('prepare', 'E', 'F', before='increase_level')
m.add_transition('before', 'F', 'A', before='increase_level')
m.before_transition('increase_level')
m.before_after('increase_level')
m.before_on_exit_A('increase_level')
m.after_check('increase_level')
m.before_prepare('increase_level')
m.before_before('increase_level')
m.model.transition()
self.assertEqual(m.model.state, 'B')
self.assertEqual(m.model.level, 3)
m.model.after()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
m.model.on_exit_A()
self.assertEqual(m.model.state, 'C')
self.assertEqual(m.model.level, 5)
m.model.check()
self.assertEqual(m.model.state, 'E')
self.assertEqual(m.model.level, 7)
m.model.prepare()
self.assertEqual(m.model.state, 'F')
self.assertEqual(m.model.level, 9)
m.model.before()
self.assertEqual(m.model.state, 'A')
self.assertEqual(m.model.level, 11)
# An invalid transition shouldn't execute the callback
with self.assertRaises(MachineError):
m.model.on_exit_A()
def test_process_trigger(self):
m = Machine(states=['raw', 'processed'], initial='raw')
m.add_transition('process', 'raw', 'processed')
m.process()
self.assertEqual(m.state, 'processed')
def test_multiple_models(self):
s1, s2 = Stuff(), Stuff()
states = ['A', 'B', 'C']
m = Machine(model=[s1, s2], states=states,
initial=states[0])
self.assertEqual(len(m.models), 2)
self.assertEqual(len(m.model), 2)
m.add_transition('advance', 'A', 'B')
s1.advance()
self.assertEqual(s1.state, 'B')
self.assertEqual(s2.state, 'A')
m = Machine(model=s1, states=states,
initial=states[0])
# for backwards compatibility model should return a model instance
# rather than a list
self.assertNotIsInstance(m.model, list)
def test_dispatch(self):
s1, s2 = Stuff(), Stuff()
states = ['A', 'B', 'C']
m = Machine(model=s1, states=states, ignore_invalid_triggers=True,
initial=states[0], transitions=[['go', 'A', 'B'], ['go', 'B', 'C']])
m.add_model(s2, initial='B')
m.dispatch('go')
self.assertEqual(s1.state, 'B')
self.assertEqual(s2.state, 'C')
def test_string_trigger(self):
def return_value(value):
return value
class Model:
def trigger(self, value):
return value
self.stuff.machine.add_transition('do', '*', 'C')
self.stuff.trigger('do')
self.assertTrue(self.stuff.is_C())
self.stuff.machine.add_transition('maybe', 'C', 'A', conditions=return_value)
self.assertFalse(self.stuff.trigger('maybe', value=False))
self.assertTrue(self.stuff.trigger('maybe', value=True))
self.assertTrue(self.stuff.is_A())
with self.assertRaises(AttributeError):
self.stuff.trigger('not_available')
model = Model()
m = Machine(model=model)
self.assertEqual(model.trigger(5), 5)
def test_get_triggers(self):
states = ['A', 'B', 'C']
transitions = [['a2b', 'A', 'B'],
['a2c', 'A', 'C'],
['c2b', 'C', 'B']]
machine = Machine(states=states, transitions=transitions, initial='A', auto_transitions=False)
self.assertEqual(len(machine.get_triggers('A')), 2)
self.assertEqual(len(machine.get_triggers('B')), 0)
self.assertEqual(len(machine.get_triggers('C')), 1)
# self stuff machine should have to-transitions to every state
self.assertEqual(len(self.stuff.machine.get_triggers('B')), len(self.stuff.machine.states))
def test_skip_override(self):
local_mock = MagicMock()
class Model(object):
def go(self):
local_mock()
model = Model()
transitions = [['go', 'A', 'B'], ['advance', 'A', 'B']]
m = self.stuff.machine_cls(model=model, states=['A', 'B'], transitions=transitions, initial='A')
model.go()
self.assertEqual(model.state, 'A')
self.assertTrue(local_mock.called)
model.advance()
self.assertEqual(model.state, 'B')
model.to_A()
model.trigger('go')
self.assertEqual(model.state, 'B')
@skipIf(sys.version_info < (3, ),
"String-checking disabled on PY-2 because is different")
def test_repr(self):
def a_condition(event_data):
self.assertRegex(
str(event_data.transition.conditions),
r"\[<Condition\(<function TestTransitions.test_repr.<locals>"
r".a_condition at [^>]+>\)@\d+>\]")
return True
# No transition has been assigned to EventData yet
def check_prepare_repr(event_data):
self.assertRegex(
str(event_data),
r"<EventData\('<State\('A'\)@\d+>', "
r"None\)@\d+>")
def check_before_repr(event_data):
self.assertRegex(
str(event_data),
r"<EventData\('<State\('A'\)@\d+>', "
r"<Transition\('A', 'B'\)@\d+>\)@\d+>")
m.checked = True
m = Machine(states=['A', 'B'],
prepare_event=check_prepare_repr,
before_state_change=check_before_repr, send_event=True,
initial='A')
m.add_transition('do_strcheck', 'A', 'B', conditions=a_condition)
self.assertTrue(m.do_strcheck())
self.assertIn('checked', vars(m))
def test_machine_prepare(self):
global_mock = MagicMock()
local_mock = MagicMock()
def global_callback():
global_mock()
def local_callback():
local_mock()
def always_fails():
return False
transitions = [
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'conditions': always_fails, 'prepare': local_callback},
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'prepare': local_callback},
]
m = Machine(states=['A', 'B'], transitions=transitions,
prepare_event=global_callback, initial='A')
m.go()
self.assertEqual(global_mock.call_count, 1)
self.assertEqual(local_mock.call_count, len(transitions))
def test_machine_finalize(self):
finalize_mock = MagicMock()
def always_fails(event_data):
return False
def always_raises(event_data):
raise Exception()
transitions = [
{'trigger': 'go', 'source': 'A', 'dest': 'B'},
{'trigger': 'planA', 'source': 'B', 'dest': 'A', 'conditions': always_fails},
{'trigger': 'planB', 'source': 'B', 'dest': 'A', 'conditions': always_raises}
]
m = self.stuff.machine_cls(states=['A', 'B'], transitions=transitions,
finalize_event=finalize_mock, initial='A', send_event=True)
m.go()
self.assertEqual(finalize_mock.call_count, 1)
m.planA()
event_data = finalize_mock.call_args[0][0]
self.assertIsInstance(event_data, EventData)
self.assertEqual(finalize_mock.call_count, 2)
self.assertFalse(event_data.result)
with self.assertRaises(Exception):
m.planB()
self.assertEqual(finalize_mock.call_count, 3)
def test_machine_finalize_exception(self):
exception = ZeroDivisionError()
def always_raises(event):
raise exception
def finalize_callback(event):
self.assertEqual(event.error, exception)
m = self.stuff.machine_cls(states=['A', 'B'], send_event=True, initial='A',
before_state_change=always_raises,
finalize_event=finalize_callback)
with self.assertRaises(ZeroDivisionError):
m.to_B()
def test_prep_ordered_arg(self):
self.assertTrue(len(_prep_ordered_arg(3, None)) == 3)
self.assertTrue(all(a is None for a in _prep_ordered_arg(3, None)))
with self.assertRaises(ValueError):
_prep_ordered_arg(3, [None, None])
def test_ordered_transition_callback(self):
class Model:
def __init__(self):
self.flag = False
def make_true(self):
self.flag = True
model = Model()
states = ['beginning', 'middle', 'end']
transits = [None, None, 'make_true']
m = Machine(model, states, initial='beginning')
m.add_ordered_transitions(before=transits)
model.next_state()
self.assertFalse(model.flag)
model.next_state()
model.next_state()
self.assertTrue(model.flag)
def test_ordered_transition_condition(self):
class Model:
def __init__(self):
self.blocker = False
def check_blocker(self):
return self.blocker
model = Model()
states = ['beginning', 'middle', 'end']
m = Machine(model, states, initial='beginning')
m.add_ordered_transitions(conditions=[None, None, 'check_blocker'])
model.to_end()
self.assertFalse(model.next_state())
model.blocker = True
self.assertTrue(model.next_state())
def test_get_transitions(self):
states = ['A', 'B', 'C', 'D']
m = Machine('self', states, initial='a', auto_transitions=False)
m.add_transition('go', ['A', 'B', 'C'], 'D')
m.add_transition('run', 'A', 'D')
self.assertEqual(
{(t.source, t.dest) for t in m.get_transitions('go')},
{('A', 'D'), ('B', 'D'), ('C', 'D')})
self.assertEqual(
[(t.source, t.dest)
for t in m.get_transitions(source='A', dest='D')],
[('A', 'D'), ('A', 'D')])
def test_remove_transition(self):
self.stuff.machine.add_transition('go', ['A', 'B', 'C'], 'D')
self.stuff.machine.add_transition('walk', 'A', 'B')
self.stuff.go()
self.assertEqual(self.stuff.state, 'D')
self.stuff.to_A()
self.stuff.machine.remove_transition('go', source='A')
with self.assertRaises(MachineError):
self.stuff.go()
self.stuff.machine.add_transition('go', 'A', 'D')
self.stuff.walk()
self.stuff.go()
self.assertEqual(self.stuff.state, 'D')
self.stuff.to_C()
self.stuff.machine.remove_transition('go', dest='D')
self.assertFalse(hasattr(self.stuff, 'go'))
def test_reflexive_transition(self):
self.stuff.machine.add_transition('reflex', ['A', 'B'], '=', after='increase_level')
self.assertEqual(self.stuff.state, 'A')
self.stuff.reflex()
self.assertEqual(self.stuff.state, 'A')
self.assertEqual(self.stuff.level, 2)
self.stuff.to_B()
self.assertEqual(self.stuff.state, 'B')
self.stuff.reflex()
self.assertEqual(self.stuff.state, 'B')
self.assertEqual(self.stuff.level, 3)
self.stuff.to_C()
with self.assertRaises(MachineError):
self.stuff.reflex()
self.assertEqual(self.stuff.level, 3)
def test_internal_transition(self):
m = Machine(Stuff(), states=['A', 'B'], initial='A')
m.add_transition('move', 'A', None, prepare='increase_level')
m.model.move()
self.assertEqual(m.model.state, 'A')
self.assertEqual(m.model.level, 2)
class TestWarnings(TestCase):
def test_warning(self):
import sys
# does not work with python 3.3. However, the warning is shown when Machine is initialized manually.
if (3, 3) <= sys.version_info < (3, 4):
return
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings(action='default', message=r"Starting from transitions version 0\.6\.0 .*")
m = Machine(None)
m = Machine(add_self=False)
self.assertEqual(len(w), 1)
for warn in w:
self.assertEqual(warn.category, DeprecationWarning)
|
py | 7dffc3bf46646f42f8e182c622c390ee6647a279 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Syscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
while True:
try:
assert(len(node_dest.getblock(blockhash, False)) > 0)
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.nodes[1].generate(2)[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.nodes[0].generate(3)[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.nodes[1].generate(3)[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generate(4)
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
|
py | 7dffc472a71c708d6dc42b96fb7b6d24662be9da | # Copyright 2020 The Microsoft DeepSpeed Team
"""
DeepSpeed runner is the main front-end to launching multi-worker
training jobs with DeepSpeed. By default this uses pdsh to ssh in
parallel into multiple worker nodes and launch all the necessary
processes per rank for training.
"""
import os
import sys
import json
import shutil
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..utils import logger
DLTS_HOSTFILE = "/job/hostfile"
EXPORT_ENVS = ["NCCL", "PYTHON", "MV2", 'UCX']
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
PDSH_MAX_FAN_OUT = 1024
def parse_args(args=None):
parser = argparse.ArgumentParser(
description="DeepSpeed runner to help launch distributed "
"multi-node/multi-gpu training jobs.")
parser.add_argument("-H",
"--hostfile",
type=str,
default=DLTS_HOSTFILE,
help="Hostfile path (in MPI style) that defines the "
"resource pool available to the job (e.g., "
"worker-0 slots=4)")
parser.add_argument("-i",
"--include",
type=str,
default="",
help='''Specify hardware resources to use during execution.
String format is
NODE_SPEC[@NODE_SPEC ...],
where
NODE_SPEC=NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include all slots on that host.
Example: -i "worker-0@worker-1:0,2" will use all slots
on worker-0 and slots [0, 2] on worker-1.
''')
parser.add_argument("-e",
"--exclude",
type=str,
default="",
help='''Specify hardware resources to NOT use during execution.
Mutually exclusive with --include. Resource formatting
is the same as --include.
Example: -e "worker-1:0" will use all available
resources except slot 0 on worker-1.
''')
parser.add_argument("--num_nodes",
type=int,
default=-1,
help="Total number of worker nodes to run on, this will use "
"the top N hosts from the given hostfile.")
parser.add_argument("--num_gpus",
type=int,
default=-1,
help="Max number of GPUs to use on each node, will use "
"[0:N) GPU ids on each node.")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="(optional) Port used by PyTorch distributed for "
"communication during training.")
parser.add_argument("--master_addr",
default="",
type=str,
help="(optional) IP address of node 0, will be "
"inferred via 'hostname -I' if not specified.")
parser.add_argument("--launcher",
default=PDSH_LAUNCHER,
type=str,
help="(optional) choose launcher backend for multi-node "
"training. Options currently include PDSH, OpenMPI, MVAPICH.")
parser.add_argument("--launcher_args",
default="",
type=str,
help="(optional) pass launcher specific arguments as a "
"single quoted argument.")
parser.add_argument("--force_multi",
action="store_true",
help="Force multi-node launcher mode, helps in cases where user "
"wants to launch on single remote node.")
parser.add_argument("--detect_nvlink_pairs", action="store_true",
help="(optional) autodetects nvlink pairs and remaps CUDA_VISIBLE_DEVICES along the "
"fastest connections")
parser.add_argument("user_script",
type=str,
help="User script to launch, followed by any required "
"arguments.")
parser.add_argument('user_args', nargs=argparse.REMAINDER)
return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
if not os.path.isfile(hostfile_path):
logger.warning("Unable to find hostfile, will proceed with training "
"with local resources only.")
return None
# e.g., worker-0 slots=16
with open(hostfile_path, 'r') as fd:
resource_pool = collections.OrderedDict()
for line in fd.readlines():
line = line.strip()
if line == '':
# skip empty lines
continue
try:
hostname, slots = line.split()
_, slot_count = slots.split("=")
slot_count = int(slot_count)
except ValueError as err:
logger.error("Hostfile is not formatted correctly, unable to "
"proceed with training.")
raise err
if hostname in resource_pool:
logger.error("Hostfile contains duplicate hosts, unable to "
"proceed with training.")
raise ValueError("host {} is already defined".format(hostname))
resource_pool[hostname] = slot_count
return resource_pool
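# For reference, a hostfile containing
#
#   worker-0 slots=4
#   worker-1 slots=2
#
# is parsed by fetch_hostfile() into OrderedDict([('worker-0', 4), ('worker-1', 2)]).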
def parse_resource_filter(host_info, include_str="", exclude_str=""):
'''Parse an inclusion or exclusion string and filter a hostfile dictionary.
String format is NODE_SPEC[@NODE_SPEC ...], where
NODE_SPEC = NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include/exclude all slots on that host.
Examples:
include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
slots [0, 2] on worker-1.
exclude_str="worker-1:0" will use all available resources except
slot 0 on worker-1.
'''
# Constants that define our syntax
NODE_SEP = '@'
SLOT_LIST_START = ':'
SLOT_SEP = ','
# Ensure include/exclude are mutually exclusive
if (include_str != "") and (exclude_str != ""):
raise ValueError('include_str and exclude_str are mutually exclusive.')
# no-op
if (include_str == "") and (exclude_str == ""):
return host_info
# Either build from scratch or remove items
filtered_hosts = dict()
if include_str:
parse_str = include_str
if exclude_str != "":
filtered_hosts = deepcopy(host_info)
parse_str = exclude_str
# foreach node in the list
for node_config in parse_str.split(NODE_SEP):
# Node can either be alone or node:slot,slot,slot
if SLOT_LIST_START in node_config:
hostname, slots = node_config.split(SLOT_LIST_START)
slots = [int(x) for x in slots.split(SLOT_SEP)]
# sanity checks
if hostname not in host_info:
raise ValueError("Hostname '{}' not found in hostfile".format(hostname))
for s in slots:
if s not in host_info[hostname]:
raise ValueError("No slot '{}' specified on host '{}'".format(
s,
hostname))
# If include string, build the list from here
if include_str:
filtered_hosts[hostname] = slots
elif exclude_str:
for s in slots:
logger.info('removing {} from {}'.format(s, hostname))
filtered_hosts[hostname].remove(s)
# User just specified the whole node
else:
hostname = node_config
# sanity check hostname
if hostname not in host_info:
raise ValueError("Hostname '{}' not found in hostfile".format(hostname))
if include_str:
filtered_hosts[hostname] = host_info[hostname]
elif exclude_str:
filtered_hosts[hostname] = []
# Post-processing to remove duplicates and empty nodes
del_keys = []
for hostname in filtered_hosts:
# Remove duplicates
filtered_hosts[hostname] = list(set(filtered_hosts[hostname]))
# Remove empty hosts
if len(filtered_hosts[hostname]) == 0:
del_keys.append(hostname)
for name in del_keys:
del filtered_hosts[name]
# Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
# we map ranks to nodes correctly by maintaining host_info ordering.
ordered_hosts = collections.OrderedDict()
for host in host_info:
if host in filtered_hosts:
ordered_hosts[host] = filtered_hosts[host]
return ordered_hosts
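# Example (a sketch): with host_info = {'worker-0': [0, 1, 2, 3], 'worker-1': [0, 1, 2, 3]},
# parse_resource_filter(host_info, include_str="worker-0@worker-1:0,2") keeps all four
# slots on worker-0 and only slots 0 and 2 on worker-1, while
# parse_resource_filter(host_info, exclude_str="worker-1:0") keeps everything except
# slot 0 on worker-1.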
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
active_resources = collections.OrderedDict()
for hostname, slots in resource_pool.items():
active_resources[hostname] = list(range(slots))
return parse_resource_filter(active_resources,
include_str=inclusion,
exclude_str=exclusion)
def encode_world_info(world_info):
world_info_json = json.dumps(world_info).encode('utf-8')
world_info_base64 = base64.urlsafe_b64encode(world_info_json).decode('utf-8')
return world_info_base64
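# Example: encode_world_info({'localhost': [0, 1]}) URL-safe base64-encodes the JSON
# string '{"localhost": [0, 1]}' so the world info can travel as a single CLI argument.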
def main(args=None):
args = parse_args(args)
if args.num_nodes >= 0 or args.num_gpus >= 0:
if args.include != "" or args.exclude != "":
raise ValueError("Cannot specify num_nodes/gpus with include/exclude")
multi_node_exec = True
resource_pool = fetch_hostfile(args.hostfile)
if not resource_pool:
resource_pool = {}
device_count = torch.cuda.device_count()
if device_count == 0:
raise RuntimeError("Unable to proceed, no GPU resources available")
resource_pool['localhost'] = device_count
args.master_addr = "127.0.0.1"
multi_node_exec = False
if not multi_node_exec and args.num_nodes > 1:
raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")
active_resources = parse_inclusion_exclusion(resource_pool,
args.include,
args.exclude)
env = os.environ.copy()
if not args.master_addr:
first_host = list(active_resources.keys())[0]
hostname_cmd = ["ssh {} hostname -I".format(first_host)]
result = subprocess.check_output(hostname_cmd, shell=True)
args.master_addr = result.decode('utf-8').split()[0]
logger.info("Using IP address of {} for node {}".format(
args.master_addr,
first_host))
if args.num_nodes > 0:
updated_active_resources = collections.OrderedDict()
for count, hostname in enumerate(active_resources.keys()):
if args.num_nodes == count:
break
updated_active_resources[hostname] = active_resources[hostname]
active_resources = updated_active_resources
if args.num_gpus > 0:
updated_active_resources = collections.OrderedDict()
for hostname in active_resources.keys():
updated_active_resources[hostname] = list(range(args.num_gpus))
active_resources = updated_active_resources
# encode world info as base64 to make it easier to pass via command line
world_info_base64 = encode_world_info(active_resources)
multi_node_exec = args.force_multi or len(active_resources) > 1
if not multi_node_exec:
deepspeed_launch = [
sys.executable,
"-u",
"-m",
"deepspeed.launcher.launch",
"--world_info={}".format(world_info_base64),
"--master_addr={}".format(args.master_addr),
"--master_port={}".format(args.master_port)
]
if args.detect_nvlink_pairs:
deepspeed_launch += ["--detect_nvlink_pairs"]
cmd = deepspeed_launch + [args.user_script] + args.user_args
else:
args.launcher = args.launcher.lower()
if args.launcher == PDSH_LAUNCHER:
runner = PDSHRunner(args, world_info_base64)
elif args.launcher == OPENMPI_LAUNCHER:
runner = OpenMPIRunner(args, world_info_base64, resource_pool)
elif args.launcher == MVAPICH_LAUNCHER:
runner = MVAPICHRunner(args, world_info_base64, resource_pool)
else:
raise NotImplementedError(f"Unknown launcher {args.launcher}")
if not runner.backend_exists():
raise RuntimeError(f"launcher '{args.launcher}' not installed.")
curr_path = os.path.abspath('.')
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
else:
env['PYTHONPATH'] = curr_path
exports = ""
for var in env.keys():
if any([var.startswith(name) for name in EXPORT_ENVS]):
runner.add_export(var, env[var])
for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
if os.path.isfile(environ_file):
with open(environ_file, 'r') as fd:
for var in fd.readlines():
key, val = var.split('=')
runner.add_export(key, val)
cmd = runner.get_cmd(env, active_resources)
logger.info("cmd = {}".format(' '.join(cmd)))
result = subprocess.Popen(cmd, env=env)
result.wait()
# In case of failure must propagate the error-condition back to the caller (usually shell). The
# actual error and traceback should have been printed in the subprocess, so in order to avoid
# unnecessary noise we just quietly exit here with the same code as the subprocess
if result.returncode > 0:
sys.exit(result.returncode)
if __name__ == "__main__":
main()
|
py | 7dffc47388637289af85bbd0117d5f318b060812 | import json
from ..tools.tools import clean
def get_syns(soup, multi_word=False):
if soup is None:
return list()
# Extract Synonym Components from HTML
syn_lists = [syn_div.find_all("div", {"class": "relevancy-list"})[0] for syn_div in
soup.find_all("div", {"class": "synonyms"})]
synonyms = list()
for syn_list in syn_lists:
syn_items = syn_list.find_all("li")
for item in syn_items:
# Extract Text and Relevance from component
text = clean(item.find_all("span", {"class": "text"})[0].text)
if text is not None:
rel = int(json.loads(item.find_all("a")[0].get("data-category"))["name"].split("-")[1]) / 3
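                # For example, a data-category attribute of '{"name": "relevant-3"}'
                # would yield rel = 3 / 3 = 1.0 (the label prefix is an assumption;
                # only the number after the dash is used here).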
                # Keep single-word synonyms only, unless multi_word is True
if (multi_word or len(text.split(" ")) == 1) and rel is not None:
synonyms.append((text, rel))
if rel is None:
print(text, "has no relevance")
else:
break
# Add words in title bar
title_words = _get_thesaurus_titles(soup)
if len(title_words) > 0:
synonyms.extend([(title_word, 1) for title_word in title_words])
else:
pass
# print("No Synonym Titles")
# Sort by relevance
synonyms.sort(key=lambda x: x[1], reverse=True)
return synonyms
def _get_thesaurus_titles(soup):
words_list = set()
list1 = soup.find("div", {"class": "mask"}).find_all("a", {"class": "pos-tab"})
# print(len(list1))
for item1 in list1:
list2 = item1.find_all("strong", {"class": "ttl"})
for item2 in list2:
titles = item2.contents[0].split(",")
for title_item in titles:
if len(title_item.strip().split(" ")) == 1:
title = clean(title_item)
if title is not None:
words_list.add(title)
return words_list
|
py | 7dffc49c5cafc62c76f174c7661095652a2110a6 | # Generated by Django 2.2.14 on 2021-08-09 08:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_add_one_time_auth_token_model'),
]
operations = [
migrations.AddField(
model_name='onetimeauthtoken',
name='request_identifier',
field=models.CharField(max_length=1000, null=True),
),
]
|
py | 7dffc4d9a5d0b33eaed8eb763e2b47cb93c391f8 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-05 02:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Curso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sigla', models.CharField(max_length=5, unique=True)),
('nome', models.CharField(max_length=50, unique=True)),
('objetivo', models.TextField(blank=True, verbose_name='Obejtivo')),
('perfil_profissional', models.TextField(blank=True, verbose_name='Perfil Profissional')),
('mercado_trabalho', models.TextField(blank=True, verbose_name='Mercado de Trabalho')),
],
options={
'db_table': 'curso',
'managed': False,
},
),
]
|
py | 7dffc4f35f72a7e11727dad65e5a6e1bbb638a2a | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Source(models.Model):
source_name = models.CharField(max_length=200)
source_address = models.CharField(max_length=600)
is_public = models.BooleanField()
modified = models.DateField(default = None, null=True)
def __str__(self):
return self.source_name
class Category(models.Model):
category_name = models.CharField(max_length=200)
def __str__(self):
return self.category_name
class Event(models.Model):
event_name = models.CharField(max_length=200)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.event_name
class Article(models.Model):
source = models.ForeignKey(Source, on_delete=models.CASCADE)
article_name = models.CharField(max_length=600)
article_address = models.CharField(max_length=600)
pub_date = models.DateTimeField('date published')
is_public = models.BooleanField()
article_event = models.ForeignKey(Event, on_delete=models.CASCADE)
article_category = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.article_name
class User_source(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
class UserProfile(models.Model):
user = models.OneToOneField(User)
#TODO:Ustawienia uzytkownika
def __str__(self):
return "%s's profile" % self.user
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
post_save.connect(create_user_profile, sender=User)
|
py | 7dffc53e6e7ca5b708526c53ce21eec647e75774 | ## 2. The dataset ##
import pandas as pd
votes = pd.read_csv('114_congress.csv')
print(votes.head())
## 3. Exploring the data ##
print(votes['party'].value_counts())
print(votes.mean())
## 4. Distance between Senators ##
from sklearn.metrics.pairwise import euclidean_distances
print(euclidean_distances(votes.iloc[0,3:].values.reshape(1, -1), votes.iloc[1,3:].values.reshape(1, -1)))
distance = euclidean_distances(votes.iloc[0,3:].values.reshape(1,-1), votes.iloc[2,3:].values.reshape(1,-1))
print(distance)
## 6. Initial clustering ##
import pandas as pd
from sklearn.cluster import KMeans
kmeans_model = KMeans(n_clusters=2, random_state=1)
senator_distances = kmeans_model.fit_transform(votes.iloc[:,3:])
## 7. Exploring the clusters ##
labels = kmeans_model.labels_
print(pd.crosstab(labels, votes['party']))
## 8. Exploring Senators in the wrong cluster ##
democratic_outliers = votes[(labels == 1) & (votes["party"] == "D")]
print(democratic_outliers)
## 9. Plotting out the clusters ##
import matplotlib.pyplot as plt
plt.scatter(senator_distances[:, 0], senator_distances[:, 1], c=labels, linewidths=0)
plt.show()
## 10. Finding the most extreme ##
extremism = (senator_distances ** 3).sum(axis=1)
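# Why cube rather than square: for distances [3, 1] the cubes sum to 27 + 1 = 28
# while the squares sum to 9 + 1 = 10, so cubing rewards senators who sit far from
# one cluster much more strongly than those moderately far from both.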
votes["extremism"] = extremism
votes.sort_values("extremism", inplace=True, ascending=False)
print(votes.head(10)) |
py | 7dffc621a2d8f85ca445f1e8ad8e4dc79b9b8132 | import logging, sys, json_logging, sanic
app = sanic.Sanic(name="sanic-web-app")
json_logging.init_sanic(enable_json=True)
json_logging.init_request_instrument(app)
# init the logger as usual
logger = logging.getLogger("sanic-integration-test-app")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
@app.route("/")
async def home(request):
logger.info("test log statement")
logger.info("test log statement with extra props", extra={'props': {"extra_property": 'extra_value'}})
# this will be faster
correlation_id = json_logging.get_correlation_id(request=request)
    # this will be slower, but works in contexts where you can't get a reference to the request object
correlation_id_without_request_obj = json_logging.get_correlation_id()
return sanic.response.text("hello world")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
|
py | 7dffc67c6a6e2104258545e0c9eef396a6b29489 | import numpy as np
from bokeh.models import ColumnDataSource, Legend
from bokeh.models.mappers import LinearColorMapper
from bokeh.palettes import Category10
from bokeh.plotting import figure
class Figure:
def __init__(self, monitoring_client, buffer_size=1, **kwargs):
"""
Base class to implement figures which are connected with a monitoring
client and automatically updated whenever the monitoring client
receives a new data block. Child classes only need to implement
create_figure and update_sources methods. kwargs of the __init__ are
forwarded to create_figure.
"""
self.monitoring_client = monitoring_client
self.buffer_size = buffer_size
data_shape = [*monitoring_client.block_shape]
data_shape[0] *= buffer_size
self.data = np.zeros(data_shape, dtype=monitoring_client.dtype)
self.figure = self.create_figure(**kwargs)
monitoring_client.figures.append(self)
def update_data(self, data):
"""
updates data buffer. Called by monitoring client whenever new data is
received.
"""
block_len, *_ = data.shape
self.data = np.roll(self.data, -block_len, axis=0)
self.data[-block_len:] = data
def create_figure(self, **kwargs):
raise NotImplementedError
def update_figure(self):
raise NotImplementedError
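# A minimal sketch of a custom subclass, assuming a monitoring client whose blocks
# are 2-D (time steps x channels); all names below are illustrative only.
#
# class FirstChannelLine(Figure):
#     def create_figure(self, title=''):
#         num_time_steps = self.data.shape[0]
#         self.source = ColumnDataSource(
#             data=dict(x=np.arange(num_time_steps), y=np.zeros(num_time_steps)))
#         fig = figure(title=title, toolbar_location=None)
#         fig.line('x', 'y', source=self.source)
#         return fig
#     def update_figure(self):
#         self.source.data['y'] = self.data[:, 0].tolist()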
class VerticalBars(Figure):
def create_figure(
self,
title='',
xlabel='',
ylabel='',
plot_width=750,
plot_height=500,
font_size=30,
y_range=(0., 1.),
labels=None,
):
"""
creating a bokeh vertical bars plot.
Args:
title:
xlabel: label for x axis
ylabel: label for y axis
plot_width:
plot_height:
font_size:
y_range: tuple (ymin, ymax) of minimal y value (ymin)
and maximal y value (ymax)
labels: Optional list of labels for the vertical bars.
"""
assert self.buffer_size == 1, self.buffer_size
assert self.data.ndim == 2, self.data.shape
num_classes = self.data.shape[-1]
if labels is None:
labels = [str(i) for i in range(num_classes)]
assert len(labels) == num_classes, (num_classes, len(labels), labels)
self.source = ColumnDataSource(
data=dict(
labels=labels,
y=np.zeros(num_classes).tolist(),
color=(Category10[10])[:num_classes]
)
)
fig = figure(
x_range=labels,
plot_height=plot_height,
plot_width=plot_width,
toolbar_location=None,
title=title
)
fig.vbar(
x='labels',
top='y',
width=.75,
source=self.source,
line_color='white',
fill_color='color'
)
fig.y_range.start, fig.y_range.end = y_range
fig.xaxis.axis_label = xlabel
fig.yaxis.axis_label = ylabel
fig.title.text_font_size = f'{font_size}pt'
fig.axis.major_label_text_font_size = f'{font_size}pt'
fig.xaxis.axis_label_text_font_size = f'{font_size}pt'
fig.yaxis.axis_label_text_font_size = f'{font_size}pt'
return fig
def update_figure(self):
"""
updating the vertical bars' heights
"""
self.source.data['y'] = self.data[-1].tolist()
class MultiLine(Figure):
def create_figure(
self,
title='',
xlabel='',
ylabel='',
x_ticks=None,
plot_width=750,
plot_height=500,
font_size=30,
x_range=None,
y_range=(0, 1),
labels=None,
line_width=5,
):
"""
Creating a bokeh multi line plot.
Args:
title: title of the plot
xlabel: label for x axis
ylabel: label for y axis
x_ticks: ticks of the x-axis. If None, [0, 1, 2, ...] will be used.
plot_width: width (in pixels) of the single plot
plot_height: height (in pixels) of the single plot
            font_size: font size used in the plot
            x_range: optional explicit range for the x axis. If None, the range
                is inferred from the data and the range padding is removed.
            y_range: tuple (ymin, ymax) of minimal y value (ymin)
and maximal y value (ymax)
labels: optional list of labels for the lines
line_width: width of the lines
"""
assert self.data.ndim == 2, self.data.shape
num_time_steps, num_lines = self.data.shape
if labels is None and num_lines > 1:
labels = [str(i) for i in range(num_lines)]
assert len(labels) == num_lines, (num_lines, len(labels), labels)
if x_ticks is None:
x_ticks = np.arange(num_time_steps).astype(np.float32)
self.sources = [
ColumnDataSource(
data=dict(
x=x_ticks,
y=np.zeros(num_time_steps).astype(np.float32)
)
) for _ in range(num_lines)
]
fig = figure(
plot_height=plot_height,
plot_width=plot_width,
title=title,
toolbar_location=None,
x_range=x_range,
y_range=y_range,
)
items = []
for line_idx in range(num_lines):
p = fig.line(
'x', 'y',
source=self.sources[line_idx],
line_color=(Category10[10])[line_idx],
line_width=line_width
)
if labels is not None:
items.append((labels[line_idx], [p]))
if labels is not None:
legend = Legend(
items=items,
location='center',
glyph_height=50,
glyph_width=30,
)
fig.add_layout(legend, 'right')
fig.xaxis.axis_label = xlabel
fig.yaxis.axis_label = ylabel
fig.title.text_font_size = f'{font_size}pt'
fig.axis.major_label_text_font_size = f'{font_size}pt'
if labels is not None:
fig.legend.label_text_font_size = f'{font_size}pt'
fig.xaxis.axis_label_text_font_size = f'{font_size}pt'
fig.yaxis.axis_label_text_font_size = f'{font_size}pt'
if x_range is None:
fig.x_range.range_padding = 0
#fig.y_range.start, fig.y_range.end = y_range
return fig
def update_figure(self):
y = [yi for yi in self.data.T]
for src, yi in zip(self.sources, y):
src.data['y'] = yi
class Image(Figure):
def create_figure(
self,
title='',
xlabel='',
ylabel='',
plot_width=750,
plot_height=500,
font_size=30,
low=-3, high=3,
):
"""
creating a bokeh image where data values serve as pixel intensity.
Args:
title:
xlabel: label for x axis
ylabel: label for y axis
plot_width:
plot_height:
font_size:
low: lowest intensity. values below are clamped.
high: highest intensity. values above are clamped.
"""
assert self.data.ndim == 2, self.data.shape
num_frames, num_bins = self.data.shape
x = np.arange(num_frames)
x = np.repeat(x, num_bins, axis=0)
y = np.repeat(np.arange(num_bins)[None], num_frames, axis=0).flatten()
v = np.zeros_like(y).tolist()
self.source = ColumnDataSource(
data=dict(x=x, y=y, v=v)
)
fig = figure(
plot_height=plot_height,
plot_width=plot_width,
toolbar_location=None,
title=title
)
mapper = LinearColorMapper(palette='Viridis256', low=low, high=high)
fig.rect(
'x',
'y',
color={'field': 'v', 'transform': mapper},
width=1,
height=1,
source=self.source
)
fig.x_range.range_padding = 0
fig.y_range.range_padding = 0
fig.xaxis.axis_label = xlabel
fig.yaxis.axis_label = ylabel
fig.title.text_font_size = f'{font_size}pt'
fig.axis.major_label_text_font_size = f'{font_size}pt'
fig.xaxis.axis_label_text_font_size = f'{font_size}pt'
fig.yaxis.axis_label_text_font_size = f'{font_size}pt'
return fig
def update_figure(self):
self.source.data['v'] = self.data.flatten().tolist()
|
py | 7dffc6cf23b7630e5d82ae7220d28135df69bb31 |
from gggg import *
a = Assignment(21, 10)
s = State(a)
horizontal_rule()
s.reject_if_missing_contributors()
s.reject_if_starter_contributors()
s.reject_unless_files_exist(['Book.hpp',
'Book.cpp',
'BookDatabase.hpp',
'BookDatabase.cpp',
'Checkout.hpp',
'Checkout.cpp'])
s.reject_if_file_unchanged('Book.cpp',
'953ed73434f4cae54b9161b48da2f25a2622522198a655c00de571bb596b16df')
s.reject_unless_command_succeeds(['make', 'clean', 'test'])
s.string_removed_test('TO-DO comments removed', 3, 'TO-DO', ['Book.cpp', 'BookDatabase.hpp', 'BookDatabase.cpp', 'Checkout.cpp'])
s.gtest_run('BookDatabase_test')
s.gtest_suite_test('BookDatabaseSize', 2)
s.gtest_suite_test('BookDatabaseFind', 4)
s.gtest_run('Checkout_test')
s.gtest_suite_test('CheckoutTestShopForBooks', 3)
s.gtest_suite_test('CheckoutTestSwitchCarts', 3)
s.gtest_suite_test('CheckoutTestMoveBooksToCheckout', 3)
s.gtest_suite_test('CheckoutTestDoCheckout', 3)
s.summarize()
|
py | 7dffc7311de242e804f6337c91b327220c0af9a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from fake_useragent import UserAgent
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
UA = UserAgent(verify_ssl=False)
class RandomUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent='Scrapy'):
super().__init__()
self.user_agent = user_agent
def process_request(self, request, spider):
        request.headers.setdefault('Referer', 'https://stackoverflow.com')
request.headers.setdefault('User-Agent', UA.random)
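# To enable this middleware in a Scrapy project, register it in settings.py and
# disable the built-in user agent middleware (the project path below is a placeholder):
#
# DOWNLOADER_MIDDLEWARES = {
#     'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
#     'myproject.middlewares.RandomUserAgentMiddleware': 400,
# }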
|
py | 7dffc75e682c530b8c0dc9d79507666e58aff2c6 | from py_compile import compile
from os import path
if __name__ == '__main__':
destination = path.realpath(path.curdir)+'/WallpaperChanger.pyc'
print('Destination: %s' % destination)
compile('main.py', destination)
|
py | 7dffc89e189351ca6c71011dbe51b4921b8e0cf4 | import pytest
from populus.config import (
Config,
)
from populus.config.helpers import (
resolve_config,
)
MASTER_CONFIG = Config({
'a': {
'a': 'a.a',
},
'b': {
'a': 'b.a',
'b': 'b.b',
},
'c': {
'a': {
'a': 'c.a.a',
'b': 'c.a.b',
},
'b': {
'a': 'c.b.a',
'b': 'c.b.b',
},
},
})
@pytest.mark.parametrize(
'config,expected',
(
({'b': 'b'}, {'b': 'b'}),
({'$ref': 'a'}, {'a': 'a.a'}),
({'$ref': 'b'}, {'a': 'b.a', 'b': 'b.b'}),
({'$ref': 'c.a'}, {'a': 'c.a.a', 'b': 'c.a.b'}),
({'$ref': 'c.b'}, {'a': 'c.b.a', 'b': 'c.b.b'}),
)
)
def test_resolving_config(config, expected):
actual = resolve_config(config, MASTER_CONFIG)
assert actual == expected
|
py | 7dffcb0e1b3b5553c41a9cba96fe2ccfb6d21275 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from ..mobilecommand import MobileCommand as Command
class Location(webdriver.Remote):
def toggle_location_services(self):
"""Toggle the location services on the device.
Android only.
Returns:
`appium.webdriver.webdriver.WebDriver`
"""
self.execute(Command.TOGGLE_LOCATION_SERVICES, {})
return self
def set_location(self, latitude, longitude, altitude=None):
"""Set the location of the device
Args:
latitude (Union[float, str]): String or numeric value between -90.0 and 90.00
longitude (Union[float, str]): String or numeric value between -180.0 and 180.0
altitude (Union[float, str], optional): String or numeric value (Android real device only)
Returns:
`appium.webdriver.webdriver.WebDriver`
"""
data = {
"location": {
"latitude": latitude,
"longitude": longitude,
}
}
if altitude is not None:
data['location']['altitude'] = altitude
self.execute(Command.SET_LOCATION, data)
return self
@property
def location(self):
"""Retrieves the current location
Returns:
A dictionary whose keys are
- latitude (float)
- longitude (float)
- altitude (float)
"""
return self.execute(Command.GET_LOCATION)['value']
# pylint: disable=protected-access
def _addCommands(self):
self.command_executor._commands[Command.TOGGLE_LOCATION_SERVICES] = \
('POST', '/session/$sessionId/appium/device/toggle_location_services')
self.command_executor._commands[Command.GET_LOCATION] = \
('GET', '/session/$sessionId/location')
self.command_executor._commands[Command.SET_LOCATION] = \
('POST', '/session/$sessionId/location')
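# Illustrative usage (a sketch; obtaining `driver` is assumed to happen elsewhere):
#
#   driver.set_location(49.01, 8.38, 10)   # latitude, longitude, optional altitude
#   driver.toggle_location_services()      # Android only
#   loc = driver.location                  # dict with latitude, longitude, altitude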
|
py | 7dffcb5d803d21217ba47cedc2443e38f3bc2b8c | from datetime import datetime, time, timedelta
from flask import jsonify, request
from flask_login import current_user, login_required
from http import HTTPStatus
import pytz
import re
from sqlalchemy.exc import IntegrityError
from .. import app, db, login_manager
from ..exc import ApiError
from ..models import User, Home, Sensor, Reading
@login_manager.request_loader
def load_user_from_request(request):
api_key = request.headers.get('Authorization')
if api_key:
user = User.query.filter_by(api_key=api_key).first()
if user:
app.logger.info('User {!r} authenticated with API'.format(user.email))
return user
if wants_json():
raise ApiError(
'Please include a valid API key in the Authorization header.',
status_code=HTTPStatus.UNAUTHORIZED,
)
return None
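# Illustrative client request (a sketch; the host, API key and sensor UUID are
# placeholders, and the date must match the format accepted by the `date` converter):
#
#   curl -H "Authorization: <api_key>" \
#        https://example.com/api/v1/<sensor_uuid>/readings/2021-06-01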
@app.route('/api/v1/<uuid:sensor_uuid>/readings/<date:date>')
@login_required
def api_v1_list_readings(sensor_uuid, date):
sensor = db.session.query(Sensor).filter_by(uuid=sensor_uuid) \
.join(Home).filter_by(user_id=current_user.id).first_or_404()
timezone = pytz.timezone(sensor.home.timezone)
midnight = timezone.localize(datetime.combine(date, time())) \
.astimezone(pytz.utc).replace(tzinfo=None)
readings = sensor.readings.filter(Reading.timestamp.between(
midnight,
midnight + timedelta(days=1)
)).order_by(Reading.timestamp)
return jsonify([r.as_dict() for r in readings])
@app.route('/api/v1/<uuid:sensor_uuid>/readings', methods=['POST'])
@login_required
def api_v1_create_reading(sensor_uuid):
sensor = db.session.query(Sensor).filter_by(uuid=sensor_uuid) \
.join(Home).filter_by(user_id=current_user.id).first_or_404()
data = request.get_json()
try:
reading = Reading(sensor=sensor, **data)
db.session.add(reading)
db.session.commit()
app.logger.info('Created reading with {}'.format(data))
return jsonify({'status': 'Created'}), HTTPStatus.CREATED
except (IntegrityError, ValueError) as e:
if re.search(r'Key \(.*\) already exists', e.args[0]):
message = 'A conflicting record already exists.'
status_code = HTTPStatus.CONFLICT
else:
message = e.args[0]
status_code = HTTPStatus.UNPROCESSABLE_ENTITY
app.logger.warning('Failed to create reading: {}'.format(e.args[0]))
raise ApiError(message, status_code=status_code) from e
def wants_json():
types = request.accept_mimetypes
best = types.best_match(['application/json', 'text/html'])
return best == 'application/json' or types[best] > types['text/html']
|
py | 7dffcb73c913669419c13df21399e2fab57b6997 | # 3p
import pymemcache
# project
from ddtrace import Pin
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.pymemcache.patch import patch, unpatch
from ddtrace.ext import memcached as memcachedx, net
from .utils import MockSocket
from tests.tracer.test_tracer import get_dummy_tracer
from ... import TracerTestCase
_Client = pymemcache.client.base.Client
TEST_HOST = 'localhost'
TEST_PORT = 117711
class PymemcacheClientTestCaseMixin(TracerTestCase):
""" Tests for a patched pymemcache.client.base.Client. """
def get_spans(self):
pin = Pin.get_from(self.client)
tracer = pin.tracer
spans = tracer.writer.pop()
return spans
def check_spans(self, num_expected, resources_expected, queries_expected):
"""A helper for validating basic span information."""
spans = self.get_spans()
self.assertEqual(num_expected, len(spans))
for span, resource, query in zip(spans, resources_expected, queries_expected):
self.assert_is_measured(span)
self.assertEqual(span.get_tag(net.TARGET_HOST), TEST_HOST)
self.assertEqual(span.get_metric(net.TARGET_PORT), TEST_PORT)
self.assertEqual(span.name, memcachedx.CMD)
self.assertEqual(span.span_type, 'cache')
self.assertEqual(span.service, memcachedx.SERVICE)
self.assertEqual(span.get_tag(memcachedx.QUERY), query)
self.assertEqual(span.resource, resource)
return spans
def setUp(self):
patch()
def tearDown(self):
unpatch()
def make_client(self, mock_socket_values, **kwargs):
tracer = get_dummy_tracer()
Pin.override(pymemcache, tracer=tracer)
self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs)
self.client.sock = MockSocket(list(mock_socket_values))
return self.client
def test_set_success(self):
client = self.make_client([b'STORED\r\n'])
result = client.set(b'key', b'value', noreply=False)
assert result is True
self.check_spans(1, ['set'], ['set key'])
def test_get_many_none_found(self):
client = self.make_client([b'END\r\n'])
result = client.get_many([b'key1', b'key2'])
assert result == {}
self.check_spans(1, ['get_many'], ['get_many key1 key2'])
def test_get_multi_none_found(self):
client = self.make_client([b'END\r\n'])
result = client.get_multi([b'key1', b'key2'])
assert result == {}
self.check_spans(1, ['get_many'], ['get_many key1 key2'])
def test_delete_not_found(self):
client = self.make_client([b'NOT_FOUND\r\n'])
result = client.delete(b'key', noreply=False)
assert result is False
self.check_spans(1, ['delete'], ['delete key'])
def test_incr_found(self):
client = self.make_client([b'STORED\r\n', b'1\r\n'])
client.set(b'key', 0, noreply=False)
result = client.incr(b'key', 1, noreply=False)
assert result == 1
self.check_spans(2, ['set', 'incr'], ['set key', 'incr key'])
def test_get_found(self):
client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n'])
result = client.set(b'key', b'value', noreply=False)
result = client.get(b'key')
assert result == b'value'
self.check_spans(2, ['set', 'get'], ['set key', 'get key'])
def test_decr_found(self):
client = self.make_client([b'STORED\r\n', b'1\r\n'])
client.set(b'key', 2, noreply=False)
result = client.decr(b'key', 1, noreply=False)
assert result == 1
self.check_spans(2, ['set', 'decr'], ['set key', 'decr key'])
def test_add_stored(self):
client = self.make_client([b'STORED\r', b'\n'])
result = client.add(b'key', b'value', noreply=False)
assert result is True
self.check_spans(1, ['add'], ['add key'])
def test_delete_many_found(self):
client = self.make_client([b'STORED\r', b'\n', b'DELETED\r\n'])
result = client.add(b'key', b'value', noreply=False)
result = client.delete_many([b'key'], noreply=False)
assert result is True
self.check_spans(2, ['add', 'delete_many'], ['add key', 'delete_many key'])
def test_set_many_success(self):
client = self.make_client([b'STORED\r\n'])
result = client.set_many({b'key': b'value'}, noreply=False)
assert result is True
self.check_spans(1, ['set_many'], ['set_many key'])
def test_set_multi_success(self):
# Should just map to set_many
client = self.make_client([b'STORED\r\n'])
result = client.set_multi({b'key': b'value'}, noreply=False)
assert result is True
self.check_spans(1, ['set_many'], ['set_many key'])
def test_analytics_default(self):
client = self.make_client([b'STORED\r\n'])
result = client.set(b'key', b'value', noreply=False)
assert result is True
spans = self.get_spans()
self.assertEqual(len(spans), 1)
self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_with_rate(self):
with self.override_config(
'pymemcache',
dict(analytics_enabled=True, analytics_sample_rate=0.5)
):
client = self.make_client([b'STORED\r\n'])
result = client.set(b'key', b'value', noreply=False)
assert result is True
spans = self.get_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
def test_analytics_without_rate(self):
with self.override_config(
'pymemcache',
dict(analytics_enabled=True)
):
client = self.make_client([b'STORED\r\n'])
result = client.set(b'key', b'value', noreply=False)
assert result is True
spans = self.get_spans()
self.assertEqual(len(spans), 1)
self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
|
py | 7dffcb8303403cca60309ca829e30c22baf7ff0e | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com), Lyndon Bauto ([email protected])'
import os
import pytest
from pytest import fail
from gremlin_python.driver import serializer
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from gremlin_python.structure.graph import Graph
from gremlin_python.process.anonymous_traversal import traversal
from gremlin_python.process.traversal import P
from gremlin_python.process.traversal import Binding, Bindings
from gremlin_python.process.graph_traversal import __
gremlin_server_url = 'ws://localhost:{}/gremlin'
anonymous_url = gremlin_server_url.format(45940)
def transactions_disabled():
return (os.environ['TEST_TRANSACTIONS'] != 'true') if 'TEST_TRANSACTIONS' in os.environ else False
class TestTraversal(object):
def test_bytecode(self):
g = traversal().withGraph(Graph())
bytecode = g.V().out("created").bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 2 == len(bytecode.step_instructions)
assert "V" == bytecode.step_instructions[0][0]
assert "out" == bytecode.step_instructions[1][0]
assert "created" == bytecode.step_instructions[1][1]
assert 1 == len(bytecode.step_instructions[0])
assert 2 == len(bytecode.step_instructions[1])
##
bytecode = g.withSack(1).E().groupCount().by("weight").bytecode
assert 0 == len(bytecode.bindings.keys())
assert 1 == len(bytecode.source_instructions)
assert "withSack" == bytecode.source_instructions[0][0]
assert 1 == bytecode.source_instructions[0][1]
assert 3 == len(bytecode.step_instructions)
assert "E" == bytecode.step_instructions[0][0]
assert "groupCount" == bytecode.step_instructions[1][0]
assert "by" == bytecode.step_instructions[2][0]
assert "weight" == bytecode.step_instructions[2][1]
assert 1 == len(bytecode.step_instructions[0])
assert 1 == len(bytecode.step_instructions[1])
assert 2 == len(bytecode.step_instructions[2])
##
bytecode = g.V(Bindings.of('a', [1, 2, 3])) \
.out(Bindings.of('b', 'created')) \
.where(__.in_(Bindings.of('c', 'created'), Bindings.of('d', 'knows')) \
.count().is_(Bindings.of('e', P.gt(2)))).bytecode
assert 5 == len(bytecode.bindings.keys())
assert [1, 2, 3] == bytecode.bindings['a']
assert 'created' == bytecode.bindings['b']
assert 'created' == bytecode.bindings['c']
assert 'knows' == bytecode.bindings['d']
assert P.gt(2) == bytecode.bindings['e']
assert Binding('b', 'created') == bytecode.step_instructions[1][1]
assert 'binding[b=created]' == str(bytecode.step_instructions[1][1])
assert isinstance(hash(bytecode.step_instructions[1][1]), int)
def test_P(self):
# verify that the order of operations is respected
assert "and(eq(a),lt(b))" == str(P.eq("a").and_(P.lt("b")))
assert "and(or(lt(b),gt(c)),neq(d))" == str(P.lt("b").or_(P.gt("c")).and_(P.neq("d")))
assert "and(or(lt(b),gt(c)),or(neq(d),gte(e)))" == str(
P.lt("b").or_(P.gt("c")).and_(P.neq("d").or_(P.gte("e"))))
def test_anonymous_traversal(self):
bytecode = __.__(1).bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 1 == len(bytecode.step_instructions)
assert "inject" == bytecode.step_instructions[0][0]
assert 1 == bytecode.step_instructions[0][1]
##
bytecode = __.start().bytecode
assert 0 == len(bytecode.bindings.keys())
assert 0 == len(bytecode.source_instructions)
assert 0 == len(bytecode.step_instructions)
def test_clone_traversal(self):
g = traversal().withGraph(Graph())
original = g.V().out("created")
clone = original.clone().out("knows")
cloneClone = clone.clone().out("created")
assert 2 == len(original.bytecode.step_instructions)
assert 3 == len(clone.bytecode.step_instructions)
assert 4 == len(cloneClone.bytecode.step_instructions)
original.has("person", "name", "marko")
clone.V().out()
assert 3 == len(original.bytecode.step_instructions)
assert 5 == len(clone.bytecode.step_instructions)
assert 4 == len(cloneClone.bytecode.step_instructions)
def test_no_sugar_for_magic_methods(self):
g = traversal().withGraph(Graph())
t = g.V().age
assert 2 == len(t.bytecode.step_instructions)
try:
t = g.V().__len__
fail("can't do sugar with magic")
except AttributeError as err:
assert str(
err) == 'Python magic methods or keys starting with double underscore cannot be used for Gremlin sugar - prefer values(__len__)'
def test_enforce_anonymous_child_traversal(self):
g = traversal().withGraph(Graph())
g.V(0).addE("self").to(__.V(1))
try:
g.V(0).addE("self").to(g.V(1))
assert False
except TypeError:
pass
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_transaction_commit(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
start_count = g.V().count().next()
tx = g.tx()
        # Expect the transaction to not be open until begin is called.
assert not tx.isOpen()
gtx = tx.begin()
assert tx.isOpen()
add_node_validate_transaction_state(g, gtx, start_count, start_count, [tx])
add_node_validate_transaction_state(gtx, g, start_count + 2, start_count, [tx])
# Commit the transaction, this should close it and add our transaction only vertex to the graph,
# so our vertex count outside the transaction should be 2 + the start count.
tx.commit()
assert not tx.isOpen()
assert g.V().count().next() == start_count + 2
drop_graph_check_count(g)
verify_gtx_closed(gtx)
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_transaction_rollback(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
start_count = g.V().count().next()
tx = g.tx()
        # Expect the transaction to not be open until begin is called.
assert not tx.isOpen()
gtx = tx.begin()
assert tx.isOpen()
add_node_validate_transaction_state(g, gtx, start_count, start_count, [tx])
add_node_validate_transaction_state(gtx, g, start_count + 2, start_count, [tx])
        # Rollback the transaction, this should close it and discard our transaction only vertex,
        # so only the vertex added outside the transaction remains and the count should be 1 + the start count.
tx.rollback()
assert not tx.isOpen()
assert g.V().count().next() == start_count + 1
drop_graph_check_count(g)
verify_gtx_closed(gtx)
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_transaction_no_begin(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
tx = g.tx()
        # Expect the transaction to not be open until begin is called.
assert not tx.isOpen()
try:
# Attempt to commit even though no tx is started.
tx().commit()
assert False
except Exception as e:
assert not tx.isOpen()
try:
# Attempt to rollback even though no tx is started.
tx().rollback()
assert False
except Exception as e:
assert not tx.isOpen()
try:
# Attempt to invoke tx().tx() which is illegal.
tx().tx()
assert False
except Exception as e:
assert not tx.isOpen()
# Create transaction and verify it is open.
gtx = tx.begin()
assert tx.isOpen()
try:
# Attempt to begin gtx which is illegal.
gtx().begin()
assert False
except Exception as e:
assert tx.isOpen()
# Commit this unused transaction and verify it is no longer open.
tx.commit()
assert not tx.isOpen()
# Create another transaction and verify it is open.
gtx = tx.begin()
assert tx.isOpen()
# Rollback this unused transaction and verify it is no longer open.
tx.rollback()
assert not tx.isOpen()
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_multi_commit_transaction(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
start_count = g.V().count().next()
# Create two transactions.
tx1, tx2 = g.tx(), g.tx()
# Generate two GraphTraversalSource's for each transaction with begin.
gtx1, gtx2 = tx1.begin(), tx2.begin()
verify_tx_state([tx1, tx2], True)
# Add node to gtx1, which should be visible to gtx1, not gtx2.
add_node_validate_transaction_state(g, gtx1, start_count, start_count, [tx1, tx2])
        # Add node to gtx2, which should be visible to gtx2, not gtx1.
add_node_validate_transaction_state(g, gtx2, start_count, start_count, [tx1, tx2])
# Add node to gtx1, which should be visible to gtx1, not gtx2. Note previous node also added.
add_node_validate_transaction_state(g, gtx1, start_count, start_count + 1, [tx1, tx2])
tx1.commit()
verify_tx_state([tx1], False)
verify_tx_state([tx2], True)
assert g.V().count().next() == start_count + 2
tx2.commit()
verify_tx_state([tx1, tx2], False)
assert g.V().count().next() == start_count + 3
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_multi_rollback_transaction(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
start_count = g.V().count().next()
# Create two transactions.
tx1, tx2 = g.tx(), g.tx()
# Generate two GraphTraversalSource's for each transaction with begin.
gtx1, gtx2 = tx1.begin(), tx2.begin()
verify_tx_state([tx1, tx2], True)
# Add node to gtx1, which should be visible to gtx1, not gtx2.
add_node_validate_transaction_state(g, gtx1, start_count, start_count, [tx1, tx2])
        # Add node to gtx2, which should be visible to gtx2, not gtx1.
add_node_validate_transaction_state(g, gtx2, start_count, start_count, [tx1, tx2])
# Add node to gtx1, which should be visible to gtx1, not gtx2. Note previous node also added.
add_node_validate_transaction_state(g, gtx1, start_count, start_count + 1, [tx1, tx2])
tx1.rollback()
verify_tx_state([tx1], False)
verify_tx_state([tx2], True)
assert g.V().count().next() == start_count
tx2.rollback()
verify_tx_state([tx1, tx2], False)
assert g.V().count().next() == start_count
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_multi_commit_and_rollback(self, remote_transaction_connection):
# Start a transaction traversal.
g = traversal().withRemote(remote_transaction_connection)
start_count = g.V().count().next()
# Create two transactions.
tx1, tx2 = g.tx(), g.tx()
# Generate two GraphTraversalSource's for each transaction with begin.
gtx1, gtx2 = tx1.begin(), tx2.begin()
verify_tx_state([tx1, tx2], True)
# Add node to gtx1, which should be visible to gtx1, not gtx2.
add_node_validate_transaction_state(g, gtx1, start_count, start_count, [tx1, tx2])
        # Add node to gtx2, which should be visible to gtx2, not gtx1.
add_node_validate_transaction_state(g, gtx2, start_count, start_count, [tx1, tx2])
# Add node to gtx1, which should be visible to gtx1, not gtx2. Note previous node also added.
add_node_validate_transaction_state(g, gtx1, start_count, start_count + 1, [tx1, tx2])
tx1.commit()
verify_tx_state([tx1], False)
verify_tx_state([tx2], True)
assert g.V().count().next() == start_count + 2
tx2.rollback()
verify_tx_state([tx1, tx2], False)
assert g.V().count().next() == start_count + 2
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_transaction_close_tx(self):
remote_conn = create_connection_to_gtx()
g = traversal().withRemote(remote_conn)
drop_graph_check_count(g)
tx1 = g.tx()
tx2 = g.tx()
# open up two sessions and create stuff
gtx1 = tx1.begin()
gtx2 = tx2.begin()
add_node_validate_transaction_state(g, gtx1, 0, 0, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 0, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 1, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 2, [tx1, tx2])
# someone gets lazy and doesn't commit/rollback and just calls close() - the graph
# will decide how to treat the transaction, but for neo4j/gremlin server in this
# test configuration it should rollback
tx1.close()
tx2.close()
assert not tx1.isOpen()
assert not tx2.isOpen()
verify_gtx_closed(gtx1)
verify_gtx_closed(gtx2)
remote_conn = create_connection_to_gtx()
g = traversal().withRemote(remote_conn)
assert g.V().count().next() == 0
drop_graph_check_count(g)
@pytest.mark.skipif(transactions_disabled(), reason="Transactions are not enabled.")
def test_transaction_close_tx_from_parent(self):
remote_conn = create_connection_to_gtx()
g = traversal().withRemote(remote_conn)
drop_graph_check_count(g)
tx1 = g.tx()
tx2 = g.tx()
# open up two sessions and create stuff
gtx1 = tx1.begin()
gtx2 = tx2.begin()
add_node_validate_transaction_state(g, gtx1, 0, 0, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 0, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 1, [tx1, tx2])
add_node_validate_transaction_state(g, gtx2, 0, 2, [tx1, tx2])
# someone gets lazy and doesn't commit/rollback and just calls close() but on the parent
# DriverRemoteConnection for all the session that were created via tx() - the graph
# will decide how to treat the transaction, but for neo4j/gremlin server in this
# test configuration it should rollback
remote_conn.close()
assert not tx1.isOpen()
assert not tx2.isOpen()
verify_gtx_closed(gtx1)
verify_gtx_closed(gtx2)
remote_conn = create_connection_to_gtx()
g = traversal().withRemote(remote_conn)
assert g.V().count().next() == 0
drop_graph_check_count(g)
def create_connection_to_gtx():
return DriverRemoteConnection(anonymous_url, 'gtx',
message_serializer=serializer.GraphBinarySerializersV1())
def add_node_validate_transaction_state(g, g_add_to, g_start_count, g_add_to_start_count, tx_verify_list):
# Add a single node to g_add_to, but not g.
# Check that vertex count in g is g_start_count and vertex count in g_add_to is g_add_to_start_count + 1.
g_add_to.addV("person").property("name", "lyndon").iterate()
assert g_add_to.V().count().next() == g_add_to_start_count + 1
assert g.V().count().next() == g_start_count
verify_tx_state(tx_verify_list, True)
def verify_tx_state(gtx_list, value):
for tx in gtx_list:
assert tx.isOpen() == value
def drop_graph_check_count(g):
g.V().drop().iterate()
assert g.V().count().next() == 0
def verify_gtx_closed(gtx):
try:
# Attempt to add an additional vertex to the transaction. This should throw an exception since it
# has been rolled back.
gtx().addV("failure").iterate()
assert False
except Exception as e:
pass
|
py | 7dffcdada5cc02b2c8247d79407b8b86f4b05488 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=all
# Generated from MetricAlertCondition.g4 by ANTLR 4.7.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"\26\u00d0\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6")
buf.write(u"\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4")
buf.write(u"\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t")
buf.write(u"\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27")
buf.write(u"\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4")
buf.write(u"\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t")
buf.write(u"#\4$\t$\4%\t%\4&\t&\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3")
buf.write(u"\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f")
buf.write(u"\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21")
buf.write(u"\3\22\3\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26\3")
buf.write(u"\27\3\27\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34")
buf.write(u"\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3")
buf.write(u"\36\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37")
buf.write(u"\3\37\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3!\3!\3!\3!\3")
buf.write(u"!\3!\3!\3!\3!\5!\u00aa\n!\3\"\6\"\u00ad\n\"\r\"\16\"")
buf.write(u"\u00ae\3\"\3\"\6\"\u00b3\n\"\r\"\16\"\u00b4\5\"\u00b7")
buf.write(u"\n\"\3#\3#\3$\6$\u00bc\n$\r$\16$\u00bd\3%\5%\u00c1\n")
buf.write(u"%\3%\3%\6%\u00c5\n%\r%\16%\u00c6\3&\3&\3&\3&\6&\u00cd")
buf.write(u"\n&\r&\16&\u00ce\2\2\'\3\3\5\4\7\5\t\6\13\7\r\b\17\t")
buf.write(u"\21\n\23\13\25\2\27\2\31\2\33\2\35\2\37\2!\2#\2%\2\'")
buf.write(u"\2)\2+\2-\2/\2\61\2\63\2\65\2\67\f9\r;\16=\17?\20A\21")
buf.write(u"C\22E\23G\24I\25K\26\3\2\26\4\2CCcc\4\2EEee\4\2FFff\4")
buf.write(u"\2GGgg\4\2JJjj\4\2KKkk\4\2NNnn\4\2PPpp\4\2QQqq\4\2TT")
buf.write(u"tt\4\2UUuu\4\2WWww\4\2YYyy\4\2ZZzz\3\2\62;\3\2c|\3\2")
buf.write(u"C\\\4\2..\60\60\4\2$$))\4\2\13\13\"\"\2\u00ce\2\3\3\2")
buf.write(u"\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2")
buf.write(u"\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2")
buf.write(u"\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2")
buf.write(u"\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2")
buf.write(u"\2\2\2K\3\2\2\2\3M\3\2\2\2\5O\3\2\2\2\7Q\3\2\2\2\tS\3")
buf.write(u"\2\2\2\13U\3\2\2\2\rW\3\2\2\2\17Y\3\2\2\2\21[\3\2\2\2")
buf.write(u"\23]\3\2\2\2\25_\3\2\2\2\27a\3\2\2\2\31c\3\2\2\2\33e")
buf.write(u"\3\2\2\2\35g\3\2\2\2\37i\3\2\2\2!k\3\2\2\2#m\3\2\2\2")
buf.write(u"%o\3\2\2\2\'q\3\2\2\2)s\3\2\2\2+u\3\2\2\2-w\3\2\2\2/")
buf.write(u"y\3\2\2\2\61{\3\2\2\2\63}\3\2\2\2\65\177\3\2\2\2\67\u0081")
buf.write(u"\3\2\2\29\u0087\3\2\2\2;\u008b\3\2\2\2=\u0094\3\2\2\2")
buf.write(u"?\u009d\3\2\2\2A\u00a9\3\2\2\2C\u00ac\3\2\2\2E\u00b8")
buf.write(u"\3\2\2\2G\u00bb\3\2\2\2I\u00c4\3\2\2\2K\u00cc\3\2\2\2")
buf.write(u"MN\7\60\2\2N\4\3\2\2\2OP\7\61\2\2P\6\3\2\2\2QR\7a\2\2")
buf.write(u"R\b\3\2\2\2ST\7^\2\2T\n\3\2\2\2UV\7<\2\2V\f\3\2\2\2W")
buf.write(u"X\7\'\2\2X\16\3\2\2\2YZ\7.\2\2Z\20\3\2\2\2[\\\7/\2\2")
buf.write(u"\\\22\3\2\2\2]^\7,\2\2^\24\3\2\2\2_`\t\2\2\2`\26\3\2")
buf.write(u"\2\2ab\t\3\2\2b\30\3\2\2\2cd\t\4\2\2d\32\3\2\2\2ef\t")
buf.write(u"\5\2\2f\34\3\2\2\2gh\t\6\2\2h\36\3\2\2\2ij\t\7\2\2j ")
buf.write(u"\3\2\2\2kl\t\b\2\2l\"\3\2\2\2mn\t\t\2\2n$\3\2\2\2op\t")
buf.write(u"\n\2\2p&\3\2\2\2qr\t\13\2\2r(\3\2\2\2st\t\f\2\2t*\3\2")
buf.write(u"\2\2uv\t\r\2\2v,\3\2\2\2wx\t\16\2\2x.\3\2\2\2yz\t\17")
buf.write(u"\2\2z\60\3\2\2\2{|\t\20\2\2|\62\3\2\2\2}~\t\21\2\2~\64")
buf.write(u"\3\2\2\2\177\u0080\t\22\2\2\u0080\66\3\2\2\2\u0081\u0082")
buf.write(u"\5-\27\2\u0082\u0083\5\35\17\2\u0083\u0084\5\33\16\2")
buf.write(u"\u0084\u0085\5\'\24\2\u0085\u0086\5\33\16\2\u00868\3")
buf.write(u"\2\2\2\u0087\u0088\5\25\13\2\u0088\u0089\5#\22\2\u0089")
buf.write(u"\u008a\5\31\r\2\u008a:\3\2\2\2\u008b\u008c\5\37\20\2")
buf.write(u"\u008c\u008d\5#\22\2\u008d\u008e\5\27\f\2\u008e\u008f")
buf.write(u"\5!\21\2\u008f\u0090\5+\26\2\u0090\u0091\5\31\r\2\u0091")
buf.write(u"\u0092\5\33\16\2\u0092\u0093\5)\25\2\u0093<\3\2\2\2\u0094")
buf.write(u"\u0095\5\33\16\2\u0095\u0096\5/\30\2\u0096\u0097\5\27")
buf.write(u"\f\2\u0097\u0098\5!\21\2\u0098\u0099\5+\26\2\u0099\u009a")
buf.write(u"\5\31\r\2\u009a\u009b\5\33\16\2\u009b\u009c\5)\25\2\u009c")
buf.write(u">\3\2\2\2\u009d\u009e\5%\23\2\u009e\u009f\5\'\24\2\u009f")
buf.write(u"@\3\2\2\2\u00a0\u00aa\7>\2\2\u00a1\u00a2\7>\2\2\u00a2")
buf.write(u"\u00aa\7?\2\2\u00a3\u00aa\7?\2\2\u00a4\u00a5\7@\2\2\u00a5")
buf.write(u"\u00aa\7?\2\2\u00a6\u00aa\7@\2\2\u00a7\u00a8\7#\2\2\u00a8")
buf.write(u"\u00aa\7?\2\2\u00a9\u00a0\3\2\2\2\u00a9\u00a1\3\2\2\2")
buf.write(u"\u00a9\u00a3\3\2\2\2\u00a9\u00a4\3\2\2\2\u00a9\u00a6")
buf.write(u"\3\2\2\2\u00a9\u00a7\3\2\2\2\u00aaB\3\2\2\2\u00ab\u00ad")
buf.write(u"\5\61\31\2\u00ac\u00ab\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae")
buf.write(u"\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00b6\3\2\2")
buf.write(u"\2\u00b0\u00b2\t\23\2\2\u00b1\u00b3\5\61\31\2\u00b2\u00b1")
buf.write(u"\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b2\3\2\2\2\u00b4")
buf.write(u"\u00b5\3\2\2\2\u00b5\u00b7\3\2\2\2\u00b6\u00b0\3\2\2")
buf.write(u"\2\u00b6\u00b7\3\2\2\2\u00b7D\3\2\2\2\u00b8\u00b9\t\24")
buf.write(u"\2\2\u00b9F\3\2\2\2\u00ba\u00bc\t\25\2\2\u00bb\u00ba")
buf.write(u"\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd\u00bb\3\2\2\2\u00bd")
buf.write(u"\u00be\3\2\2\2\u00beH\3\2\2\2\u00bf\u00c1\7\17\2\2\u00c0")
buf.write(u"\u00bf\3\2\2\2\u00c0\u00c1\3\2\2\2\u00c1\u00c2\3\2\2")
buf.write(u"\2\u00c2\u00c5\7\f\2\2\u00c3\u00c5\7\17\2\2\u00c4\u00c0")
buf.write(u"\3\2\2\2\u00c4\u00c3\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6")
buf.write(u"\u00c4\3\2\2\2\u00c6\u00c7\3\2\2\2\u00c7J\3\2\2\2\u00c8")
buf.write(u"\u00cd\5\63\32\2\u00c9\u00cd\5\65\33\2\u00ca\u00cd\5")
buf.write(u"\61\31\2\u00cb\u00cd\7a\2\2\u00cc\u00c8\3\2\2\2\u00cc")
buf.write(u"\u00c9\3\2\2\2\u00cc\u00ca\3\2\2\2\u00cc\u00cb\3\2\2")
buf.write(u"\2\u00cd\u00ce\3\2\2\2\u00ce\u00cc\3\2\2\2\u00ce\u00cf")
buf.write(u"\3\2\2\2\u00cfL\3\2\2\2\r\2\u00a9\u00ae\u00b4\u00b6\u00bd")
buf.write(u"\u00c0\u00c4\u00c6\u00cc\u00ce\2")
return buf.getvalue()
class MetricAlertConditionLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
WHERE = 10
AND = 11
INCLUDES = 12
EXCLUDES = 13
OR = 14
OPERATOR = 15
NUMBER = 16
QUOTE = 17
WHITESPACE = 18
NEWLINE = 19
WORD = 20
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"'.'", u"'/'", u"'_'", u"'\\'", u"':'", u"'%'", u"','", u"'-'",
u"'*'" ]
symbolicNames = [ u"<INVALID>",
u"WHERE", u"AND", u"INCLUDES", u"EXCLUDES", u"OR", u"OPERATOR",
u"NUMBER", u"QUOTE", u"WHITESPACE", u"NEWLINE", u"WORD" ]
ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5",
u"T__6", u"T__7", u"T__8", u"A", u"C", u"D", u"E", u"H",
u"I", u"L", u"N", u"O", u"R", u"S", u"U", u"W", u"X",
u"DIGIT", u"LOWERCASE", u"UPPERCASE", u"WHERE", u"AND",
u"INCLUDES", u"EXCLUDES", u"OR", u"OPERATOR", u"NUMBER",
u"QUOTE", u"WHITESPACE", u"NEWLINE", u"WORD" ]
grammarFileName = u"MetricAlertCondition.g4"
def __init__(self, input=None, output=sys.stdout):
super(MetricAlertConditionLexer, self).__init__(input, output=output)
self.checkVersion("4.7.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
|
py | 7dffcdd629ac5b5e7fa8e0897f63220d0ae1e0bb | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class SchedulerHintsJsonTest(api_sample_base.ApiSampleTestBaseV21):
    extension_name = "os-scheduler-hints"
    def _get_flags(self):
        f = super(SchedulerHintsJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            "nova.api.openstack.compute.contrib.scheduler_hints."
            "Scheduler_hints")
        return f
    def test_scheduler_hints_post(self):
        # Get api sample of scheduler hint post request.
        subs = {'image_id': fake.get_valid_image_id(),
                'uuid': str(uuid.uuid4())}
        response = self._do_post('servers', 'scheduler-hints-post-req',
                                 subs)
        del subs['uuid']
        self._verify_response('scheduler-hints-post-resp', subs, response, 202)
|
py | 7dffce8b21795d5c123b22a7338f4bcebe64f880 | OBJECT = 'OBJECT'
OTHER = 'OTHER'
DATE = 'DATE'
QUANTITY = 'QUANTITY'
|
py | 7dffcfecb364686a20efdc06dd09d0c5f180f776 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import PnyTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
set_node_times,
DecimalAmt,
)
class ReorgStakeTest(PnyTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and 1 stake the blocks, node 2 makes the zerocoin spends
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def setup_network(self):
# connect all nodes between each other
self.setup_nodes()
connect_nodes_clique(self.nodes)
self.sync_all()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests reorganisation for PoS blocks."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def disconnect_all(self):
self.log.info("Disconnecting nodes...")
for i in range(self.num_nodes):
for j in range(self.num_nodes):
if j != i:
disconnect_nodes(self.nodes[i], j)
self.log.info("Nodes disconnected")
def get_tot_balance(self, nodeid):
wi = self.nodes[nodeid].getwalletinfo()
return wi['balance'] + wi['immature_balance']
def check_money_supply(self, expected_pny, expected_zpny):
g_info = [self.nodes[i].getinfo() for i in range(self.num_nodes)]
# verify that nodes have the expected PNY and zPNY supply
for node in g_info:
assert_equal(node['moneysupply'], DecimalAmt(expected_pny))
for denom in node['zPNYsupply']:
assert_equal(node['zPNYsupply'][denom], DecimalAmt(expected_zpny[denom]))
def run_test(self):
def findUtxoInList(txid, vout, utxo_list):
for x in utxo_list:
if x["txid"] == txid and x["vout"] == vout:
return True, x
return False, None
# Check PNY and zPNY supply at the beginning
# ------------------------------------------
# zPNY supply: 2 coins for each denomination
expected_zpny_supply = {
"1": 2,
"5": 10,
"10": 20,
"50": 100,
"100": 200,
"500": 1000,
"1000": 2000,
"5000": 10000,
"total": 13332,
}
# PNY supply: block rewards minus burned fees for minting
expected_money_supply = 250.0 * 330 - 16 * 0.01
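        # Sanity check of the constants above: two coins per denomination gives
        # 2 * (1 + 5 + 10 + 50 + 100 + 500 + 1000 + 5000) = 13332 zPNY in total,
        # and 330 blocks at 250 PNY each minus 16 minting fees of 0.01 gives
        # 250.0 * 330 - 16 * 0.01 = 82499.84 PNY.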
self.check_money_supply(expected_money_supply, expected_zpny_supply)
# Stake with node 0 and node 1 up to public spend activation (400)
# 70 blocks: 5 blocks each (x7)
self.log.info("Staking 70 blocks to reach public spends activation...")
set_node_times(self.nodes, self.mocktime)
for i in range(7):
for peer in range(2):
for nblock in range(5):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
block_time_0 = block_time_1 = self.mocktime
self.log.info("Blocks staked.")
# Check balances
self.log.info("Checking balances...")
initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
# --nodes 0, 1: 62 pow blocks + 55 pos blocks
assert_equal(initial_balance[0], DecimalAmt(250.0 * (62 + 55)))
assert_equal(initial_balance[1], DecimalAmt(250.0 * (62 + 55)))
# --node 2: 62 pow blocks + 20 pos blocks - zc minted - zcfee
assert_equal(initial_balance[2], DecimalAmt(250.0 * (62 + 20) - 6666 - 0.08))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666))
self.log.info("Balances ok.")
# create the raw zerocoin spend txes
addy = self.nodes[2].getnewaddress()
self.log.info("Creating the raw zerocoin public spends...")
mints = self.nodes[2].listmintedzerocoins(True, True)
tx_A0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], addy)
tx_A1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], addy)
# Spending same coins to different recipients to get different txids
new_addy = "yAVWM5urwaTyhiuFQHP2aP47rdZsLUG5PH"
tx_B0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], new_addy)
tx_B1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], new_addy)
# Disconnect nodes
minted_amount = mints[0]["denomination"] + mints[1]["denomination"]
self.disconnect_all()
# Stake one block with node-0 and save the stake input
self.log.info("Staking 1 block with node 0...")
initial_unspent_0 = self.nodes[0].listunspent()
self.nodes[0].generate(1)
block_time_0 += 60
set_node_times(self.nodes, block_time_0)
last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
assert(len(last_block["tx"]) > 1) # a PoS block has at least two txes
coinstake_txid = last_block["tx"][1]
coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
assert (coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "") # first output of coinstake is empty
stakeinput = coinstake_tx["vin"][0]
# The stake input was unspent 1 block ago, now it's not
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
assert (res and utxo["spendable"])
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
# Relay zerocoin spends
self.nodes[0].sendrawtransaction(tx_A0)
self.nodes[0].sendrawtransaction(tx_A1)
# Stake 10 more blocks with node-0 and check balances
self.log.info("Staking 10 more blocks with node 0...")
for i in range(10):
block_time_0 = self.generate_pos(0, block_time_0)
expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
assert_equal(self.get_tot_balance(0), expected_balance_0)
self.log.info("Balance for node 0 checks out.")
# Connect with node 2, sync and check zerocoin balance
self.log.info("Reconnecting node 0 and node 2")
connect_nodes(self.nodes[0], 2)
sync_blocks([self.nodes[i] for i in [0, 2]])
self.log.info("Resetting zerocoin mints on node 2")
self.nodes[2].resetmintzerocoin(True)
assert_equal(self.get_tot_balance(2), initial_balance[2] + DecimalAmt(minted_amount))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666-minted_amount))
self.log.info("Balance for node 2 checks out.")
# Double spending txes not possible
assert_raises_rpc_error(-26, "bad-txns-invalid-zpny",
self.nodes[0].sendrawtransaction, tx_B0)
assert_raises_rpc_error(-26, "bad-txns-invalid-zpny",
self.nodes[0].sendrawtransaction, tx_B1)
# verify that the stakeinput can't be spent
stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
rawtx_unsigned = self.nodes[0].createrawtransaction(
[{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
{"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
assert(rawtx["complete"])
try:
self.nodes[0].sendrawtransaction(rawtx["hex"])
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if e.error["code"] not in [-26, -25]:
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if ([x for x in ["bad-txns-inputs-spent", "Missing inputs"] if x in e.error['message']] == []):
raise e
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
self.log.info("GOOD: v2 spend was not possible.")
# Spend tx_B0 and tx_B1 on the other chain
self.nodes[1].sendrawtransaction(tx_B0)
self.nodes[1].sendrawtransaction(tx_B1)
# Stake 12 blocks with node-1
set_node_times(self.nodes, block_time_1)
self.log.info("Staking 12 blocks with node 1...")
for i in range(12):
block_time_1 = self.generate_pos(1, block_time_1)
expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
assert_equal(self.get_tot_balance(1), expected_balance_1)
self.log.info("Balance for node 1 checks out.")
# re-connect and sync nodes and check that node-0 and node-2 get on the other chain
new_best_hash = self.nodes[1].getbestblockhash()
self.log.info("Connecting and syncing nodes...")
set_node_times(self.nodes, block_time_1)
connect_nodes_clique(self.nodes)
sync_blocks(self.nodes)
for i in [0, 2]:
assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)
# check balance of node-0
assert_equal(self.get_tot_balance(0), initial_balance[0])
self.log.info("Balance for node 0 checks out.")
# check that NOW the original stakeinput is present and spendable
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (res and utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is spendable again." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[1].generate(1)
sync_blocks(self.nodes)
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
# Verify that PNY and zPNY supplies were properly updated after the spends and reorgs
self.log.info("Check PNY and zPNY supply...")
expected_money_supply += 250.0 * (self.nodes[1].getblockcount() - 330)
spent_coin_0 = mints[0]["denomination"]
spent_coin_1 = mints[1]["denomination"]
expected_zpny_supply[str(spent_coin_0)] -= spent_coin_0
expected_zpny_supply[str(spent_coin_1)] -= spent_coin_1
expected_zpny_supply["total"] -= (spent_coin_0 + spent_coin_1)
self.check_money_supply(expected_money_supply, expected_zpny_supply)
self.log.info("Supply checks out.")
if __name__ == '__main__':
ReorgStakeTest().main() |
py | 7dffd04f25ebe2531c3135e1645dd3ad3cbaecef | import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from .. import PaymentStatus
from ..core import BasicProvider
class SagepayProvider(BasicProvider):
"""
Payment provider for sagepay.com
This backend implements payments using `SagePay.com <https://www.sagepay.com/>`_
Form API.
This backend does not support fraud detection. Purchased items are not currently
transferred.
:param vendor: Your vendor code
:param encryption_key: Encryption key assigned by Sage Pay
:param endpoint: The API endpoint to use. For the production environment, use ``'https://live.sagepay.com/gateway/service/vspform-register.vsp'`` instead
"""
_version = "3.00"
_action = "https://test.sagepay.com/Simulator/VSPFormGateway.asp"
def __init__(self, vendor, encryption_key, endpoint=_action, **kwargs):
self._vendor = vendor
self._enckey = encryption_key.encode("utf-8")
self._action = endpoint
super().__init__(**kwargs)
if not self._capture:
raise ImproperlyConfigured("Sagepay does not support pre-authorization.")
def _get_cipher(self):
backend = default_backend()
return Cipher(
algorithms.AES(self._enckey), modes.CBC(self._enckey), backend=backend
)
def _get_padding(self):
return padding.PKCS7(128)
def aes_enc(self, data):
data = data.encode("utf-8")
padder = self._get_padding().padder()
data = padder.update(data) + padder.finalize()
encryptor = self._get_cipher().encryptor()
enc = encryptor.update(data) + encryptor.finalize()
return b"@" + binascii.hexlify(enc)
def aes_dec(self, data):
data = data.lstrip(b"@")
data = binascii.unhexlify(data)
decryptor = self._get_cipher().decryptor()
data = decryptor.update(data) + decryptor.finalize()
return data.decode("utf-8")
def get_hidden_fields(self, payment):
payment.save()
return_url = self.get_return_url(payment)
data = {
"VendorTxCode": payment.pk,
"Amount": f"{payment.total:.2f}",
"Currency": payment.currency,
"Description": f"Payment #{payment.pk}",
"SuccessURL": return_url,
"FailureURL": return_url,
"BillingSurname": payment.billing_last_name,
"BillingFirstnames": payment.billing_first_name,
"BillingAddress1": payment.billing_address_1,
"BillingAddress2": payment.billing_address_2,
"BillingCity": payment.billing_city,
"BillingPostCode": payment.billing_postcode,
"BillingCountry": payment.billing_country_code,
"DeliverySurname": payment.billing_last_name,
"DeliveryFirstnames": payment.billing_first_name,
"DeliveryAddress1": payment.billing_address_1,
"DeliveryAddress2": payment.billing_address_2,
"DeliveryCity": payment.billing_city,
"DeliveryPostCode": payment.billing_postcode,
"DeliveryCountry": payment.billing_country_code,
}
if payment.billing_country_code == "US":
data["BillingState"] = payment.billing_country_area
data["DeliveryState"] = payment.billing_country_area
udata = "&".join("%s=%s" % kv for kv in data.items())
crypt = self.aes_enc(udata)
return {
"VPSProtocol": self._version,
"TxType": "PAYMENT",
"Vendor": self._vendor,
"Crypt": crypt,
}
def process_data(self, payment, request):
udata = self.aes_dec(request.GET["crypt"])
data = {}
for kv in udata.split("&"):
k, v = kv.split("=")
data[k] = v
success_url = payment.get_success_url()
if payment.status == PaymentStatus.WAITING:
            # If the payment is not in the waiting state, we probably have a page
            # reload. We should neither throw a 404 nor alter the payment again in
            # such a case; the final redirect below covers it.
if data["Status"] == "OK":
payment.captured_amount = payment.total
payment.change_status(PaymentStatus.CONFIRMED)
return redirect(success_url)
else:
# XXX: We should recognize AUTHENTICATED and REGISTERED in the future.
payment.change_status(PaymentStatus.REJECTED)
return redirect(payment.get_failure_url())
return redirect(success_url)
|
py | 7dffd195b5621d4d3460b8d329061dfd6d624f5b |
__all__ = ['FunctionType', 'UndefinedFunctionType', 'FunctionPrototype',
'WrapperAddressProtocol', 'CompileResultWAP']
from abc import ABC, abstractmethod
from .abstract import Type
from .. import types, errors
class FunctionType(Type):
"""
First-class function type.
"""
cconv = None
def __init__(self, signature):
sig = types.unliteral(signature)
self.nargs = len(sig.args)
self.signature = sig
self.ftype = FunctionPrototype(sig.return_type, sig.args)
self._key = self.ftype.key
@property
def key(self):
return self._key
@property
def name(self):
return f'{type(self).__name__}[{self.key}]'
def is_precise(self):
return self.signature.is_precise()
def get_precise(self):
return self
def dump(self, tab=''):
        print(f'{tab}DUMP {type(self).__name__}[key={self._key}]')
self.signature.dump(tab=tab + ' ')
print(f'{tab}END DUMP {type(self).__name__}')
def get_call_type(self, context, args, kws):
from numba.core import typing
if kws:
# First-class functions carry only the type signature
# information and function address value. So, it is not
# possible to determine the positional arguments
# corresponding to the keyword arguments in the call
# expression. For instance, the definition of the
# first-class function may not use the same argument names
# that the caller assumes. [numba/issues/5540].
raise errors.UnsupportedError(
'first-class function call cannot use keyword arguments')
if len(args) != self.nargs:
raise ValueError(
f'mismatch of arguments number: {len(args)} vs {self.nargs}')
sig = self.signature
# check that arguments types match with the signature types exactly
for atype, sig_atype in zip(args, sig.args):
atype = types.unliteral(atype)
if sig_atype.is_precise():
conv_score = context.context.can_convert(
fromty=atype, toty=sig_atype
)
if conv_score is None \
or conv_score > typing.context.Conversion.safe:
raise ValueError(
f'mismatch of argument types: {atype} vs {sig_atype}')
if not sig.is_precise():
for dispatcher in self.dispatchers:
template, pysig, args, kws \
= dispatcher.get_call_template(args, kws)
new_sig = template(context.context).apply(args, kws)
return types.unliteral(new_sig)
return sig
def check_signature(self, other_sig):
"""Return True if signatures match (up to being precise).
"""
sig = self.signature
return (self.nargs == len(other_sig.args)
and (sig == other_sig or not sig.is_precise()))
def unify(self, context, other):
if isinstance(other, types.UndefinedFunctionType) \
and self.nargs == other.nargs:
return self
class UndefinedFunctionType(FunctionType):
_counter = 0
def __init__(self, nargs, dispatchers):
from numba.core.typing.templates import Signature
signature = Signature(types.undefined,
(types.undefined,) * nargs, recvr=None)
super(UndefinedFunctionType, self).__init__(signature)
self.dispatchers = dispatchers
# make the undefined function type instance unique
type(self)._counter += 1
self._key += str(type(self)._counter)
def get_precise(self):
"""
Return precise function type if possible.
"""
for dispatcher in self.dispatchers:
for cres in dispatcher.overloads.values():
sig = types.unliteral(cres.signature)
return FunctionType(sig)
return self
class FunctionPrototype(Type):
"""
Represents the prototype of a first-class function type.
Used internally.
"""
cconv = None
def __init__(self, rtype, atypes):
self.rtype = rtype
self.atypes = tuple(atypes)
assert isinstance(rtype, Type), (rtype)
lst = []
for atype in self.atypes:
assert isinstance(atype, Type), (atype)
lst.append(atype.name)
name = '%s(%s)' % (rtype, ', '.join(lst))
super(FunctionPrototype, self).__init__(name)
@property
def key(self):
return self.name
class WrapperAddressProtocol(ABC):
"""Base class for Wrapper Address Protocol.
Objects that inherit from the WrapperAddressProtocol can be passed
as arguments to Numba jit compiled functions where it can be used
as first-class functions. As a minimum, the derived types must
implement two methods ``__wrapper_address__`` and ``signature``.
"""
@abstractmethod
def __wrapper_address__(self):
"""Return the address of a first-class function.
Returns
-------
addr : int
"""
@abstractmethod
def signature(self):
"""Return the signature of a first-class function.
Returns
-------
sig : Signature
The returned Signature instance represents the type of a
first-class function that the given WrapperAddressProtocol
instance represents.
"""
class CompileResultWAP(WrapperAddressProtocol):
"""Wrapper of dispatcher instance compilation result to turn it a
first-class function.
"""
def __init__(self, cres):
"""
Parameters
----------
cres : CompileResult
Specify compilation result of a Numba jit-decorated function
(that is a value of dispatcher instance ``overloads``
attribute)
"""
self.cres = cres
name = getattr(cres.fndesc, 'llvm_cfunc_wrapper_name')
self.address = cres.library.get_pointer_to_function(name)
def dump(self, tab=''):
print(f'{tab}DUMP {type(self).__name__} [addr={self.address}]')
self.cres.signature.dump(tab=tab + ' ')
print(f'{tab}END DUMP {type(self).__name__}')
def __wrapper_address__(self):
return self.address
def signature(self):
return self.cres.signature
def __call__(self, *args, **kwargs): # used in object-mode
return self.cres.entry_point(*args, **kwargs)
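# Illustrative usage sketch (hypothetical jitted function), following the
# __init__ docstring above: wrap one entry of a dispatcher's ``overloads`` dict
# so the compiled overload can be passed around as a first-class function value:
#
#   import numba
#
#   @numba.njit("float64(float64)")
#   def twice(x):
#       return 2.0 * x
#
#   cres = list(twice.overloads.values())[0]
#   wap = CompileResultWAP(cres)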
|
py | 7dffd2cf862e3acaf9f292926e81c2b81841386e | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: lingvo/core/ops/hyps.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='lingvo/core/ops/hyps.proto',
package='tensorflow.lingvo',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x1alingvo/core/ops/hyps.proto\x12\x11tensorflow.lingvo\"\xaa\x01\n\nHypothesis\x12\x0f\n\x07\x62\x65\x61m_id\x18\x01 \x01(\x05\x12\x0b\n\x03ids\x18\x02 \x03(\x05\x12\x0e\n\x06scores\x18\x03 \x03(\x02\x12:\n\natten_vecs\x18\x04 \x03(\x0b\x32&.tensorflow.lingvo.Hypothesis.AttenVec\x12\x18\n\x10normalized_score\x18\x05 \x01(\x02\x1a\x18\n\x08\x41ttenVec\x12\x0c\n\x04prob\x18\x01 \x03(\x02')
)
_HYPOTHESIS_ATTENVEC = _descriptor.Descriptor(
name='AttenVec',
full_name='tensorflow.lingvo.Hypothesis.AttenVec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='prob', full_name='tensorflow.lingvo.Hypothesis.AttenVec.prob', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=220,
)
_HYPOTHESIS = _descriptor.Descriptor(
name='Hypothesis',
full_name='tensorflow.lingvo.Hypothesis',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='beam_id', full_name='tensorflow.lingvo.Hypothesis.beam_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ids', full_name='tensorflow.lingvo.Hypothesis.ids', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scores', full_name='tensorflow.lingvo.Hypothesis.scores', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='atten_vecs', full_name='tensorflow.lingvo.Hypothesis.atten_vecs', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalized_score', full_name='tensorflow.lingvo.Hypothesis.normalized_score', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HYPOTHESIS_ATTENVEC, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=220,
)
_HYPOTHESIS_ATTENVEC.containing_type = _HYPOTHESIS
_HYPOTHESIS.fields_by_name['atten_vecs'].message_type = _HYPOTHESIS_ATTENVEC
DESCRIPTOR.message_types_by_name['Hypothesis'] = _HYPOTHESIS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Hypothesis = _reflection.GeneratedProtocolMessageType('Hypothesis', (_message.Message,), {
'AttenVec' : _reflection.GeneratedProtocolMessageType('AttenVec', (_message.Message,), {
'DESCRIPTOR' : _HYPOTHESIS_ATTENVEC,
'__module__' : 'lingvo.core.ops.hyps_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.lingvo.Hypothesis.AttenVec)
})
,
'DESCRIPTOR' : _HYPOTHESIS,
'__module__' : 'lingvo.core.ops.hyps_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.lingvo.Hypothesis)
})
_sym_db.RegisterMessage(Hypothesis)
_sym_db.RegisterMessage(Hypothesis.AttenVec)
# @@protoc_insertion_point(module_scope)
|
py | 7dffd2eca5a31857dfa2bf074428ec276f3bb777 | import os
from voluptuous import Required
from dvc.exceptions import OutputNotFoundError
from dvc.path_info import PathInfo
from .local import LocalDependency
class RepoDependency(LocalDependency):
PARAM_REPO = "repo"
PARAM_URL = "url"
PARAM_REV = "rev"
PARAM_REV_LOCK = "rev_lock"
REPO_SCHEMA = {
PARAM_REPO: {
Required(PARAM_URL): str,
PARAM_REV: str,
PARAM_REV_LOCK: str,
}
}
def __init__(self, def_repo, stage, *args, **kwargs):
self.def_repo = def_repo
super().__init__(stage, *args, **kwargs)
def _parse_path(self, tree, path):
return None
@property
def is_in_repo(self):
return False
@property
def repo_pair(self):
d = self.def_repo
rev = d.get(self.PARAM_REV_LOCK) or d.get(self.PARAM_REV)
return d[self.PARAM_URL], rev
def __str__(self):
return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL])
def _make_repo(self, *, locked=True):
from dvc.external_repo import external_repo
d = self.def_repo
rev = (d.get("rev_lock") if locked else None) or d.get("rev")
return external_repo(d["url"], rev=rev)
def _get_checksum(self, locked=True):
from dvc.repo.tree import RepoTree
with self._make_repo(locked=locked) as repo:
try:
return repo.find_out_by_relpath(self.def_path).info["md5"]
except OutputNotFoundError:
path = PathInfo(os.path.join(repo.root_dir, self.def_path))
                # we want stream but not fetch, so DVC out directories are
                # walked, but dir contents are not fetched
tree = RepoTree(repo, stream=True)
# We are polluting our repo cache with some dir listing here
if tree.isdir(path):
return self.repo.cache.local.tree.get_hash(path, tree=tree)
return tree.get_file_hash(path)
def workspace_status(self):
current_checksum = self._get_checksum(locked=True)
updated_checksum = self._get_checksum(locked=False)
if current_checksum != updated_checksum:
return {str(self): "update available"}
return {}
def status(self):
return self.workspace_status()
def save(self):
pass
def dumpd(self):
return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}
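    # Illustrative shape of the dict returned by dumpd() (hypothetical values;
    # PARAM_PATH is inherited from the base output class and assumed to
    # serialize as "path"):
    #   {"path": "data/model.pkl",
    #    "repo": {"url": "https://github.com/org/project",
    #             "rev": "master",
    #             "rev_lock": "0123abc..."}}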
def download(self, to):
with self._make_repo() as repo:
if self.def_repo.get(self.PARAM_REV_LOCK) is None:
self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()
cache = self.repo.cache.local
with repo.use_cache(cache):
_, _, cache_infos = repo.fetch_external([self.def_path])
cache.checkout(to.path_info, cache_infos[0])
def update(self, rev=None):
if rev:
self.def_repo[self.PARAM_REV] = rev
with self._make_repo(locked=False) as repo:
self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()
def changed_checksum(self):
        # From the current repo's point of view, a RepoDependency is described
        # by its origin project URL and rev_lock; this makes RepoDependency
        # immutable, hence it's impossible for the checksum to change.
return False
|
py | 7dffd39a0f100ae1bbd4cb95be93f56a01ceacd9 | # Generated by Django 3.2.1 on 2021-05-05 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hood', '0004_auto_20210505_0638'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='bio',
),
migrations.RemoveField(
model_name='profile',
name='prof_picture',
),
migrations.AddField(
model_name='profile',
name='email',
field=models.EmailField(max_length=200, null=True),
),
migrations.AddField(
model_name='profile',
name='name',
field=models.CharField(max_length=20, null=True),
),
]
|
py | 7dffd3d7867d511741ed6f993d11e0cfd1696613 | import math
import click
from strava.decorators import format_result
_ZONES_COLUMNS = (
'zone',
'heartrate'
)
@click.command(name='heartrate',
               help='Generate heartrate zones and FTHR from the provided 20-minute average heartrate.')
@click.argument('heartrate', required=True, nargs=1, type=int)
@format_result(table_columns=_ZONES_COLUMNS)
def get_zones_heartrate(heartrate):
fthr = math.ceil(heartrate * 0.95)
hr_zone1_up = round(heartrate*0.81)
hr_zone2_up = round(heartrate*0.89)
hr_zone3_up = round(heartrate*0.93)
hr_zone4_up = round(heartrate*0.99)
hr_zone5_up = round(heartrate*1.02)
hr_zone6_up = round(heartrate*1.06)
fthr_table = {
'FTHR': fthr,
'---': '---',
'1 (<81%)': f'<{hr_zone1_up}',
'2 (82%-89%)': f'{hr_zone1_up + 1}-{hr_zone2_up}',
'3 (90%-93%)': f'{hr_zone2_up + 1}-{hr_zone3_up}',
'4 (94%-99%)': f'{hr_zone3_up + 1}-{hr_zone4_up}',
'5 (100%-102%)': f'{hr_zone4_up + 1}-{hr_zone5_up}',
'6 (103%-106%)': f'{hr_zone5_up + 1}-{hr_zone6_up}',
'7 (>107%)': f'>{hr_zone6_up + 1}',
}
return _as_table(fthr_table)
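# Worked example of the formulas above (values follow directly from the code):
# for a 20-minute average of 180 bpm, FTHR = ceil(180 * 0.95) = 171 and the
# zone ceilings round to 146 / 160 / 167 / 178 / 184 / 191, so zone 1 is
# reported as '<146', zone 4 as '168-178' and zone 7 as '>192'.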
def _as_table(table):
return [{'zone': k, 'heartrate': v} for k, v in table.items()]
|
py | 7dffd432c8897f08381b52318d6df51d586fe99e | from collections import OrderedDict
from typing import Callable, List
from django.http import HttpResponseNotAllowed
from django.urls import path as django_path
from ninja.operation import Operation, PathView
from ninja.constants import NOT_SET
from ninja.utils import normalize_path
class Router:
def __init__(self):
self.operations = OrderedDict() # TODO: better rename to path_operations
self.api = None
def get(self, path: str, *, auth=NOT_SET, response=None):
return self.api_operation(["GET"], path, auth=auth, response=response)
def post(self, path: str, *, auth=NOT_SET, response=None):
return self.api_operation(["POST"], path, auth=auth, response=response)
def delete(self, path: str, *, auth=NOT_SET, response=None):
return self.api_operation(["DELETE"], path, auth=auth, response=response)
def patch(self, path: str, *, auth=NOT_SET, response=None):
return self.api_operation(["PATCH"], path, auth=auth, response=response)
def put(self, path: str, *, auth=NOT_SET, response=None):
return self.api_operation(["PUT"], path, auth=auth, response=response)
def api_operation(
self, methods: List[str], path: str, *, auth=NOT_SET, response=None
):
def decorator(view_func):
self.add_api_operation(
path, methods, view_func, auth=auth, response=response
)
return view_func
return decorator
def add_api_operation(
self,
path: str,
methods: List[str],
view_func: Callable,
*,
auth=NOT_SET,
response=None
):
if path not in self.operations:
path_view = PathView()
self.operations[path] = path_view
else:
path_view = self.operations[path]
path_view.add(
path=path,
methods=methods,
view_func=view_func,
auth=auth,
response=response,
)
if self.api:
path_view.set_api_instance(self.api)
def set_api_instance(self, api):
self.api = api
for path_view in self.operations.values():
path_view.set_api_instance(self.api)
def urls_paths(self, prefix: str):
for path, path_view in self.operations.items():
path = path.replace("{", "<").replace("}", ">")
route = "/".join([i for i in (prefix, path) if i])
            # to skip a lot of checks we simply treat a double slash as a mistake:
route = normalize_path(route)
route = route.lstrip("/")
yield django_path(route, path_view.get_view())
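# Minimal usage sketch (hypothetical view function and path), mirroring the
# decorator API defined above; in django-ninja the router is then attached to a
# NinjaAPI instance, which calls set_api_instance() and urls_paths() while
# building its urlconf:
#
#   router = Router()
#
#   @router.get("/items/{item_id}")
#   def get_item(request, item_id: int):
#       return {"id": item_id}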
|
py | 7dffd591662f1e149504dcf0ed2cb0da158a433b | import os
class Environment:
def __init__(self, bucket_name):
'''
Constructor for class Environment.
        Initialize the environment used by the Lambda script.
        @param bucket_name: name of the bucket used to infer the environment
'''
if 'development' in bucket_name:
self.environment = 'development'
elif 'staging' in bucket_name:
self.environment = 'staging'
else:
self.environment = 'production'
def name(self):
'''
        Get the name of the environment in use.
>>> Environment('my.bucket.name.for.development').name()
'development'
>>> Environment('my.bucket.name.staging').name()
'staging'
>>> Environment('my.bucket.name').name()
'production'
'''
return self.environment
def development(self):
'''
        Is this the development environment?
>>> Environment('my.bucket.name').development()
False
'''
return self.environment == 'development'
def staging(self):
'''
        Is this the staging environment?
>>> Environment('my.bucket.name.development').staging()
False
'''
return self.environment == 'staging'
def production(self):
'''
        Is this the production environment?
>>> Environment('my.bucket.name').production()
True
'''
return self.environment == 'production'
def get_username(self):
'''
Read environment variable for username (email).
>>> os.environ['USERNAME_DEVE'] = '[email protected]'
>>> os.environ['USERNAME_PROD'] = '[email protected]'
>>> Environment('my.bucket.name').get_username()
'[email protected]'
'''
return os.getenv('USERNAME_{}'.format(self._suffix_env()))
def get_password(self):
'''
Read environment variable for password.
>>> os.environ['PASSWORD_DEVE'] = 'password-development'
>>> os.environ['PASSWORD_STAG'] = 'password-staging'
>>> os.environ['PASSWORD_PROD'] = 'password-production'
>>> Environment('my.bucket.name.staging').get_password()
'password-staging'
'''
return os.getenv('PASSWORD_{}'.format(self._suffix_env()))
def get_api_endpoint(self):
'''
Read environment variable for API endpoint.
>>> os.environ['URL_API_DEVE'] = 'https://api.dazzl.local'
>>> os.environ['URL_API_PROD'] = 'https://api.dazzl.tv'
>>> Environment('my.bucket.name.development').get_api_endpoint()
'https://api.dazzl.local'
'''
return os.getenv('URL_API_{}'.format(self._suffix_env()))
def _suffix_env(self):
'''
Get suffix for environment used by Lambda script.
>>> Environment('my.bucket.name.staging')._suffix_env()
'STAG'
'''
if (self.dev()):
return 'DEVE'
elif (self.staging()):
return 'STAG'
elif (self.prod()):
return 'PROD'
# Aliases method
dev = development
prod = production
if __name__ == "__main__":
import doctest
doctest.testmod(report=False, raise_on_error=True)
|
py | 7dffd73720ba6a435427a5eeef1bb573d2545b30 | import unittest, doctest
from test import test_support
from collections import namedtuple
import pickle, cPickle, copy
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
Point = namedtuple('Point', 'x y')
self.assertEqual(Point.__name__, 'Point')
self.assertEqual(Point.__doc__, 'Point(x, y)')
self.assertEqual(Point.__slots__, ())
self.assertEqual(Point.__module__, __name__)
self.assertEqual(Point.__getitem__, tuple.__getitem__)
self.assertEqual(Point._fields, ('x', 'y'))
self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi') # type has non-alpha char
self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi') # type has keyword
self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi') # type starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi') # field with non-alpha char
self.assertRaises(ValueError, namedtuple, 'abc', 'abc class') # field has keyword
self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi') # field starts with digit
self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi') # field with leading underscore
self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi') # duplicate field
namedtuple('Point0', 'x1 y2') # Verify that numbers are allowed in names
namedtuple('_', 'a b c') # Test leading underscores in a typename
nt = namedtuple('nt', u'the quick brown fox') # check unicode input
self.assert_("u'" not in repr(nt._fields))
nt = namedtuple('nt', (u'the', u'quick')) # check unicode input
self.assert_("u'" not in repr(nt._fields))
self.assertRaises(TypeError, Point._make, [11]) # catch too few args
self.assertRaises(TypeError, Point._make, [11, 22, 33]) # catch too many args
def test_instance(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assertEqual(p, Point(x=11, y=22))
self.assertEqual(p, Point(11, y=22))
self.assertEqual(p, Point(y=22, x=11))
self.assertEqual(p, Point(*(11, 22)))
self.assertEqual(p, Point(**dict(x=11, y=22)))
self.assertRaises(TypeError, Point, 1) # too few args
self.assertRaises(TypeError, Point, 1, 2, 3) # too many args
self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
self.assertRaises(TypeError, eval, 'Point(x=1)', locals()) # missing keyword argument
self.assertEqual(repr(p), 'Point(x=11, y=22)')
self.assert_('__dict__' not in dir(p)) # verify instance has no dict
self.assert_('__weakref__' not in dir(p))
self.assertEqual(p, Point._make([11, 22])) # test _make classmethod
self.assertEqual(p._fields, ('x', 'y')) # test _fields attribute
self.assertEqual(p._replace(x=1), (1, 22)) # test _replace method
self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
try:
p._replace(x=1, error=2)
except ValueError:
pass
else:
self.fail('Did not detect an incorrect fieldname')
# verify that field string can have commas
Point = namedtuple('Point', 'x, y')
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
# verify that fieldspec can be a non-string sequence
Point = namedtuple('Point', ('x', 'y'))
p = Point(x=11, y=22)
self.assertEqual(repr(p), 'Point(x=11, y=22)')
def test_tupleness(self):
Point = namedtuple('Point', 'x y')
p = Point(11, 22)
self.assert_(isinstance(p, tuple))
self.assertEqual(p, (11, 22)) # matches a real tuple
self.assertEqual(tuple(p), (11, 22)) # coercable to a real tuple
self.assertEqual(list(p), [11, 22]) # coercable to a list
self.assertEqual(max(p), 22) # iterable
self.assertEqual(max(*p), 22) # star-able
x, y = p
self.assertEqual(p, (x, y)) # unpacks like a tuple
self.assertEqual((p[0], p[1]), (11, 22)) # indexable like a tuple
self.assertRaises(IndexError, p.__getitem__, 3)
self.assertEqual(p.x, x)
self.assertEqual(p.y, y)
self.assertRaises(AttributeError, eval, 'p.z', locals())
def test_odd_sizes(self):
Zero = namedtuple('Zero', '')
self.assertEqual(Zero(), ())
self.assertEqual(Zero._make([]), ())
self.assertEqual(repr(Zero()), 'Zero()')
self.assertEqual(Zero()._asdict(), {})
self.assertEqual(Zero()._fields, ())
Dot = namedtuple('Dot', 'd')
self.assertEqual(Dot(1), (1,))
self.assertEqual(Dot._make([1]), (1,))
self.assertEqual(Dot(1).d, 1)
self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
self.assertEqual(Dot(1)._asdict(), {'d':1})
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
n = 5000
import string, random
names = list(set(''.join([random.choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
n = len(names)
Big = namedtuple('Big', names)
b = Big(*range(n))
self.assertEqual(b, tuple(range(n)))
self.assertEqual(Big._make(range(n)), tuple(range(n)))
for pos, name in enumerate(names):
self.assertEqual(getattr(b, name), pos)
repr(b) # make sure repr() doesn't blow-up
d = b._asdict()
d_expected = dict(zip(names, range(n)))
self.assertEqual(d, d_expected)
b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
b2_expected = range(n)
b2_expected[1] = 999
b2_expected[-5] = 42
self.assertEqual(b2, tuple(b2_expected))
self.assertEqual(b._fields, tuple(names))
def test_pickle(self):
p = TestNT(x=10, y=20, z=30)
for module in pickle, cPickle:
loads = getattr(module, 'loads')
dumps = getattr(module, 'dumps')
for protocol in -1, 0, 1, 2:
q = loads(dumps(p, protocol))
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
def test_copy(self):
p = TestNT(x=10, y=20, z=30)
for copier in copy.copy, copy.deepcopy:
q = copier(p)
self.assertEqual(p, q)
self.assertEqual(p._fields, q._fields)
class TestOneTrickPonyABCs(unittest.TestCase):
def test_Hashable(self):
# Check some non-hashables
non_samples = [list(), set(), dict()]
for x in non_samples:
self.failIf(isinstance(x, Hashable), repr(x))
self.failIf(issubclass(type(x), Hashable), repr(type(x)))
# Check some hashables
samples = [None,
int(), float(), complex(),
str(),
tuple(), frozenset(),
int, list, object, type,
]
for x in samples:
self.failUnless(isinstance(x, Hashable), repr(x))
self.failUnless(issubclass(type(x), Hashable), repr(type(x)))
self.assertRaises(TypeError, Hashable)
# Check direct subclassing
class H(Hashable):
def __hash__(self):
return super(H, self).__hash__()
__eq__ = Hashable.__eq__ # Silence Py3k warning
self.assertEqual(hash(H()), 0)
self.failIf(issubclass(int, H))
def test_Iterable(self):
# Check some non-iterables
non_samples = [None, 42, 3.14, 1j]
for x in non_samples:
self.failIf(isinstance(x, Iterable), repr(x))
self.failIf(issubclass(type(x), Iterable), repr(type(x)))
# Check some iterables
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.failUnless(isinstance(x, Iterable), repr(x))
self.failUnless(issubclass(type(x), Iterable), repr(type(x)))
# Check direct subclassing
class I(Iterable):
def __iter__(self):
return super(I, self).__iter__()
self.assertEqual(list(I()), [])
self.failIf(issubclass(str, I))
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
{}, set()]
for x in non_samples:
self.failIf(isinstance(x, Iterator), repr(x))
self.failIf(issubclass(type(x), Iterator), repr(type(x)))
samples = [iter(str()),
iter(tuple()), iter(list()), iter(dict()),
iter(set()), iter(frozenset()),
iter(dict().keys()), iter(dict().items()),
iter(dict().values()),
(lambda: (yield))(),
(x for x in []),
]
for x in samples:
self.failUnless(isinstance(x, Iterator), repr(x))
self.failUnless(issubclass(type(x), Iterator), repr(type(x)))
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Sized), repr(x))
self.failIf(issubclass(type(x), Sized), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.failUnless(isinstance(x, Sized), repr(x))
self.failUnless(issubclass(type(x), Sized), repr(type(x)))
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Container), repr(x))
self.failIf(issubclass(type(x), Container), repr(type(x)))
samples = [str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.failUnless(isinstance(x, Container), repr(x))
self.failUnless(issubclass(type(x), Container), repr(type(x)))
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", "".encode('ascii'), (), [], {}, set(),
(lambda: (yield))(),
(x for x in []),
]
for x in non_samples:
self.failIf(isinstance(x, Callable), repr(x))
self.failIf(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.failUnless(isinstance(x, Callable), repr(x))
self.failUnless(issubclass(type(x), Callable), repr(type(x)))
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C(B):
pass
self.failUnless(issubclass(C, B))
self.failIf(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
class C:
__metaclass__ = type
__hash__ = None # Make sure it isn't hashable by default
self.failIf(issubclass(C, B), B.__name__)
B.register(C)
self.failUnless(issubclass(C, B))
class TestCollectionABCs(unittest.TestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
for sample in [set, frozenset]:
self.failUnless(isinstance(sample(), Set))
self.failUnless(issubclass(sample, Set))
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.failUnless(hash(a) == hash(b))
def test_MutableSet(self):
self.failUnless(isinstance(set(), MutableSet))
self.failUnless(issubclass(set, MutableSet))
self.failIf(isinstance(frozenset(), MutableSet))
self.failIf(issubclass(frozenset, MutableSet))
def test_Mapping(self):
for sample in [dict]:
self.failUnless(isinstance(sample(), Mapping))
self.failUnless(issubclass(sample, Mapping))
def test_MutableMapping(self):
for sample in [dict]:
self.failUnless(isinstance(sample(), MutableMapping))
self.failUnless(issubclass(sample, MutableMapping))
def test_Sequence(self):
for sample in [tuple, list, str]:
self.failUnless(isinstance(sample(), Sequence))
self.failUnless(issubclass(sample, Sequence))
self.failUnless(issubclass(basestring, Sequence))
def test_MutableSequence(self):
for sample in [tuple, str]:
self.failIf(isinstance(sample(), MutableSequence))
self.failIf(issubclass(sample, MutableSequence))
for sample in [list]:
self.failUnless(isinstance(sample(), MutableSequence))
self.failUnless(issubclass(sample, MutableSequence))
self.failIf(issubclass(basestring, MutableSequence))
import doctest, collections
def test_main(verbose=None):
NamedTupleDocs = doctest.DocTestSuite(module=collections)
test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs, TestCollectionABCs]
test_support.run_unittest(*test_classes)
test_support.run_doctest(collections, verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
py | 7dffd7428f654cbe6d05f8139af53b7daeb94eab | # Generated by Django 3.2.9 on 2021-12-18 06:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boards', '0004_topic_description'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='관련 교과목을 입력하세요.(ex:수학)', max_length=50)),
],
),
migrations.AddField(
model_name='post',
name='category',
field=models.ManyToManyField(help_text='글의 분류를 설정하세요.', to='boards.Category'),
),
]
|
py | 7dffd7fa9679c7c05fd92d40e7aabc62bfd619de | # coding=utf-8
# Copyright 2018 StrTrek Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inner Required
from Babelor.Session.MessageQueue import ZMQ as MQ
|
py | 7dffd894caed1515e4f403c865faab826c3a77cd | import importlib
import logging
from flask import Flask, Response, current_app, json
from flask_httpauth import HTTPBasicAuth
from .exceptions import BackendError, ProcessingError
from .version import __version__ # noqa
from .views import MEDIA_TYPE_TAXII_V21
# Console Handler for medallion messages
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter("[%(name)s] [%(levelname)-8s] [%(asctime)s] %(message)s"))
# Module-level logger
log = logging.getLogger(__name__)
log.addHandler(ch)
application_instance = Flask(__name__)
auth = HTTPBasicAuth()
def load_app(config_file):
with open(config_file, "r") as f:
configuration = json.load(f)
set_config(application_instance, "users", configuration)
set_config(application_instance, "taxii", configuration)
set_config(application_instance, "backend", configuration)
register_blueprints(application_instance)
return application_instance
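# A minimal illustration of the json configuration load_app() expects ("users", "taxii" and
# "backend" are the three sections consumed by set_config() below; the example values mirror the
# fall-back defaults used in that function):
#
#   {
#       "users": {"user": "pass"},
#       "taxii": {"max_page_size": 100},
#       "backend": {"module": "medallion.backends.memory_backend",
#                   "module_class": "MemoryBackend",
#                   "filename": "default_data.json"}
#   }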
def set_config(flask_application_instance, prop_name, config):
with flask_application_instance.app_context():
log.debug("Registering medallion {} configuration into {}".format(prop_name, current_app))
if prop_name == "taxii":
try:
flask_application_instance.taxii_config = config[prop_name]
except KeyError:
flask_application_instance.taxii_config = {'max_page_size': 100}
elif prop_name == "users":
try:
flask_application_instance.users_backend = config[prop_name]
except KeyError:
log.warning("You did not give user information in your config.")
log.warning("We are giving you the default user information of:")
log.warning("User = user")
log.warning("Pass = pass")
flask_application_instance.users_backend = {"user": "pass"}
elif prop_name == "backend":
try:
flask_application_instance.medallion_backend = connect_to_backend(config[prop_name])
except KeyError:
log.warning("You did not give backend information in your config.")
log.warning("We are giving medallion the default settings,")
log.warning("which includes a data file of 'default_data.json'.")
log.warning("Please ensure this file is in your CWD.")
back = {'module': 'medallion.backends.memory_backend', 'module_class': 'MemoryBackend', 'filename': None}
flask_application_instance.medallion_backend = connect_to_backend(back)
def connect_to_backend(config_info):
log.debug("Initializing backend configuration using: {}".format(config_info))
if "module" not in config_info:
raise ValueError("No module parameter provided for the TAXII server.")
if "module_class" not in config_info:
raise ValueError("No module_class parameter provided for the TAXII server.")
try:
module = importlib.import_module(config_info["module"])
module_class = getattr(module, config_info["module_class"])
log.debug("Instantiating medallion backend with {}".format(module_class))
return module_class(**config_info)
except Exception as e:
log.error("Unknown backend for TAXII server. {} ".format(str(e)))
raise
def register_blueprints(flask_application_instance):
from medallion.views import collections
from medallion.views import discovery
from medallion.views import manifest
from medallion.views import objects
with flask_application_instance.app_context():
log.debug("Registering medallion blueprints into {}".format(current_app))
current_app.register_blueprint(collections.collections_bp)
current_app.register_blueprint(discovery.discovery_bp)
current_app.register_blueprint(manifest.manifest_bp)
current_app.register_blueprint(objects.objects_bp)
@auth.get_password
def get_pwd(username):
if username in current_app.users_backend:
return current_app.users_backend.get(username)
return None
@application_instance.errorhandler(500)
def handle_error(error):
e = {
"title": "InternalError",
"http_status": "500",
"description": str(error.args[0]),
}
return Response(
response=json.dumps(e),
status=500,
mimetype=MEDIA_TYPE_TAXII_V21,
)
@application_instance.errorhandler(ProcessingError)
def handle_processing_error(error):
e = {
"title": str(error.__class__.__name__),
"http_status": str(error.status),
"description": str(error),
}
return Response(
response=json.dumps(e),
status=error.status,
headers=getattr(error, "headers", None),
mimetype=MEDIA_TYPE_TAXII_V21,
)
@application_instance.errorhandler(BackendError)
def handle_backend_error(error):
e = {
"title": str(error.__class__.__name__),
"http_status": str(error.status),
"description": str(error),
}
return Response(
response=json.dumps(e),
status=error.status,
mimetype=MEDIA_TYPE_TAXII_V21,
)
|
py | 7dffd898cbe586edb8a7f3ce5f8993b26137366b | from miscSupports import load_json, write_json, flatten
from collections import Counter
from pathlib import Path
class WeightExternal:
def __init__(self, external_data_path, weights_path, date_max, delimiter="__"):
# Load the external data
assert Path(external_data_path).exists(), "Path to external data is invalid"
self.database = load_json(external_data_path)
# The delimiter to access GID and the end date for weighting
self.delimiter = delimiter
self._user_end_date = date_max
# Create a GID: Place lookup dict to aid extraction of data
self.searcher = {place.split(self.delimiter)[0]: place for place in self.database.keys()}
# The unique attributes from all places
self.attributes = list(set([attr for place in self.database.keys() for attr in self.database[place].keys()
if isinstance(self.database[place][attr], dict)]))
# The weight dates created via AssignWeights
self._weights_dates = load_json(weights_path)
# Output jsons of the master weighting database as well as a non_common json to aid in finding weight errors
self._master = {}
self._non_common = {place_name: {} for place_name in self._weights_dates}
def weight_external(self, write_path, write_name="Weighted"):
"""
This uses all the places and weights from the weights-by-dates file to weight an external data
source.
"""
for place_name in self._weights_dates:
# See how many changes exist for this place
dates_of_change = [date for date in self._weights_dates[place_name].keys()]
# If there is only one date, we have no weighting to do as the place remains unchanged from its first state
if (len(dates_of_change) == 1) and self.extract_data(place_name):
self._master[place_name] = self.extract_data(place_name)
# Otherwise we need to weight the data, and potentially consider non-common dates across places
else:
self._master[place_name] = self._weight_place(place_name, dates_of_change)
# Write out the weighted data
print("Finished constructing weights - writing to file")
write_json(self._master, write_path, write_name)
if len(self._non_common.keys()) > 0:
write_non_common = {key: value for key, value in self._non_common.items() if len(value) > 0}
write_json(write_non_common, write_path, "NonCommonDates")
def extract_data(self, place):
"""
Check to see if the database contains a given place
"""
try:
return self.database[self._search_name(place)]
except KeyError:
return None
def _search_name(self, place_name):
"""
Extract the GID key from a place_name and use it to look up the corresponding data key in the
searcher dict
"""
return self.searcher[place_name.split(self.delimiter)[0]]
def _weight_place(self, place_name, dates_of_change):
"""
Use weights and dates from a combination of ConstructWeights and AssignWeights to create a weight value set for
a given place.
If a place changes over time, then we need to weight the data based on those changes. If there is only a single
place, for example a place that used to be 50% larger and so represents 50% of the values before a given change,
then we just need to weight all the values by that weight.
If the place used to be made up of multiple places, then we need to weight each place by its weight and then sum
the values. The latter has more problems associated with it, as you need data in ALL of the places, otherwise you
end up with uncommon dates where you cannot weight a certain district because you don't have all the data you
need.
:param place_name: The place we wish to construct weights for
:type place_name: str
:param dates_of_change: The yyyymmdd dates of changes
:type dates_of_change: list
:return: A dict of all the weighted values for all the attributes found for this place
:rtype: dict
"""
place_dict = {attr: {} for attr in self.attributes}
# For each change that occurs in this place
for index, date in enumerate(dates_of_change, 1):
# Set the date min and max dates for the current date change
date_min, date_max = self._set_min_max_date(index, date, dates_of_change)
# If there is only one place for this change, then we just need to weight the values relevant to the dates
if len(self._weights_dates[place_name][date]) == 1:
self._weight_single(place_name, date, place_dict, date_min, date_max)
# Otherwise we have to make sure all places have all dates, and sum the weighted values appropriately.
else:
self._weight_multiple(place_name, date, place_dict, date_min, date_max)
return place_dict
def _set_min_max_date(self, index, date, dates_range):
"""
If we have reached the last date provided, then set max date to be the max provided by the user, otherwise
create a time period from the current date and the next date
"""
if index < len(dates_range):
return int(date), int(dates_range[index])
else:
return int(date), int(self._user_end_date)
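# Worked example (hypothetical dates): with dates_range == ["19110101", "19210101"] and a user end
# date of 19390101, index 1 gives (19110101, 19210101) and index 2 (the final change) gives
# (19210101, 19390101).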
def _weight_single(self, place_name, weight_date, place_dict, date_min, date_max):
"""
Extract data from a single location and weight the values between a min and max date
If there is only a single place, then we don't need to worry about non-common dates across weight places and we
can just weight the values of each attribute and append them to our place dict as long as the database contains
information about this place.
:param place_name: The current name of the place we are constructing weights for
:type place_name: str
:param weight_date: The current date of change to load weights from
:type weight_date: str
:param place_dict: The storage dict for all the data from this place which will be appended to the master json
:type place_dict: dict
:param date_min: The start date of this weight
:type date_min: str | int
:param date_max: The end date of this weight
:param date_max: str | int
:return: Nothing, append weight values per date with the date range to the place_dict then stop
:rtype: None
"""
# Extract the place_names place key and weight for this place
place_key, weight = self._extract_weight_place(self._weights_dates[place_name][weight_date])
# If the database contains information about this place
if self.extract_data(place_key):
# For each unique attribute
for attr in self.attributes:
# Isolate the data from the database for this place's attribute
try:
data = self.extract_data(place_key)[attr]
# Assign the weight value for this date for this attribute to the place json database dict
for date, value in data.items():
if date_min <= int(date) < date_max:
place_dict[attr][int(date)] = self.calculate_weight(value, weight)
# If the attribute doesn't exist for this place, pass
except KeyError:
pass
# Warn the user that we have failed to find a location, so it will be missing
else:
print(f"Warning: No data found for {place_key}")
@staticmethod
def _extract_weight_place(date_data):
"""
Extract the places and weights for a given date for a given place in weight_dates
"""
weight_places = [place for place in date_data.keys()]
weight_list = [weight for weight in date_data.values()]
if (len(weight_places) == 1) and (len(weight_list) == 1):
return weight_places[0], weight_list[0]
else:
return weight_places, weight_list
@staticmethod
def calculate_weight(value, weight):
"""
A weight value is equal to the value * (percentage weight / 100)
Note
-----
Weights in weightGIS are stored as if out of 100.00, so 50% is 50.00. When weighting we need to multiply by
a decimal, so the weight is converted back into decimal form, for example 0.5, by dividing all the weights
by 100.
"""
if isinstance(value, str):
return value
else:
return value * (weight / 100)
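# Illustration of the rule above: calculate_weight(200, 50) returns 200 * (50 / 100) == 100.0,
# while a non-numeric value such as "NA" is returned unchanged.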
def _weight_multiple(self, place_name, weight_date, place_dict, date_min, date_max):
"""
weight a places values based on weights of multiple places
If there are multiple places in this change period, then we need to extract the data and check that we have the
same dates for each place in this change. If we do, we can sum the weighted values for each date.
:param place_name: The current name of the place we are constructing weights for
:type place_name: str
:param weight_date: The current date of change to load weights from
:type weight_date: str
:param place_dict: The storage dict for all the data from this place which will be appended to the master json
:type place_dict: dict
:param date_min: The start date of this weight
:type date_min: str | int
:param date_max: The end date of this weight
:param date_max: str | int
:return: Nothing, append weight values per date with the date range to the place_dict then stop
:rtype: None
:raise AssertionError: If the len of validate dates is not equal to the number of places after trying to pass
only places within common attributes
"""
# Extract the places and weights involved in this change
weight_places, weights = self._extract_weight_place(self._weights_dates[place_name][weight_date])
# Determine if we have data for each place
all_valid = [self.extract_data(place) for place in weight_places if self.extract_data(place)]
# If we have data for both places
if len(all_valid) == len(weight_places):
for attr in self.attributes:
try:
# Not all places will have the same attributes, This should be picked up by a KeyError but if not
# the assertion should catch it
validate_dates = [self.extract_data(place)[attr] for place in weight_places]
assert len(validate_dates) == len(weight_places), "Critical Error: KeyError failed to catch " \
"missing attribute"
# Extract all the common dates
dates_list = self._extract_usable_dates(attr, date_min, date_max, weight_places, place_name)
# Use these dates to create a set of weight values
weight_values = [self._weight_summation(date, attr, weight_places, weights) for date in dates_list]
# Assign the weighted values to the dates
for date, value in zip(dates_list, weight_values):
place_dict[attr][int(date)] = value
except KeyError:
pass
else:
print(f"Warning: Found {len(all_valid)} out of {len(weight_places)} places for {place_name}'s weighted "
f"places of: {weight_places}\n : Data from {weight_date}-{date_max} will be dropped\n")
def _extract_usable_dates(self, attr, date_min, date_max, weight_places, place_name):
"""
Determine if all required dates are present and return the common dates between places. If the location does not
contain common dates, save the location date errors to a separate json
If we have multiple places, not all places may have the same dates. We cannot create a weighted value from
multiple places if some of those places are missing. So in these cases, nothing is written to the dataset, and
an entry explaining what was missing is written out to the non-common json. If we do have common dates for a
given place, then we return these dates, which will be indexed to extract the values associated with them
:param attr: The current attribute we are weight values for
:type attr: str
:param date_min: The start date of this weight
:type date_min: str | int
:param date_max: The end date of this weight
:param date_max: str | int
:param weight_places: The places that are involved in weighting for this place between date_min and date_max
:type weight_places: list
:param place_name: The current name of the place we are constructing weights for
:type place_name: str
:return: All the common dates for the weight_places involved in this place
:rtype: list
"""
# Isolate all the dates for all the weights places in place_list
dates_list = flatten([list(self.extract_data(place)[attr].keys()) for place in weight_places])
# Keep the dates within the time range we are looking for
dates_list = [int(date) for date in dates_list if date_min <= int(date) < date_max]
# Count each occurrence of a date to ensure we have the same number in all places
dates_dict = Counter(dates_list)
# If we have any non_common dates, we can't use this date for weighting as we won't have data in all of the
# places involved in the weight
non_common_dates = [date for date in dates_dict if dates_dict[date] != len(weight_places)]
if len(non_common_dates) > 0:
# Write out this information for users so they can fix their raw data
self._non_common[place_name][attr] = {"Places": weight_places, "Target": len(weight_places),
"Dates": {d: dates_dict[d] for d in non_common_dates}}
print(f"Warning: Non Common dates found for {attr} in {weight_places}")
# Return common dates list
return sorted([date for date in dates_dict if date not in non_common_dates])
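# Illustration (hypothetical dates): if one weight place holds dates {19110101, 19210101} for an
# attribute but the other only holds {19110101}, the Counter gives 19210101 a count of 1 (< 2
# places), so it is logged as non-common and only 19110101 is returned for weighting.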
def _weight_summation(self, date, attr, place_list, weights):
"""
Create a summed value from all the weights of the places
"""
# Isolate the raw value from each place
values = [self.extract_data(place)[attr][str(date)] for place in place_list]
# Weight these values
weighted_values = [self.calculate_weight(value, weight) for value, weight in zip(values, weights)]
# Return them if they are all ints or floats, else return NA
if all(isinstance(v, (int, float)) for v in weighted_values):
return sum(weighted_values)
else:
return "NA"
|
py | 7dffd8a4dc8db0d3e5bf80564a8853fe91019822 | import os
import sys
import json
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
print("Analysing....")
with open('base_classification.json') as json_file:
system = json.load(json_file)
system["epochs"] = int(system["epochs"]);
if(system["freeze_base_model"] == "yes"):
system["freeze_base_model"] = True;
else:
system["freeze_base_model"] = False;
if(system["val"] == "yes"):
system["val"] = True;
else:
system["val"] = False;
sys.path.append("monk_v1/monk/");
if(system["backend"] == "Mxnet-1.5.1"):
from gluon_prototype import prototype
elif(system["backend"] == "Pytorch-1.3.1"):
from pytorch_prototype import prototype
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
from keras_prototype import prototype
ptf = prototype(verbose=1);
ptf.Prototype(system["project"], system["experiment"]);
if(system["structuretype"] == "foldered"):
if(system["val"]):
ptf.Default(dataset_path=[system["traindata"]["dir"], system["valdata"]["dir"]],
model_name=system["model"],
freeze_base_network=system["freeze_base_model"],
num_epochs=system["epochs"]);
else:
ptf.Default(dataset_path=system["traindata"]["dir"],
model_name=system["model"],
freeze_base_network=system["freeze_base_model"],
num_epochs=system["epochs"]);
else:
if(system["val"]):
ptf.Default(dataset_path=[system["traindata"]["cdir"], system["valdata"]["cdir"]],
path_to_csv=[system["traindata"]["csv"], system["valdata"]["csv"]],
model_name=system["model"],
freeze_base_network=system["freeze_base_model"],
num_epochs=system["epochs"]);
else:
ptf.Default(dataset_path=system["traindata"]["cdir"],
path_to_csv=system["traindata"]["csv"],
model_name=system["model"],
freeze_base_network=system["freeze_base_model"],
num_epochs=system["epochs"]);
dataset_reload_status = False;
if(system["update"]["input_size"]["active"]):
dataset_reload_status = True;
ptf.update_input_size(int(system["update"]["input_size"]["value"]));
if(system["update"]["batch_size"]["active"]):
dataset_reload_status = True;
ptf.update_batch_size(int(system["update"]["batch_size"]["value"]));
if(system["update"]["shuffle_data"]["active"]):
dataset_reload_status = True;
if(system["update"]["shuffle_data"]["value"] == "True"):
ptf.update_shuffle_data(True);
else:
ptf.update_shuffle_data(False);
if(system["update"]["num_processors"]["active"]):
dataset_reload_status = True;
ptf.update_num_processors(int(system["update"]["num_processors"]["value"]));
if(system["update"]["trainval_split"]["active"]):
dataset_reload_status = True;
ptf.update_trainval_split(float(system["update"]["trainval_split"]["value"]));
if(system["update"]["transforms"]["active"]):
dataset_reload_status = True;
ptf.reset_transforms();
ptf.reset_transforms(test=True);
if(system["backend"] == "Mxnet-1.5.1"):
for i in range(len(system["update"]["transforms"]["value"])):
name = system["update"]["transforms"]["value"][i]["name"];
params = system["update"]["transforms"]["value"][i]["params"];
if(params["train"] == "True"):
train = True;
else:
train = False;
if(params["val"] == "True"):
val = True;
else:
val = False;
if(params["test"] == "True"):
test = True;
else:
test = False;
if(name == "apply_random_resized_crop"):
ptf.apply_random_resized_crop(
int(params["input_size"]),
scale=(float(params["scale"][0]), float(params["scale"][1])),
ratio=(float(params["ratio"][0]), float(params["ratio"][0])),
train=train, val=val, test=test);
elif(name == "apply_center_crop"):
ptf.apply_center_crop(
int(params["input_size"]),
train=train, val=val, test=test);
elif(name == "apply_color_jitter"):
ptf.apply_color_jitter(
brightness=float(params["brightness"]),
contrast=float(params["contrast"]),
saturation=float(params["saturation"]),
hue=float(params["hue"]),
train=train, val=val, test=test);
elif(name == "apply_random_horizontal_flip"):
ptf.apply_random_horizontal_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_vertical_flip"):
ptf.apply_random_vertical_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_lighting"):
ptf.apply_random_lighting(
alpha=float(params["alpha"]),
train=train, val=val, test=test);
elif(name == "apply_resize"):
ptf.apply_resize(
int(params["input_size"]),
train=train, val=val, test=test);
elif(name == "apply_normalize"):
ptf.apply_normalize(
mean=[float(params["mean"][0]),float(params["mean"][1]), float(params["mean"][2])],
std=[float(params["std"][0]),float(params["std"][1]), float(params["std"][2])],
train=train, val=val, test=test);
elif(system["backend"] == "Pytorch-1.3.1"):
for i in range(len(system["update"]["transforms"]["value"])):
name = system["update"]["transforms"]["value"][i]["name"];
params = system["update"]["transforms"]["value"][i]["params"];
if(params["train"] == "True"):
train = True;
else:
train = False;
if(params["val"] == "True"):
val = True;
else:
val = False;
if(params["test"] == "True"):
test = True;
else:
test = False;
if(name == "apply_center_crop"):
ptf.apply_center_crop(
int(params["input_size"]),
train=train, val=val, test=test);
elif(name == "apply_color_jitter"):
ptf.apply_color_jitter(
brightness=float(params["brightness"]),
contrast=float(params["contrast"]),
saturation=float(params["saturation"]),
hue=float(params["hue"]),
train=train, val=val, test=test);
elif(name == "apply_random_affine"):
ptf.apply_random_affine(
float(params["degrees"]),
translate=(float(params["translate"][0]), float(params["translate"][1])),
scale=(float(params["scale"][0]), float(params["scale"][1])),
shear=(float(params["shear"][0]), float(params["shear"][1])),
train=train, val=val, test=test);
elif(name == "apply_random_crop"):
ptf.apply_random_crop(
int(params["input_size"]),
train=train, val=val, test=test);
elif(name == "apply_random_horizontal_flip"):
ptf.apply_random_horizontal_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_perspective"):
ptf.apply_random_perspective(
distortion_scale=float(params["distortion_scale"]),
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_resized_crop"):
ptf.apply_random_resized_crop(
int(params["input_size"]),
scale=(float(params["scale"][0]), float(params["scale"][1])),
ratio=(float(params["ratio"][0]), float(params["ratio"][0])),
train=train, val=val, test=test);
elif(name == "apply_random_rotation"):
ptf.apply_random_rotation(
float(params["degrees"]),
train=train, val=val, test=test);
elif(name == "apply_random_vertical_flip"):
ptf.apply_random_vertical_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_resize"):
ptf.apply_resize(
int(params["input_size"]),
train=train, val=val, test=test);
elif(name == "apply_normalize"):
ptf.apply_normalize(
mean=[float(params["mean"][0]),float(params["mean"][1]), float(params["mean"][2])],
std=[float(params["std"][0]),float(params["std"][1]), float(params["std"][2])],
train=train, val=val, test=test);
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
for i in range(len(system["update"]["transforms"]["value"])):
name = system["update"]["transforms"]["value"][i]["name"];
params = system["update"]["transforms"]["value"][i]["params"];
if(params["train"] == "True"):
train = True;
else:
train = False;
if(params["val"] == "True"):
val = True;
else:
val = False;
if(params["test"] == "True"):
test = True;
else:
test = False;
if(name == "apply_color_jitter"):
ptf.apply_color_jitter(
brightness=float(params["brightness"]),
contrast=float(params["contrast"]),
saturation=float(params["saturation"]),
hue=float(params["hue"]),
train=train, val=val, test=test);
elif(name == "apply_random_affine"):
ptf.apply_random_affine(
float(params["degrees"]),
translate=(float(params["translate"][0]), float(params["translate"][1])),
scale=(float(params["scale"][0]), float(params["scale"][1])),
shear=(float(params["shear"][0]), float(params["shear"][1])),
train=train, val=val, test=test);
elif(name == "apply_random_horizontal_flip"):
ptf.apply_random_horizontal_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_vertical_flip"):
ptf.apply_random_vertical_flip(
probability=float(params["probability"]),
train=train, val=val, test=test);
elif(name == "apply_random_rotation"):
ptf.apply_random_rotation(
float(params["degrees"]),
train=train, val=val, test=test);
elif(name == "apply_mean_subtraction"):
ptf.apply_mean_subtraction(
mean=[float(params["mean"][0]),float(params["mean"][1]), float(params["mean"][2])],
train=train, val=val, test=test);
elif(name == "apply_normalize"):
ptf.apply_normalize(
mean=[float(params["mean"][0]),float(params["mean"][1]), float(params["mean"][2])],
std=[float(params["std"][0]),float(params["std"][1]), float(params["std"][2])],
train=train, val=val, test=test);
if(dataset_reload_status):
ptf.Reload();
model_reload_status = False;
if(system["update"]["model_name"]["active"]):
model_reload_status = True;
ptf.update_model_name(system["update"]["model_name"]["value"]);
if(system["update"]["use_gpu"]["active"]):
model_reload_status = True;
if(system["update"]["use_gpu"]["value"] == "True"):
ptf.update_use_gpu(True);
else:
ptf.update_use_gpu(False);
if(system["update"]["use_pretrained"]["active"]):
model_reload_status = True;
if(system["update"]["use_pretrained"]["value"] == "True"):
ptf.update_use_pretrained(True);
else:
ptf.update_use_pretrained(False);
if(system["update"]["freeze_base_network"]["active"]):
model_reload_status = True;
if(system["update"]["freeze_base_network"]["value"] == "True"):
ptf.update_freeze_base_network(True);
else:
ptf.update_freeze_base_network(False);
if(system["update"]["freeze_layers"]["active"]):
model_reload_status = True;
ptf.update_freeze_layers(int(system["update"]["freeze_layers"]["value"]));
if(system["update"]["layers"]["active"]):
model_reload_status = True;
if(system["backend"] == "Mxnet-1.5.1"):
for i in range(len(system["update"]["layers"]["value"])):
name = system["update"]["layers"]["value"][i]["name"];
params = system["update"]["layers"]["value"][i]["params"];
if(params["final"] == "Yes"):
final_layer = True;
else:
final_layer = False;
if(name == "append_linear"):
if(final_layer):
ptf.append_linear(
final_layer=final_layer);
else:
ptf.append_linear(
num_neurons=int(params["neurons"]),
final_layer=final_layer);
elif(name == "append_dropout"):
ptf.append_dropout(
probability=float(params["probability"]),
final_layer=final_layer);
elif(name == "relu"):
ptf.append_relu(
final_layer=final_layer);
elif(name == "sigmoid"):
ptf.append_sigmoid(
final_layer=final_layer);
elif(name == "tanh"):
ptf.append_tanh(
final_layer=final_layer);
elif(name == "softpllus"):
ptf.append_softplus(
beta=float(params["beta"]),
threshold=float(params["threshold"]),
final_layer=final_layer);
elif(name == "softsign"):
ptf.append_softsign(
final_layer=final_layer);
elif(name == "elu"):
ptf.append_elu(
alpha=float(params["alpha"]),
final_layer=final_layer);
elif(name == "leaky_relu"):
ptf.append_leakyrelu(
negative_slope=float(params["negative_slope"]),
final_layer=final_layer);
elif(name == "prelu"):
ptf.append_prelu(
num_parameters=1,
init=float(params["init"]),
final_layer=final_layer);
elif(name == "selu"):
ptf.append_selu(
final_layer=final_layer);
elif(name == "swish"):
ptf.append_swish(
beta=float(params["beta"]),
final_layer=final_layer);
elif(system["backend"] == "Pytorch-1.3.1"):
for i in range(len(system["update"]["layers"]["value"])):
name = system["update"]["layers"]["value"][i]["name"];
params = system["update"]["layers"]["value"][i]["params"];
if(params["final"] == "Yes"):
final_layer = True;
else:
final_layer = False;
if(name == "append_linear"):
if(final_layer):
ptf.append_linear(
final_layer=final_layer);
else:
ptf.append_linear(
num_neurons=int(params["neurons"]),
final_layer=final_layer);
elif(name == "append_dropout"):
ptf.append_dropout(
probability=float(params["probability"]),
final_layer=final_layer);
elif(name == "relu"):
ptf.append_relu(
final_layer=final_layer);
elif(name == "sigmoid"):
ptf.append_sigmoid(
final_layer=final_layer);
elif(name == "tanh"):
ptf.append_tanh(
final_layer=final_layer);
elif(name == "softplus"):
ptf.append_softplus(
beta=float(params["beta"]),
threshold=float(params["threshold"]),
final_layer=final_layer);
elif(name == "softsign"):
ptf.append_softsign(
final_layer=final_layer);
elif(name == "elu"):
ptf.append_elu(
alpha=float(params["alpha"]),
final_layer=final_layer);
elif(name == "leaky_relu"):
ptf.append_leakyrelu(
negative_slope=float(params["negative_slope"]),
final_layer=final_layer);
elif(name == "prelu"):
ptf.append_prelu(
num_parameters=1,
init=float(params["init"]),
final_layer=final_layer);
elif(name == "selu"):
ptf.append_selu(
final_layer=final_layer);
elif(name == "hardshrink"):
ptf.append_hardshrink(
lambd=float(params["lambd"]),
final_layer=final_layer);
elif(name == "hardtanh"):
ptf.append_hardtanh(
min_val=float(params["min_val"]),
max_val=float(params["max_val"]),
final_layer=final_layer);
elif(name == "logsigmoid"):
ptf.append_logsigmoid(
final_layer=final_layer);
elif(name == "relu6"):
ptf.append_relu6(
final_layer=final_layer);
elif(name == "rrelu"):
ptf.append_rrelu(
lower=float(params["lower"]),
upper=float(params["upper"]),
final_layer=final_layer);
elif(name == "celu"):
ptf.append_celu(
alpha=float(params["alpha"]),
final_layer=final_layer);
elif(name == "softshrink"):
ptf.append_softshrink(
lambd=float(params["lambd"]),
final_layer=final_layer);
elif(name == "tanhshrink"):
ptf.append_tanhshrink(
final_layer=final_layer);
elif(name == "threshold"):
ptf.append_threshold(
float(params["threshold"]),
float(params["value"]),
final_layer=final_layer)
elif(name == "softmin"):
ptf.append_softmin(
final_layer=final_layer);
elif(name == "softmax"):
ptf.append_softmax(
final_layer=final_layer);
elif(name == "logsoftmax"):
ptf.append_logsoftmax(
final_layer=final_layer);
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
for i in range(len(system["update"]["layers"]["value"])):
name = system["update"]["layers"]["value"][i]["name"];
params = system["update"]["layers"]["value"][i]["params"];
if(params["final"] == "Yes"):
final_layer = True;
else:
final_layer = False;
if(name == "append_linear"):
if(final_layer):
ptf.append_linear(
final_layer=final_layer);
else:
ptf.append_linear(
num_neurons=int(params["neurons"]),
final_layer=final_layer);
elif(name == "append_dropout"):
ptf.append_dropout(
probability=float(params["probability"]),
final_layer=final_layer);
elif(name == "relu"):
ptf.append_relu(
final_layer=final_layer);
elif(name == "elu"):
ptf.append_elu(
alpha=float(params["alpha"]),
final_layer=final_layer);
elif(name == "leaky_relu"):
ptf.append_leakyrelu(
negative_slope=float(params["negative_slope"]),
final_layer=final_layer);
elif(name == "prelu"):
ptf.append_prelu(
num_parameters=1,
init=float(params["init"]),
final_layer=final_layer);
elif(name == "threshold"):
ptf.append_threshold(
float(params["threshold"]),
float(params["value"]),
final_layer=final_layer);
elif(name == "softmax"):
ptf.append_softmax(
final_layer=final_layer);
elif(name == "selu"):
ptf.append_selu(
final_layer=final_layer);
elif(name == "softplus"):
ptf.append_softplus(
beta=float(params["beta"]),
threshold=float(params["threshold"]),
final_layer=final_layer);
elif(name == "softsign"):
ptf.append_softsign(
final_layer=final_layer);
elif(name == "tanh"):
ptf.append_tanh(
final_layer=final_layer);
elif(name == "sigmoid"):
ptf.append_sigmoid(
final_layer=final_layer);
if(model_reload_status):
ptf.Reload();
train_reload_status = False;
if(system["update"]["realtime_progress"]["active"]):
train_reload_status = True;
if(system["update"]["realtime_progress"]["value"] == "True"):
ptf.update_display_progress_realtime(True);
else:
ptf.update_display_progress_realtime(False);
if(system["update"]["progress"]["active"]):
train_reload_status = True;
if(system["update"]["progress"]["value"] == "True"):
ptf.update_display_progress(True);
else:
ptf.update_display_progress(False);
if(system["update"]["save_intermediate"]["active"]):
train_reload_status = True;
if(system["update"]["save_intermediate"]["value"] == "True"):
ptf.update_save_intermediate_models(True);
else:
ptf.update_save_intermediate_models(False);
if(system["update"]["save_logs"]["active"]):
train_reload_status = True;
if(system["update"]["save_logs"]["value"] == "True"):
ptf.update_save_training_logs(True);
else:
ptf.update_save_training_logs(False);
if(train_reload_status):
ptf.Reload();
optimizer_reload_status = False;
if(system["update"]["optimizers"]["active"]):
optimizer_reload_status = True;
if(system["backend"] == "Mxnet-1.5.1"):
name = system["update"]["optimizers"]["value"]["name"];
params = system["update"]["optimizers"]["value"]["params"];
if(name == "optimizer_sgd"):
ptf.optimizer_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_nesterov_sgd"):
ptf.optimizer_nesterov_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_rmsprop"):
ptf.optimizer_rmsprop(
float(params["learning_rate"]),
decay_rate=float(params["decay_rate"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_momentum_rmsprop"):
ptf.optimizer_momentum_rmsprop(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_adam"):
ptf.optimizer_adam(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_adamax"):
ptf.optimizer_adamax(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_nesterov_adam"):
if(params["amsgrad"] == "Yes"):
amsgrad = True;
else:
amsgrad = False;
ptf.optimizer_nesterov_adam(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]),
amsgrad=amsgrad)
elif(name == "optimizer_adagrad"):
ptf.optimizer_adagrad(
float(params["learning_rate"]),
learning_rate_decay=float(params["learning_rate_decay"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_adadelta"):
ptf.optimizer_adadelta(
float(params["learning_rate"]),
rho=float(params["rho"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(system["backend"] == "Pytorch-1.3.1"):
name = system["update"]["optimizers"]["value"]["name"];
params = system["update"]["optimizers"]["value"]["params"];
if(name == "optimizer_sgd"):
ptf.optimizer_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_nesterov_sgd"):
ptf.optimizer_nesterov_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_rmsprop"):
ptf.optimizer_rmsprop(
float(params["learning_rate"]),
decay_rate=float(params["decay_rate"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_momentum_rmsprop"):
ptf.optimizer_momentum_rmsprop(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_adam"):
ptf.optimizer_adam(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_adamax"):
ptf.optimizer_adamax(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_adamw"):
if(params["amsgrad"] == "Yes"):
amsgrad = True;
else:
amsgrad = False;
ptf.optimizer_adamw(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
weight_decay=float(params["weight_decay"]),
amsgrad=amsgrad)
elif(name == "optimizer_adagrad"):
ptf.optimizer_adagrad(
float(params["learning_rate"]),
learning_rate_decay=float(params["learning_rate_decay"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_adadelta"):
ptf.optimizer_adadelta(
float(params["learning_rate"]),
rho=float(params["rho"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
name = system["update"]["optimizers"]["value"]["name"];
params = system["update"]["optimizers"]["value"]["params"];
if(name == "optimizer_sgd"):
ptf.optimizer_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_nesterov_sgd"):
ptf.optimizer_nesterov_sgd(
float(params["learning_rate"]),
momentum=float(params["momentum"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_rmsprop"):
ptf.optimizer_rmsprop(
float(params["learning_rate"]),
decay_rate=float(params["decay_rate"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_adam"):
ptf.optimizer_adam(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
elif(name == "optimizer_nesterov_adam"):
if(params["amsgrad"] == "Yes"):
amsgrad = True;
else:
amsgrad = False;
ptf.optimizer_nesterov_adam(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]),
amsgrad=amsgrad)
elif(name == "optimizer_adamax"):
ptf.optimizer_adamax(
float(params["learning_rate"]),
beta1=float(params["beta1"]),
beta2=float(params["beta2"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_adagrad"):
ptf.optimizer_adagrad(
float(params["learning_rate"]),
learning_rate_decay=float(params["learning_rate_decay"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]))
elif(name == "optimizer_adadelta"):
ptf.optimizer_adadelta(
float(params["learning_rate"]),
rho=float(params["rho"]),
epsilon=float(params["epsilon"]),
weight_decay=float(params["weight_decay"]));
if(optimizer_reload_status):
ptf.Reload();
scheduler_reload_status = False;
if(system["update"]["schedulers"]["active"]):
scheduler_reload_status = True;
if(system["backend"] == "Mxnet-1.5.1"):
name = system["update"]["schedulers"]["value"]["name"];
params = system["update"]["schedulers"]["value"]["params"];
if(name == "lr_fixed"):
ptf.lr_fixed();
elif(name == "lr_step_decrease"):
ptf.lr_step_decrease(
params["step_size"],
gamma=float(params["gamma"]));
elif(name == "lr_multistep_decrease"):
milestones = params["milestones"].split(",");
for i in range(len(milestones)):
milestones[i] = int(milestones[i]);
ptf.lr_multistep_decrease(
milestones,
gamma=float(params["gamma"]));
elif(system["backend"] == "Pytorch-1.3.1"):
name = system["update"]["schedulers"]["value"]["name"];
params = system["update"]["schedulers"]["value"]["params"];
if(name == "lr_fixed"):
ptf.lr_fixed();
elif(name == "lr_step_decrease"):
ptf.lr_step_decrease(
params["step_size"],
gamma=float(params["gamma"]));
elif(name == "lr_multistep_decrease"):
milestones = params["milestones"].split(",");
for i in range(len(milestones)):
milestones[i] = int(milestones[i]);
ptf.lr_multistep_decrease(
milestones,
gamma=float(params["gamma"]));
elif(name == "lr_exponential_decrease"):
ptf.lr_exponential_decrease(
float(params["gamma"]));
elif(name == "lr_plateau_decrease"):
ptf.lr_plateau_decrease(
mode=params["mode"].lower(),
factor=float(params["factor"]),
patience=int(params["patience"]),
verbose=True,
threshold=float(params["threshold"]),
min_lr=float(params["min_lr"]));
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
name = system["update"]["schedulers"]["value"]["name"];
params = system["update"]["schedulers"]["value"]["params"];
if(name == "lr_fixed"):
ptf.lr_fixed();
elif(name == "lr_step_decrease"):
ptf.lr_step_decrease(
params["step_size"],
gamma=float(params["gamma"]));
elif(name == "lr_exponential_decrease"):
ptf.lr_exponential_decrease(
float(params["gamma"]));
elif(name == "lr_plateau_decrease"):
ptf.lr_plateau_decrease(
mode=params["mode"].lower(),
factor=float(params["factor"]),
patience=int(params["patience"]),
verbose=True,
threshold=float(params["threshold"]),
min_lr=float(params["min_lr"]));
if(scheduler_reload_status):
ptf.Reload();
loss_reload_status = False;
if(system["update"]["losses"]["active"]):
loss_reload_status = True;
if(system["backend"] == "Mxnet-1.5.1"):
name = system["update"]["losses"]["value"]["name"];
params = system["update"]["losses"]["value"]["params"];
if(name == "loss_l1"):
ptf.loss_l1(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_l2"):
ptf.loss_l2(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_softmax_crossentropy"):
ptf.loss_softmax_crossentropy(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_crossentropy"):
ptf.loss_crossentropy(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_sigmoid_binary_crossentropy"):
ptf.loss_sigmoid_binary_crossentropy(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_binary_crossentropy"):
ptf.loss_binary_crossentropy(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]));
elif(name == "loss_kldiv"):
if(params["log_pre_applied"] == "Yes"):
log_pre_applied = True;
else:
log_pre_applied = False;
ptf.loss_kldiv(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]),
log_pre_applied=log_pre_applied);
elif(name == "loss_poisson_nll"):
if(params["log_pre_applied"] == "Yes"):
log_pre_applied = True;
else:
log_pre_applied = False;
ptf.loss_poisson_nll(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]),
log_pre_applied=log_pre_applied);
elif(name == "loss_huber"):
ptf.loss_huber(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]),
threshold_for_mean_estimator=float(params["threshold_for_mean_estimator"]));
elif(name == "loss_hinge"):
ptf.loss_hinge(
weight=float(params["weight"]),
batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
elif(name == "loss_squared_hinge"):
ptf.loss_squared_hinge(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
elif(system["backend"] == "Pytorch-1.3.1"):
name = system["update"]["losses"]["value"]["name"];
params = system["update"]["losses"]["value"]["params"];
if(name == "loss_l1"):
ptf.loss_l1(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_l2"):
ptf.loss_l2(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_softmax_crossentropy"):
ptf.loss_softmax_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_crossentropy"):
ptf.loss_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_sigmoid_binary_crossentropy"):
ptf.loss_sigmoid_binary_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_binary_crossentropy"):
ptf.loss_binary_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_kldiv"):
if(params["log_pre_applied"] == "Yes"):
log_pre_applied = True;
else:
log_pre_applied = False;
ptf.loss_kldiv(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
log_pre_applied=log_pre_applied);
elif(name == "loss_poisson_nll"):
if(params["log_pre_applied"] == "Yes"):
log_pre_applied = True;
else:
log_pre_applied = False;
ptf.loss_poisson_nll(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
log_pre_applied=log_pre_applied);
elif(name == "loss_huber"):
ptf.loss_huber(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
threshold_for_mean_estimator=float(params["threshold_for_mean_estimator"]));
elif(name == "loss_hinge"):
ptf.loss_hinge(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
elif(name == "loss_squared_hinge"):
ptf.loss_squared_hinge(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
elif(name == "loss_multimargin"):
ptf.loss_multimargin(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_squared_multimargin"):
ptf.loss_squared_multimargin(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_multilabel_margin"):
ptf.loss_multilabel_margin(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_multilabel_softmargin"):
ptf.loss_multilabel_softmargin(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(system["backend"] == "Keras-2.2.5_Tensorflow-1"):
name = system["update"]["losses"]["value"]["name"];
params = system["update"]["losses"]["value"]["params"];
if(name == "loss_l1"):
ptf.loss_l1(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_l2"):
ptf.loss_l2(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_crossentropy"):
ptf.loss_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_binary_crossentropy"):
ptf.loss_binary_crossentropy(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]));
elif(name == "loss_kldiv"):
if(params["log_pre_applied"] == "Yes"):
log_pre_applied = True;
else:
log_pre_applied = False;
ptf.loss_kldiv(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
log_pre_applied=log_pre_applied);
elif(name == "loss_hinge"):
ptf.loss_hinge(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
elif(name == "loss_squared_hinge"):
ptf.loss_squared_hinge(
weight=float(params["weight"]),
                            batch_axis=int(params["batch_axis"]),
margin=float(params["margin"]));
if(loss_reload_status):
ptf.Reload();
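            # Illustrative sketch (an assumption, not taken from project docs): the
            # loss-update branch above expects a payload shaped roughly like
            #   system["update"]["losses"] = {
            #       "active": True,
            #       "value": {
            #           "name": "loss_softmax_crossentropy",
            #           "params": {"weight": "1.0", "batch_axis": "0"},
            #       },
            #   }
            # with numeric fields stored as strings and cast via float()/int() above.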
analysis_name = system["analysis"]["model_list"]["analysis_name"];
tmp_list = system["analysis"]["model_list"]["list"].split(",");
model_lists = [];
for i in range(len(tmp_list)):
tmp_list[i] = tmp_list[i].replace(" ", "");
model_lists.append([tmp_list[i], True, True])
epochs=int(system["analysis"]["model_list"]["epochs"]);
percent_data=int(system["analysis"]["model_list"]["percent"]);
analysis = ptf.Analyse_Models(analysis_name,
model_lists,
percent_data,
num_epochs=epochs,
state="keep_none");
print("Completed"); |
py | 7dffd90379906168d0f421b7508dd1ff57fe60bc | #!/usr/bin/python
# encoding: utf-8
# This file is called by make to run all the solvers and
# check that they are producing correct answers.
from same import *
import os
import sys
import subprocess
from time import time
LOG = "testes.log"
def is_executable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def listar_dir(dir):
arquivos = []
for dirpath, _, files in os.walk(dir):
for f in files:
f = os.path.join(dirpath, f)
arquivos.append(f)
arquivos.sort()
return arquivos
def encontrar_resolvedores(path):
resolvedores = []
for f in os.listdir(path):
nome = f
f = os.path.join(path, f)
run = os.path.join(f, 'run')
build = os.path.join(f, 'build')
if is_executable(run):
if not is_executable(build):
build = None
resolvedores.append((nome, build, run))
else:
print "Ignorando %s - arquivo nao existe ou nao e executavel." % (run,)
resolvedores.sort()
return resolvedores
def executar_resolvedores(log, resolvedores, casos):
total = len(casos)
for nome, build, run in resolvedores:
if build:
print "Executando %s" % (build,)
if not exec_prog(build):
print "Falha no make. Ignorando %s" % (nome,)
continue
inicio = time()
acertos = 0
for caso in casos:
if executar_resolvedor_caso(log, nome, run, caso):
acertos += 1
tempo = time() - inicio
print "%d/%d acertos (%.2f s)" % (acertos, total, tempo)
log.write("%d/%d acertos (%.2f)\n" % (acertos, total, tempo))
def exec_prog(prog):
try:
retcode = subprocess.call(prog, shell=True)
if retcode < 0:
return False
except OSError as e:
print >> sys.stderr, e
return False
return True
def executar_resolvedor_caso(log, nome, resolvedor, caso):
print resolvedor, caso,
sys.stdout.flush()
log.write('%s %s ' % (resolvedor, caso))
tem_solucao = 'sem-solucao' not in caso
jogo = string_to_jogo(open(caso).read())
ok, resolucao = executar_programa(resolvedor, os.path.abspath(caso))
if not ok:
print 'Fail (veja o arquivo %s)' % (LOG)
log.write('Fail\n')
log.write(' %s\n' % (resolucao))
return False
erros = checar_resolucao(jogo, resolucao, tem_solucao)
if not erros:
print 'OK'
log.write('OK\n')
else:
print 'Fail (veja o arquivo %s)' % (LOG)
log.write('Fail\n')
log.write(' Resolucao\n')
for s in resolucao:
log.write(' %s\n' % (s))
for s in erros:
log.write(' %s\n' % (s))
log.flush()
return not erros
def executar_programa(prog, caso):
try:
        # TODO: redirect stderr to the log file
return True, subprocess.check_output([prog, caso]).split('\n')
except Exception as e:
return False, e
def main():
if len(sys.argv) != 3:
print "Numero de parametros incorreto."
print "Modo de usar: %s diretorio-com-os-resolvedores diretorio-com-os-testes" % (sys.argv[0])
sys.exit(1)
resolvedores = encontrar_resolvedores(sys.argv[1])
if len(resolvedores) == 0:
print "Nenhum resolvedor encontrado em %s" % (sys.argv[1])
sys.exit(1)
casos = listar_dir(sys.argv[2])
if len(casos) == 0:
print "Nenhum caso de teste encontrado em %s" % (sys.argv[2])
sys.exit(1)
with open(LOG, 'w') as log:
executar_resolvedores(log, resolvedores, casos)
if __name__ == '__main__':
main()
|
py | 7dffd919b064ca20437e89a66d270bc4568c06f4 | """
WSGI config for manipulator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'manipulator.settings')
application = get_wsgi_application()
|
py | 7dffd920ce887063e2931ff46dbc0cfa5f41873f | """Different strategies for generating node2vec walks."""
import numpy as np
from gensim.models import Word2Vec
from numba import njit
from numba import prange
from numba.np.ufunc.parallel import _get_thread_id
from numba_progress import ProgressBar
from .graph import BaseGraph
from .rw import DenseRWGraph
from .rw import SparseRWGraph
from .typing import Embeddings
from .typing import Float32Array
from .typing import HasNbrs
from .typing import List
from .typing import MoveForward
from .typing import Optional
from .typing import Uint32Array
from .typing import Uint64Array
from .wrappers import Timer
class Base(BaseGraph):
"""Base node2vec object.
This base object provides the skeleton for the node2vec walk algorithm,
    which consists of the ``simulate_walks`` method that generates node2vec
    random walks. In contrast to the original Python implementation of node2vec,
    it is parallelized so that each process generates walks independently.
Note:
        The ``preprocess_transition_probs`` method is required for implementations
        that precompute and store 2nd order transition probabilities.
Examples:
Generate node2vec embeddings
>>> from pecanpy import pecanpy as node2vec
>>>
>>> # initialize node2vec object, similarly for SparseOTF and DenseOTF
>>> g = node2vec.PreComp(p=0.5, q=1, workers=4, verbose=True)
>>> # alternatively, can specify ``extend=True`` for using node2vec+
>>>
>>> # load graph from edgelist file
>>> g.read_edg(path_to_edg_file, weighted=True, directed=False)
>>> # precompute and save 2nd order transition probs (for PreComp only)
>>> g.preprocess_transition_probs()
>>>
>>> # generate random walks, which could then be used to train w2v
>>> walks = g.simulate_walks(num_walks=10, walk_length=80)
>>>
>>> # alternatively, generate the embeddings directly using ``embed``
>>> emd = g.embed()
"""
def __init__(
self,
p: float = 1,
q: float = 1,
workers: int = 1,
verbose: bool = False,
extend: bool = False,
gamma: float = 0,
random_state: Optional[int] = None,
):
"""Initializ node2vec base class.
Args:
            p (float): return parameter, a value less than 1 encourages returning
                to the previous vertex, and a value greater than 1 discourages it
(default: 1).
            q (float): in-out parameter, a value less than 1 encourages walks to
                go "outward", and a value greater than 1 encourages walking within
a localized neighborhood (default: 1)
            workers (int): number of threads to be spawned for running node2vec
including walk generation and word2vec embedding (default: 1)
verbose (bool): show progress bar for walk generation.
extend (bool): use node2vec+ extension if set to :obj:`True`
(default: :obj:`False`).
gamma (float): Multiplication factor for the std term of edge
weights added to the average edge weights as the noisy edge
                threshold, only used by node2vec+ (default: 0)
random_state (int, optional): Random seed for generating random
walks. Note that to fully ensure reproducibility, use single
thread (i.e., workers=1), and potentially need to set the
Python environment variable ``PYTHONHASHSEED`` to match the
random_state (default: :obj:`None`).
"""
super().__init__()
self.p = p
self.q = q
self.workers = workers # TODO: not doing anything, need to fix.
self.verbose = verbose
self.extend = extend
self.gamma = gamma
self.random_state = random_state
self._preprocessed: bool = False
def _map_walk(self, walk_idx_ary: Uint32Array) -> List[str]:
"""Map walk from node index to node ID.
Note:
The last element in the ``walk_idx_ary`` encodes the effective walk
length. Only walk indices up to the effective walk length are
translated (mapped to node IDs).
"""
end_idx = walk_idx_ary[-1]
walk = [self.nodes[i] for i in walk_idx_ary[:end_idx]]
return walk
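    # Illustrative sketch (not part of the library API): with self.nodes = ["a", "b", "c"]
    # and walk_idx_ary = np.array([0, 2, 0, 0, 3], dtype=np.uint32), the trailing 3 is the
    # effective walk length, so _map_walk returns ["a", "c", "a"].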
def simulate_walks(
self,
num_walks: int,
walk_length: int,
) -> List[List[str]]:
"""Generate walks starting from each nodes ``num_walks`` time.
Note:
This is the master process that spawns worker processes, where the
            worker function ``node2vec_walks`` generates a single random walk
starting from a vertex of the graph.
Args:
num_walks (int): number of walks starting from each node.
            walk_length (int): length of each walk.
"""
self._preprocess_transition_probs()
nodes = np.array(range(self.num_nodes), dtype=np.uint32)
start_node_idx_ary = np.concatenate([nodes] * num_walks)
tot_num_jobs = start_node_idx_ary.size
random_state = self.random_state
np.random.seed(random_state)
np.random.shuffle(start_node_idx_ary) # for balanced work load
move_forward = self.get_move_forward()
has_nbrs = self.get_has_nbrs()
verbose = self.verbose
# Acquire numba progress proxy for displaying the progress bar
with ProgressBar(total=tot_num_jobs, disable=not verbose) as progress:
walk_idx_mat = self._random_walks(
tot_num_jobs,
walk_length,
random_state,
start_node_idx_ary,
has_nbrs,
move_forward,
progress,
)
# Map node index back to node ID
walks = [self._map_walk(walk_idx_ary) for walk_idx_ary in walk_idx_mat]
return walks
@staticmethod
@njit(parallel=True, nogil=True)
def _random_walks(
tot_num_jobs: int,
walk_length: int,
random_state: Optional[int],
start_node_idx_ary: Uint32Array,
has_nbrs: HasNbrs,
move_forward: MoveForward,
progress_proxy: ProgressBar,
) -> Uint32Array:
"""Simulate a random walk starting from start node."""
# Seed the random number generator
if random_state is not None:
np.random.seed(random_state + _get_thread_id())
# use the last entry of each walk index array to keep track of the
# effective walk length
walk_idx_mat = np.zeros((tot_num_jobs, walk_length + 2), dtype=np.uint32)
walk_idx_mat[:, 0] = start_node_idx_ary # initialize seeds
walk_idx_mat[:, -1] = walk_length + 1 # set to full walk length by default
for i in prange(tot_num_jobs):
# initialize first step as normal random walk
start_node_idx = walk_idx_mat[i, 0]
if has_nbrs(start_node_idx):
walk_idx_mat[i, 1] = move_forward(start_node_idx)
else:
walk_idx_mat[i, -1] = 1
continue
# start bias random walk
for j in range(2, walk_length + 1):
cur_idx = walk_idx_mat[i, j - 1]
if has_nbrs(cur_idx):
prev_idx = walk_idx_mat[i, j - 2]
walk_idx_mat[i, j] = move_forward(cur_idx, prev_idx)
else:
walk_idx_mat[i, -1] = j
break
progress_proxy.update(1)
return walk_idx_mat
def setup_get_normalized_probs(self):
"""Transition probability computation setup.
        This function performs the necessary preprocessing of computing the
average edge weights array, which is used later by the transition
probability computation function ``get_extended_normalized_probs``,
if node2vec+ is used. Otherwise, return the normal transition function
        ``get_normalized_probs`` with a trivial placeholder for the average edge
weights array ``noise_thresholds``.
"""
if self.extend: # use n2v+
get_normalized_probs = self.get_extended_normalized_probs
noise_thresholds = self.get_noise_thresholds()
else: # use normal n2v
get_normalized_probs = self.get_normalized_probs
noise_thresholds = None
return get_normalized_probs, noise_thresholds
def preprocess_transition_probs(self):
"""Null default preprocess method."""
pass
def _preprocess_transition_probs(self):
if not self._preprocessed:
self.preprocess_transition_probs()
self._preprocessed = True
def embed(
self,
dim: int = 128,
num_walks: int = 10,
walk_length: int = 80,
window_size: int = 10,
epochs: int = 1,
verbose: bool = False,
) -> Embeddings:
"""Generate embeddings.
This is a shortcut function that combines ``simulate_walks`` with
``Word2Vec`` to generate the node2vec embedding.
Note:
The resulting embeddings are aligned with the graph, i.e., the
index of embeddings is the same as that for the graph.
Args:
dim (int): dimension of the final embedding, default is 128
num_walks (int): number of random walks generated using each node
as the seed node, default is 10
walk_length (int): length of the random walks, default is 80
            window_size (int): context window size for training the
``Word2Vec`` model, default is 10
epochs (int): number of epochs for training ``Word2Vec``, default
is 1
verbose (bool): print time usage for random walk generation and
skip-gram training if set to True
Return:
Embeddings: The embedding matrix, each row is a node embedding
vector. The index is the same as that for the graph.
"""
timed_walk = Timer("generate walks", verbose)(self.simulate_walks)
timed_w2v = Timer("train embeddings", verbose)(Word2Vec)
walks = timed_walk(num_walks, walk_length)
w2v = timed_w2v(
walks,
vector_size=dim,
window=window_size,
sg=1,
min_count=0,
workers=self.workers,
epochs=epochs,
seed=self.random_state,
)
return w2v.wv[self.nodes]
class FirstOrderUnweighted(Base, SparseRWGraph):
"""Directly sample edges for first order random walks."""
def __init__(self, *args, **kwargs):
"""Initialize FirstOrderUnweighted mode."""
Base.__init__(self, *args, **kwargs)
def get_move_forward(self):
"""Wrap ``move_forward``."""
indices = self.indices
indptr = self.indptr
@njit(nogil=True)
def move_forward(cur_idx, prev_idx=None):
start, end = indptr[cur_idx], indptr[cur_idx + 1]
return indices[np.random.randint(start, end)]
return move_forward
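    # Sketch of the CSR layout this class relies on (stated as an assumption): the
    # neighbors of node i live in indices[indptr[i]:indptr[i + 1]], so an unweighted
    # first-order step is just a uniform draw from that slice.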
class PreCompFirstOrder(Base, SparseRWGraph):
"""Precompute transition probabilities for first order random walks."""
def __init__(self, *args, **kwargs):
"""Initialize PreCompFirstOrder mode."""
Base.__init__(self, *args, **kwargs)
self.alias_j = self.alias_q = None
def get_move_forward(self):
"""Wrap ``move_forward``."""
indices = self.indices
indptr = self.indptr
alias_j = self.alias_j
alias_q = self.alias_q
@njit(nogil=True)
def move_forward(cur_idx, prev_idx=None):
start, end = indptr[cur_idx], indptr[cur_idx + 1]
choice = alias_draw(alias_j[start:end], alias_q[start:end])
return indices[indptr[cur_idx] + choice]
return move_forward
def preprocess_transition_probs(self):
"""Precompute and store first order transition probabilities."""
data = self.data
indices = self.indices
indptr = self.indptr
# Retrieve transition probability computation callback function
get_normalized_probs = self.get_normalized_probs_first_order
# Determine the dimensionality of the 1st order transition probs
n_nodes = indptr.size - 1 # number of nodes
n_probs = indptr[-1] # total number of 1st order transition probs
@njit(parallel=True, nogil=True)
def compute_all_transition_probs():
alias_j = np.zeros(n_probs, dtype=np.uint32)
alias_q = np.zeros(n_probs, dtype=np.float32)
for idx in range(n_nodes):
start, end = indptr[idx], indptr[idx + 1]
probs = get_normalized_probs(data, indices, indptr, idx)
alias_j[start:end], alias_q[start:end] = alias_setup(probs)
return alias_j, alias_q
self.alias_j, self.alias_q = compute_all_transition_probs()
class PreComp(Base, SparseRWGraph):
"""Precompute transition probabilites.
This implementation precomputes and store 2nd order transition probabilites
first and uses read off transition probabilities during the process of
random walk. The graph type used is ``SparseRWGraph``.
Note:
Need to call ``preprocess_transition_probs()`` first before generating
walks.
"""
def __init__(self, *args, **kwargs):
"""Initialize PreComp mode node2vec."""
Base.__init__(self, *args, **kwargs)
self.alias_dim: Optional[Uint32Array] = None
self.alias_j: Optional[Uint32Array] = None
self.alias_q: Optional[Float32Array] = None
self.alias_indptr: Optional[Uint64Array] = None
def get_move_forward(self):
"""Wrap ``move_forward``.
This function returns a ``numba.njit`` compiled function that takes
current vertex index (and the previous vertex index if available) and
        returns the next vertex index by sampling from a discrete random
distribution based on the transition probabilities that are read off
the precomputed transition probabilities table.
Note:
The returned function is used by the ``simulate_walks`` method.
"""
data = self.data
indices = self.indices
indptr = self.indptr
p = self.p
q = self.q
get_normalized_probs = self.get_normalized_probs
alias_j = self.alias_j
alias_q = self.alias_q
alias_indptr = self.alias_indptr
alias_dim = self.alias_dim
@njit(nogil=True)
def move_forward(cur_idx, prev_idx=None):
"""Move to next node based on transition probabilities."""
if prev_idx is None:
normalized_probs = get_normalized_probs(
data,
indices,
indptr,
p,
q,
cur_idx,
None,
None,
)
cdf = np.cumsum(normalized_probs)
choice = np.searchsorted(cdf, np.random.random())
else:
# Find index of neighbor (previous node) for reading alias
start = indptr[cur_idx]
end = indptr[cur_idx + 1]
nbr_idx = np.searchsorted(indices[start:end], prev_idx)
if indices[start + nbr_idx] != prev_idx:
print("FATAL ERROR! Neighbor not found.")
dim = alias_dim[cur_idx]
start = alias_indptr[cur_idx] + dim * nbr_idx
end = start + dim
choice = alias_draw(alias_j[start:end], alias_q[start:end])
return indices[indptr[cur_idx] + choice]
return move_forward
def preprocess_transition_probs(self):
"""Precompute and store 2nd order transition probabilities.
        Each node contains n ** 2 2nd order transition probabilities,
        where n is the number of neighbors of that specific node, since one can
pick any one of its neighbors as the previous node and / or the next
node. For each second order transition probability of a node, set up
the alias draw table to be used during random walk.
Note:
            Uses uint64 instead of uint32 for tracking alias_indptr to prevent
            overflowing, since the 2nd order transition probs grow much faster
than the first order transition probs, which is the same as the
total number of edges in the graph.
"""
data = self.data
indices = self.indices
indptr = self.indptr
p = self.p
q = self.q
# Retrieve transition probability computation callback function
get_normalized_probs, noise_thresholds = self.setup_get_normalized_probs()
# Determine the dimensionality of the 2nd order transition probs
n_nodes = self.indptr.size - 1 # number of nodes
n = self.indptr[1:] - self.indptr[:-1] # number of nbrs per node
n2 = np.power(n, 2) # number of 2nd order trans probs per node
# Set the dimensionality of alias probability table
self.alias_dim = alias_dim = n
self.alias_indptr = alias_indptr = np.zeros(self.indptr.size, dtype=np.uint64)
alias_indptr[1:] = np.cumsum(n2)
n_probs = alias_indptr[-1] # total number of 2nd order transition probs
@njit(parallel=True, nogil=True)
def compute_all_transition_probs():
alias_j = np.zeros(n_probs, dtype=np.uint32)
alias_q = np.zeros(n_probs, dtype=np.float32)
for idx in range(n_nodes):
offset = alias_indptr[idx]
dim = alias_dim[idx]
nbrs = indices[indptr[idx] : indptr[idx + 1]]
for nbr_idx in prange(n[idx]):
nbr = nbrs[nbr_idx]
probs = get_normalized_probs(
data,
indices,
indptr,
p,
q,
idx,
nbr,
noise_thresholds,
)
start = offset + dim * nbr_idx
end = start + dim
alias_j[start:end], alias_q[start:end] = alias_setup(probs)
return alias_j, alias_q
self.alias_j, self.alias_q = compute_all_transition_probs()
class SparseOTF(Base, SparseRWGraph):
"""Sparse graph transition on the fly.
    This implementation does *NOT* precompute transition probabilities in advance
    but instead calculates them on-the-fly during the random walk.
The graph type used is ``SparseRWGraph``.
"""
def __init__(self, *args, **kwargs):
"""Initialize PreComp mode node2vec."""
Base.__init__(self, *args, **kwargs)
def get_move_forward(self):
"""Wrap ``move_forward``.
This function returns a ``numba.njit`` compiled function that takes
current vertex index (and the previous vertex index if available) and
        returns the next vertex index by sampling from a discrete random
distribution based on the transition probabilities that are calculated
on-the-fly.
Note:
The returned function is used by the ``simulate_walks`` method.
"""
data = self.data
indices = self.indices
indptr = self.indptr
p = self.p
q = self.q
get_normalized_probs, noise_thresholds = self.setup_get_normalized_probs()
@njit(nogil=True)
def move_forward(cur_idx, prev_idx=None):
"""Move to next node."""
normalized_probs = get_normalized_probs(
data,
indices,
indptr,
p,
q,
cur_idx,
prev_idx,
noise_thresholds,
)
cdf = np.cumsum(normalized_probs)
choice = np.searchsorted(cdf, np.random.random())
return indices[indptr[cur_idx] + choice]
return move_forward
class DenseOTF(Base, DenseRWGraph):
"""Dense graph transition on the fly.
    This implementation does *NOT* precompute transition probabilities in advance
    but instead calculates them on-the-fly during the random walk.
The graph type used is ``DenseRWGraph``.
"""
def __init__(self, *args, **kwargs):
"""Initialize DenseOTF mode node2vec."""
Base.__init__(self, *args, **kwargs)
def get_move_forward(self):
"""Wrap ``move_forward``.
This function returns a ``numba.njit`` compiled function that takes
current vertex index (and the previous vertex index if available) and
        returns the next vertex index by sampling from a discrete random
distribution based on the transition probabilities that are calculated
on-the-fly.
Note:
The returned function is used by the ``simulate_walks`` method.
"""
data = self.data
nonzero = self.nonzero
p = self.p
q = self.q
get_normalized_probs, noise_thresholds = self.setup_get_normalized_probs()
@njit(nogil=True)
def move_forward(cur_idx, prev_idx=None):
"""Move to next node."""
normalized_probs = get_normalized_probs(
data,
nonzero,
p,
q,
cur_idx,
prev_idx,
noise_thresholds,
)
cdf = np.cumsum(normalized_probs)
choice = np.searchsorted(cdf, np.random.random())
nbrs = np.where(nonzero[cur_idx])[0]
return nbrs[choice]
return move_forward
@njit(nogil=True)
def alias_setup(probs):
"""Construct alias lookup table.
This code is modified from the blog post here:
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    , where you can find more details about how the method works. In general,
the alias method improves the time complexity of sampling from a discrete
random distribution to O(1) if the alias table is setup in advance.
Args:
probs (list(float32)): normalized transition probabilities array, could
be in either list or NDArray, of float32 values.
"""
k = probs.size
q = np.zeros(k, dtype=np.float32)
j = np.zeros(k, dtype=np.uint32)
smaller = np.zeros(k, dtype=np.uint32)
larger = np.zeros(k, dtype=np.uint32)
smaller_ptr = 0
larger_ptr = 0
for kk in range(k):
q[kk] = k * probs[kk]
if q[kk] < 1.0:
smaller[smaller_ptr] = kk
smaller_ptr += 1
else:
larger[larger_ptr] = kk
larger_ptr += 1
while (smaller_ptr > 0) & (larger_ptr > 0):
smaller_ptr -= 1
small = smaller[smaller_ptr]
larger_ptr -= 1
large = larger[larger_ptr]
j[small] = large
q[large] = q[large] + q[small] - 1.0
if q[large] < 1.0:
smaller[smaller_ptr] = large
smaller_ptr += 1
else:
larger[larger_ptr] = large
larger_ptr += 1
return j, q
@njit(nogil=True)
def alias_draw(j, q):
"""Draw sample from a non-uniform discrete distribution using alias sampling."""
k = j.size
kk = np.random.randint(k)
if np.random.rand() < q[kk]:
return kk
else:
return j[kk]
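# Minimal usage sketch for the two helpers above (illustrative only):
#
#     probs = np.array([0.5, 0.3, 0.2], dtype=np.float32)
#     j, q = alias_setup(probs)                             # O(k) table construction
#     samples = [alias_draw(j, q) for _ in range(10000)]    # each draw is O(1)
#
# The empirical frequencies in `samples` approach `probs` as the number of draws grows.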
|
py | 7dffdb0792e8d7df6f3c40626c954356bda9a375 | """
Dancing links C++ wrapper
"""
#*****************************************************************************
# Copyright (C) 2008 Carlo Hamalainen <[email protected]>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
# OneExactCover and AllExactCovers are almost exact copies of the
# functions with the same name in sage/combinat/dlx.py by Tom Boothby.
from .dancing_links import dlx_solver
def DLXCPP(rows):
"""
Solves the Exact Cover problem by using the Dancing Links algorithm
described by Knuth.
Consider a matrix M with entries of 0 and 1, and compute a subset
of the rows of this matrix which sum to the vector of all 1's.
The dancing links algorithm works particularly well for sparse
matrices, so the input is a list of lists of the form::
[
[i_11,i_12,...,i_1r]
...
[i_m1,i_m2,...,i_ms]
]
where M[j][i_jk] = 1.
The first example below corresponds to the matrix::
1110
1010
0100
0001
which is exactly covered by::
1110
0001
and
::
1010
0100
0001
If soln is a solution given by DLXCPP(rows) then
[ rows[soln[0]], rows[soln[1]], ... rows[soln[len(soln)-1]] ]
is an exact cover.
Solutions are given as a list.
EXAMPLES::
sage: rows = [[0,1,2]]
sage: rows+= [[0,2]]
sage: rows+= [[1]]
sage: rows+= [[3]]
sage: [x for x in DLXCPP(rows)]
[[3, 0], [3, 1, 2]]
"""
if not rows:
return
x = dlx_solver(rows)
while x.search():
yield x.get_solution()
def AllExactCovers(M):
"""
Solves the exact cover problem on the matrix M (treated as a dense
binary matrix).
EXAMPLES: No exact covers::
sage: M = Matrix([[1,1,0],[1,0,1],[0,1,1]])
sage: [cover for cover in AllExactCovers(M)]
[]
Two exact covers::
sage: M = Matrix([[1,1,0],[1,0,1],[0,0,1],[0,1,0]])
sage: [cover for cover in AllExactCovers(M)]
[[(1, 1, 0), (0, 0, 1)], [(1, 0, 1), (0, 1, 0)]]
"""
rows = []
for R in M.rows():
row = []
for i in range(len(R)):
if R[i]:
row.append(i)
rows.append(row)
for s in DLXCPP(rows):
yield [M.row(i) for i in s]
def OneExactCover(M):
"""
Solves the exact cover problem on the matrix M (treated as a dense
binary matrix).
EXAMPLES::
sage: M = Matrix([[1,1,0],[1,0,1],[0,1,1]]) #no exact covers
sage: print(OneExactCover(M))
None
sage: M = Matrix([[1,1,0],[1,0,1],[0,0,1],[0,1,0]]) #two exact covers
sage: OneExactCover(M)
[(1, 1, 0), (0, 0, 1)]
"""
for s in AllExactCovers(M):
return s
|
py | 7dffdb0b031d1d1df547fd40e99df59fcc4750fd | import os
import fnmatch
import subprocess
from threading import Thread
from scipy.stats import truncnorm
import matplotlib.colors as colors
import numpy as np
def get_truncated_normal(mean=0., sd=1., low=0., upp=10.):
return truncnorm(
(low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
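# Example usage (illustrative): draw samples from a normal distribution clipped to [0, 10].
#
#     rv = get_truncated_normal(mean=5., sd=2., low=0., upp=10.)
#     samples = rv.rvs(size=100)  # every value lies within [0, 10]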
def find(pattern, path):
" Finds the files in a path with a given pattern. "
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def run_subprocess(program):
""" Runs a given program as a subrocess. """
print("\tRunning subprocess: %s" % (" ".join(program)))
return_code = None
while not return_code == 0:
p = subprocess.Popen(program, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True, bufsize=1,
close_fds=True)
for stdout_line in iter(p.stdout.readline, ""):
print(stdout_line, end='')
p.stdout.close()
return_code = p.wait()
if return_code != 0: print("\t\t\t\t Error n: ", return_code, " resetting simulation...")
class SubprocessRunner(object):
def __init__(self, program, id: int):
self._program = program
self._return_code = None
self._p = None
self.thread = None
self.id = id
self.return_code = None
self.is_stopped = False
def run(self):
print("\tRunning subprocess: %s" % (" ".join(self._program)))
self.thread = Thread(target=self._run)
self.thread.start()
def _run(self):
while not self.return_code == 0 and not self.is_stopped:
p = subprocess.Popen(self._program, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(p.stdout.readline, ""):
print(f'process {self.id}:\t', stdout_line, end='')
p.stdout.close()
self.return_code = p.wait()
if self.return_code != 0:
print(f"\t\t\t\t Process {self.id}:\tError n: ", self.return_code, " resetting simulation...")
@property
def is_finished(self):
if self.return_code == 0:
return True
else:
return False
def wait(self):
if self.thread is not None:
self.thread.join()
def stop(self):
self.is_stopped = True
if self.thread is not None:
self.thread.join()
class MidpointNormalize(colors.Normalize):
"""
    Normalise the colorbar so that diverging bars work their way out to either side from a prescribed midpoint value,
e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
from: http://chris35wills.github.io/matplotlib_diverging_colorbar/
"""
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y), np.isnan(value))
|
py | 7dffdb11635f90a701c206c1757ccff7e0286ff1 | from dataclasses import dataclass
from copy import deepcopy
from prettyprinter import cpprint as pp
from arpeggio import ParseTreeNode, Terminal, NonTerminal, StrMatch
from .parsetreenodes import *
#------------------------------------------------------------------------------
def is_unpackable_sequence(x):
return isinstance(x, (list, tuple))
#------------------------------------------------------------------------------
#
# Wrapper to emulate an Arpeggio Node
#
@dataclass
class Unwrap(object):
value : object
position : int = 0
def __post_init__(self):
assert isinstance(self.value, (list, NonTerminal))
assert len(self.value) > 0
assert isinstance(self.value[0], ParseTreeNode)
#------------------------------------------------------------------------------
@dataclass
class FakeNode(object):
value : object
position : int = 0
_please_unwrap : int = 0
def NO__post_init__(self):
assert isinstance(self.value, (list, NonTerminal))
assert len(self.value) > 0
assert isinstance(self.value[0], ParseTreeNode)
#------------------------------------------------------------------------------
class FakeNonTerminal(NonTerminal):
def __init__(self, rule, value):
super(FakeNonTerminal, self).__init__(rule, 0, value)
class FakeTerminal(NonTerminal):
def __init__(self, rule, value):
super(FakeTerminal, self).__init__(rule, 0, value)
#------------------------------------------------------------------------------
def nt_ok(value):
return isinstance(value, list) and isinstance(value[0], ParseTreeNode)
class WrappedList(list):
def __init__(self, *args, **kwargs):
self._please_unwrap = kwargs['_please_unwrap']
del kwargs['_please_unwrap']
super().__init__(*args, **kwargs)
#------------------------------------------------------------------------------
from contextlib import redirect_stdout
_tty = None
def tprint(*args, **kwargs):
global _tty
if False :
if _tty is None :
_tty = open("/dev/tty", "w")
with redirect_stdout(_tty) :
print(*args, **kwargs)
#------------------------------------------------------------------------------
def wrap(value):
"""Wrap <value> such that NonTerminal will accept it as a list of Parse Tree Nodes.
Does nothing if <value> is already capable of being such.
"""
tprint(f": wrap : value = {value}")
# Note: NonTerminal value is Case 0 since NonTerminal is a sub-type of list
# and a NonTerminal [0] is always a ParseTreeNode
if isinstance(value, list):
if isinstance(value[0], ParseTreeNode):
# Case 0 : not wrapped : list and [0] is a ParseTreeNode
tprint(f" => not wrapped : Case 0 : list and [0] is a ParseTreeNode")
return value
# Case 1 : list but [0] is not a ParseTreeNode
tprint(f" => wrapped : Case 1 : list but [0] is not a ParseTreeNode")
value[0] = FakeNode(value[0])
return WrappedList(value, _please_unwrap=1)
if isinstance(value, Terminal):
# Case 2 : not a list, value is a Terminal
tprint(f" => wrapped : Case 2 : not a list, value is a Terminal")
return WrappedList([ value ], _please_unwrap=2)
# Case 3 : not a list, value is not a Terminal
tprint(f" => wrapped : Case 3 : not a list, value is not a Terminal")
return WrappedList([ FakeNode(value) ], _please_unwrap=3)
#------------------------------------------------------------------------------
def unwrap(wrapped):
""" ...
"""
# wrapped must be a WrappedList
if not isinstance(wrapped, WrappedList):
return wrapped
# Case 1 : list but [0] is not a ParseTreeNode
if wrapped._please_unwrap == 1:
# print('\n: unwrap 1')
wrapped[0] = wrapped[0].value
# pp(wrapped)
return list(wrapped)
# Case 2 : not a list, value is a Terminal
if wrapped._please_unwrap == 2:
# print(': unwrap 2')
return wrapped[0]
# Case 3 : not a list, value is not a Terminal
if wrapped._please_unwrap == 3:
# print(': unwrap 3')
return wrapped[0].value
raise ValueError(f"unwrap_value(): unrecognized _please_unwrap value "
f"{wrapped._please_unwrap}")
#------------------------------------------------------------------------------
def unwrap_extend(dst, wrapped):
"""appends unwrapped element(s) to the end list 'dst'"""
debug = False
if debug:
print(f"\n[ unwrap_extend : enter")
print("[wrapped]")
pp(wrapped)
value = unwrap(wrapped)
if debug:
print("[value]")
pp(value)
if debug:
print("[dst] : before")
pp(dst)
if is_unpackable_sequence(value):
dst.extend(value)
else:
dst.append(value)
if debug:
print("[dst] : after")
pp(dst)
return dst
#------------------------------------------------------------------------------
def unwrap_at(dst, idx):
dst[idx] = unwrap(dst[idx])
return dst[idx]
#------------------------------------------------------------------------------
# returns how many elements added to dst
def unwrap_into(dst, idx):
"""Replaces wrapped element dst[idx] with unwrapped value.
if unwrapped value is a list or NonTerminal, explode list in place.
return number additional elements added to the list.
(i.e. zero (0) when values isn't a list or NonTerminal)
"""
wrapped = dst[idx]
tprint(f": unwrap_into : type(wrapped?) = {str(type(wrapped))}")
# wrapped is always a WrappedList
if not isinstance(wrapped, WrappedList):
tprint(f" => not a WrappedList -- nothing to do")
return 0
value = unwrap(wrapped)
tprint(f": unwrap_into : type(value) = {str(type(value))}")
if not is_unpackable_sequence(value):
tprint(f" => not an unpackable sequence, simply assign value to dst[idx]")
dst[idx] = value
return 0
tprint(f" : exploding inplace")
del dst[idx]
for elt in value[::-1]:
dst.insert(idx, elt)
delta = len(value) - 1
tprint(f" => delta = {delta}")
return delta
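# Sketch (illustrative): if dst == [w] where w is a WrappedList that unwraps to [t1, t2],
# unwrap_into(dst, 0) rewrites dst in place as [t1, t2] and returns 1, the number of
# extra elements introduced by exploding the sequence.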
#------------------------------------------------------------------------------
|
py | 7dffdcf1b110f2fbf1a66874ecb5d31e197e95a9 | # smartmirror.py
# requirements
# requests, feedparser, traceback, Pillow
from tkinter import *
import locale
import threading
import time
import requests
import json
import traceback
import feedparser, datetime
from PIL import Image, ImageTk
from contextlib import contextmanager
LOCALE_LOCK = threading.Lock()
ui_locale = '' # e.g. 'fr_FR' for French, '' as default
time_format = 12 # 12 or 24
date_format = "%b %d, %Y" # check python doc for strftime() for options
news_country_code = 'us'
weather_api_token = '12e2d78a6dfed11b903530d6dbe07f07' # create account at https://darksky.net/dev/
weather_lang = 'en' # see https://darksky.net/dev/docs/forecast for full list of language parameters values
weather_unit = 'auto' # see https://darksky.net/dev/docs/forecast for full list of unit parameters values
latitude ='52.520008' # Set this if IP location lookup does not work for you (must be a string)
longitude = '13.404954' # Set this if IP location lookup does not work for you (must be a string)
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 10
@contextmanager
def setlocale(name): #thread proof function to work with locale
    with LOCALE_LOCK:
        saved = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, saved)
# maps weather icon ids to image assets
# icon reading is not impacted by the 'lang' parameter
icon_lookup = {
'clear-day': "assets/Sun.png", # clear sky day
'wind': "assets/Wind.png", #wind
'cloudy': "assets/Cloud.png", # cloudy day
'partly-cloudy-day': "assets/PartlySunny.png", # partly cloudy day
'rain': "assets/Rain.png", # rain day
'snow': "assets/Snow.png", # snow day
'snow-thin': "assets/Snow.png", # sleet day
'fog': "assets/Haze.png", # fog day
'clear-night': "assets/Moon.png", # clear sky night
'partly-cloudy-night': "assets/PartlyMoon.png", # scattered clouds night
'thunderstorm': "assets/Storm.png", # thunderstorm
'tornado': "assests/Tornado.png", # tornado
'hail': "assests/Hail.png" # hail
}
class Clock(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
# initialize time label
self.time1 = ''
self.timeLbl = Label(self, font=('Helvetica', large_text_size), fg="white", bg="black")
self.timeLbl.pack(side=TOP, anchor=E)
# initialize day of week
self.day_of_week1 = ''
self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', small_text_size), fg="white", bg="black")
self.dayOWLbl.pack(side=TOP, anchor=E)
# initialize date label
self.date1 = ''
self.dateLbl = Label(self, text=self.date1, font=('Helvetica', small_text_size), fg="white", bg="black")
self.dateLbl.pack(side=TOP, anchor=E)
self.tick()
def tick(self):
with setlocale(ui_locale):
if time_format == 12:
time2 = time.strftime('%I:%M %p') #hour in 12h format
else:
time2 = time.strftime('%H:%M') #hour in 24h format
day_of_week2 = time.strftime('%A')
date2 = time.strftime(date_format)
# if time string has changed, update it
if time2 != self.time1:
self.time1 = time2
self.timeLbl.config(text=time2)
if day_of_week2 != self.day_of_week1:
self.day_of_week1 = day_of_week2
self.dayOWLbl.config(text=day_of_week2)
if date2 != self.date1:
self.date1 = date2
self.dateLbl.config(text=date2)
# calls itself every 200 milliseconds
# to update the time display as needed
# could use >200 ms, but display gets jerky
self.timeLbl.after(200, self.tick)
class Weather(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.temperature = ''
self.forecast = ''
self.location = ''
self.currently = ''
self.icon = ''
self.degreeFrm = Frame(self, bg="black")
self.degreeFrm.pack(side=TOP, anchor=W)
self.temperatureLbl = Label(self.degreeFrm, font=('Helvetica', xlarge_text_size), fg="white", bg="black")
self.temperatureLbl.pack(side=LEFT, anchor=N)
self.iconLbl = Label(self.degreeFrm, bg="black")
self.iconLbl.pack(side=LEFT, anchor=N, padx=20)
self.currentlyLbl = Label(self, font=('Helvetica', medium_text_size), fg="white", bg="black")
self.currentlyLbl.pack(side=TOP, anchor=W)
self.forecastLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
self.forecastLbl.pack(side=TOP, anchor=W)
self.locationLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
self.locationLbl.pack(side=TOP, anchor=W)
self.get_weather()
def get_ip(self):
try:
ip_url = "http://jsonip.com/"
req = requests.get(ip_url)
ip_json = json.loads(req.text)
return ip_json['ip']
except Exception as e:
traceback.print_exc()
return "Error: %s. Cannot get ip." % e
def get_weather(self):
try:
if latitude is None and longitude is None:
# get location
location_req_url = "http://freegeoip.net/json/%s" % self.get_ip()
r = requests.get(location_req_url)
location_obj = json.loads(r.text)
lat = location_obj['latitude']
lon = location_obj['longitude']
location2 = "%s, %s" % (location_obj['city'], location_obj['region_code'])
# get weather
weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, lat,lon,weather_lang,weather_unit)
else:
location2 = ""
# get weather
weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, latitude, longitude, weather_lang, weather_unit)
r = requests.get(weather_req_url)
weather_obj = json.loads(r.text)
degree_sign= u'\N{DEGREE SIGN}'
temperature2 = "%s%s" % (str(int(weather_obj['currently']['temperature'])), degree_sign)
currently2 = weather_obj['currently']['summary']
forecast2 = weather_obj["hourly"]["summary"]
icon_id = weather_obj['currently']['icon']
icon2 = None
if icon_id in icon_lookup:
icon2 = icon_lookup[icon_id]
if icon2 is not None:
if self.icon != icon2:
self.icon = icon2
image = Image.open(icon2)
image = image.resize((100, 100), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLbl.config(image=photo)
self.iconLbl.image = photo
else:
# remove image
self.iconLbl.config(image='')
if self.currently != currently2:
self.currently = currently2
self.currentlyLbl.config(text=currently2)
if self.forecast != forecast2:
self.forecast = forecast2
self.forecastLbl.config(text=forecast2)
if self.temperature != temperature2:
self.temperature = temperature2
self.temperatureLbl.config(text=temperature2)
if self.location != location2:
if location2 == ", ":
self.location = "Cannot Pinpoint Location"
self.locationLbl.config(text="Cannot Pinpoint Location")
else:
self.location = location2
self.locationLbl.config(text=location2)
except Exception as e:
traceback.print_exc()
print("Error: %s. Cannot get weather." % e)
self.after(600000, self.get_weather)
@staticmethod
def convert_kelvin_to_fahrenheit(kelvin_temp):
return 1.8 * (kelvin_temp - 273) + 32
class News(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self.config(bg='black')
self.title = 'News' # 'News' is more internationally generic
self.newsLbl = Label(self, text=self.title, font=('Helvetica', small_text_size), fg="white", bg="black")
self.newsLbl.pack(side=TOP, anchor=W)
self.headlinesContainer = Frame(self, bg="black")
self.headlinesContainer.pack(side=TOP)
self.get_headlines()
def get_headlines(self):
try:
# remove all children
for widget in self.headlinesContainer.winfo_children():
widget.destroy()
if news_country_code == None:
headlines_url = "https://news.google.com/news?ned=us&output=rss"
else:
headlines_url = "https://news.google.com/news?ned=%s&output=rss" % news_country_code
feed = feedparser.parse(headlines_url)
for post in feed.entries[0:5]:
headline = NewsHeadline(self.headlinesContainer, post.title)
headline.pack(side=TOP, anchor=W)
except Exception as e:
traceback.print_exc()
print("Error: %s. Cannot get news." % e)
self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
def __init__(self, parent, event_name=""):
Frame.__init__(self, parent, bg='black')
image = Image.open("assets/Newspaper.png")
image = image.resize((15, 15), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLbl = Label(self, bg='black', image=photo)
self.iconLbl.image = photo
self.iconLbl.pack(side=LEFT, anchor=N)
self.eventName = event_name
self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLbl.pack(side=LEFT, anchor=N)
class Calendar(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.title = 'Calendar Events'
self.calendarLbl = Label(self, text=self.title, font=('Helvetica', small_text_size), fg="white", bg="black")
self.calendarLbl.pack(side=TOP, anchor=E)
self.calendarEventContainer = Frame(self, bg='black')
self.calendarEventContainer.pack(side=TOP, anchor=E)
self.get_events()
def get_events(self):
#TODO: implement this method
#reference https://developers.google.com/google-apps/calendar/quickstart/python
for widget in self.calendarEventContainer.winfo_children():
widget.destroy()
pass
cal_url = "https://www.googleapis.com/calendar/v3/calendars/bipm2019%40gmail.com/events?maxResults=5&timeMin={}&key=AIzaSyBrIuIIm4gYvvRTiipjjZugb5mZjm8SlCA".format(datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"))
        feed = {}
try:
resp = requests.get(cal_url)
if resp.status_code == 200:
feed = json.loads(resp.text)
else:
print(resp.status_code)
print(resp.text)
except Exception as err:
print(err)
if len(feed.get("items")) == 0:
cal_headline = CalendarEvent(self.calendarEventContainer)
cal_headline.pack(side=TOP, anchor=E)
for post in feed.get("items"):
# if datetime.datetime.strptime(post['start']['dateTime'].split("+")[0], "%Y-%m-%dT%H:%M:%S") > datetime.datetime.now():
cal_headline = CalendarEvent(self.calendarEventContainer, post.get("summary"), datetime.datetime.strptime(post['start']['dateTime'].split("+")[0], "%Y-%m-%dT%H:%M:%S").strftime("%Y-%m-%d %H:%M"))
cal_headline.pack(side=TOP, anchor=E)
pass
class CalendarEvent(Frame):
def __init__(self, parent, event_name="You're good - No upcoming events found.", event_time = None):
Frame.__init__(self, parent, bg='black')
self.eventName = event_name
if event_time:
self.eventTime = event_time
else:
self.eventTime = ""
self.eventNameLbl = Label(self, text= self.eventTime + "\t" + self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLbl.pack(side=TOP, anchor=E)
class Splash(Toplevel):
def __init__(self, parent):
# print("Here")
Toplevel.__init__(self, parent)
self.title("Hello Beautiful!")
self.configure(background='black')
self.geometry("600x600")
# self.topFrame = Frame(self.tk, background='black')
text_label = Label(self, text = "Hello Beautiful!!!", font=('Helvetica', small_text_size), fg="white", bg="black")
text_label.pack(side=TOP, fill=BOTH, expand=YES)
## required to make window show before the program gets to the mainloop
self.update()
class Tweet(object):
def __init__(self, tweet_dict):
self.user_screen_name = tweet_dict.get('user', {}).get('screen_name', None)
self.creation_time = tweet_dict.get("created_at", None)
self.tweet_text = tweet_dict.get("text", "").replace("\n", "").replace("\t","")
self.hashtags = []
for hashtag in tweet_dict.get("entities", {}).get('hashtags', []):
self.hashtags.append(hashtag.get('text'))
class TwitterTweetListener():
def __init__(self, parent):
self.tweet_count = 0
self.parent = parent
pass
def on_data(self, data):
try:
tweet = json.loads(data.strip())
tweet_obj = Tweet(tweet)
self.tweet_count += 1
tw_event = TwitterTweetEvent(self.parent, tweet_obj.tweet_text, tweet_obj.creation_time)
tw_event.pack(side=TOP, anchor=E)
if self.tweet_count >4:
return False
return True
except BaseException as e:
print("Error on_data %s" % str(e))
return True
def on_error(self, status):
print(status)
pass
def on_exception(self, status):
print(status)
pass
def on_connect(self):
print("Connected Successfully!!!")
pass
def on_disconnect(self):
print("Disconnected Successfully")
class TwitterTweetEvent(Frame):
def __init__(self, parent, event_name="No Tweets Running Hot at this time", event_time = None):
Frame.__init__(self, parent, bg='black')
self.eventName = event_name
if event_time:
self.eventTime = event_time
else:
self.eventTime = ""
self.eventNameLbl = Label(self, text=self.eventTime + "\t" + self.eventName,
font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLbl.pack(side=TOP, anchor=E)
class Twitter(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.title = 'Twitter Tweets'
self.twitterLbl = Label(self, text=self.title, font=('Helvetica', small_text_size), fg="white", bg="black")
self.twitterLbl.pack(side=TOP, anchor=E)
self.twitterEventContainer = Frame(self, bg='black')
self.twitterEventContainer.pack(side=TOP, anchor=E)
self.get_tweets()
def get_tweets(self):
for widget in self.twitterEventContainer.winfo_children():
widget.destroy()
hashtag_list = ['CWC19']
from twitter_stream_listener import TwitterStreamer
self.stream_listener = TwitterStreamer(self.twitterEventContainer)
self.stream_listener.stream_tweets(hashtag_list)
self.after(600000, self.get_tweets)
class FullscreenWindow:
def __init__(self):
self.tk = Tk()
self.tk.withdraw()
splash = Splash(self.tk)
#
# ## simulate a delay while loading
time.sleep(6)
#
# ## finished loading so destroy splash
self.tk.title("Smart Watch")
self.tk.configure(background='black')
self.topFrame = Frame(self.tk, background = 'black')
self.middleFrame = Frame(self.tk, background = 'black')
self.bottomFrame = Frame(self.tk, background='black')
self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)
self.middleFrame.pack(side = TOP, after=self.topFrame, fill = BOTH, expand = YES)
self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)
self.state = False
self.tk.bind("<Return>", self.toggle_fullscreen)
self.tk.bind("<Escape>", self.end_fullscreen)
# clock
self.clock = Clock(self.topFrame)
self.clock.pack(side=RIGHT, anchor=N, padx=100, pady=60)
# weather
self.weather = Weather(self.topFrame)
self.weather.pack(side=LEFT, anchor=N, padx=100, pady=60)
# news
self.news = News(self.bottomFrame)
self.news.pack(side=LEFT, anchor=S, padx=100, pady=60)
# calender - removing for now
self.calender = Calendar(self.bottomFrame)
self.calender.pack(side = RIGHT, anchor=S, padx=100, pady=60)
self.twitter = Twitter(self.middleFrame)
self.twitter.pack(side=LEFT, anchor=S, padx=100, pady=60)
# ## show window again
splash.destroy()
self.tk.deiconify()
def toggle_fullscreen(self, event=None):
self.state = not self.state # Just toggling the boolean
self.tk.attributes("-fullscreen", self.state)
return "break"
def end_fullscreen(self, event=None):
self.state = False
self.tk.attributes("-fullscreen", False)
return "break"
if __name__ == '__main__':
w = FullscreenWindow()
w.tk.mainloop()
# smartmirror.py
# requirements
# requests, feedparser, traceback, Pillow
from Tkinter import *
import locale
import threading
import time
import requests
import json
import traceback
import feedparser
from PIL import Image, ImageTk
from contextlib import contextmanager
LOCALE_LOCK = threading.Lock()
ui_locale = '' # e.g. 'fr_FR' for French, '' as default
time_format = 12 # 12 or 24
date_format = "%b %d, %Y" # check python doc for strftime() for options
news_country_code = 'us'
weather_api_token = '<TOKEN>' # create account at https://darksky.net/dev/
weather_lang = 'en' # see https://darksky.net/dev/docs/forecast for full list of language parameters values
weather_unit = 'us' # see https://darksky.net/dev/docs/forecast for full list of unit parameters values
latitude = None # Set this if IP location lookup does not work for you (must be a string)
longitude = None # Set this if IP location lookup does not work for you (must be a string)
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 18
@contextmanager
def setlocale(name): #thread proof function to work with locale
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved)
# maps weather icon ids to image assets
# icon reading is not impacted by the 'lang' parameter
icon_lookup = {
'clear-day': "assets/Sun.png", # clear sky day
'wind': "assets/Wind.png", #wind
'cloudy': "assets/Cloud.png", # cloudy day
'partly-cloudy-day': "assets/PartlySunny.png", # partly cloudy day
'rain': "assets/Rain.png", # rain day
'snow': "assets/Snow.png", # snow day
'snow-thin': "assets/Snow.png", # sleet day
'fog': "assets/Haze.png", # fog day
'clear-night': "assets/Moon.png", # clear sky night
'partly-cloudy-night': "assets/PartlyMoon.png", # scattered clouds night
'thunderstorm': "assets/Storm.png", # thunderstorm
'tornado': "assests/Tornado.png", # tornado
'hail': "assests/Hail.png" # hail
}
class Clock(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
# initialize time label
self.time1 = ''
self.timeLbl = Label(self, font=('Helvetica', large_text_size), fg="white", bg="black")
self.timeLbl.pack(side=TOP, anchor=E)
# initialize day of week
self.day_of_week1 = ''
self.dayOWLbl = Label(self, text=self.day_of_week1, font=('Helvetica', small_text_size), fg="white", bg="black")
self.dayOWLbl.pack(side=TOP, anchor=E)
# initialize date label
self.date1 = ''
self.dateLbl = Label(self, text=self.date1, font=('Helvetica', small_text_size), fg="white", bg="black")
self.dateLbl.pack(side=TOP, anchor=E)
self.tick()
def tick(self):
with setlocale(ui_locale):
if time_format == 12:
time2 = time.strftime('%I:%M %p') #hour in 12h format
else:
time2 = time.strftime('%H:%M') #hour in 24h format
day_of_week2 = time.strftime('%A')
date2 = time.strftime(date_format)
# if time string has changed, update it
if time2 != self.time1:
self.time1 = time2
self.timeLbl.config(text=time2)
if day_of_week2 != self.day_of_week1:
self.day_of_week1 = day_of_week2
self.dayOWLbl.config(text=day_of_week2)
if date2 != self.date1:
self.date1 = date2
self.dateLbl.config(text=date2)
# calls itself every 200 milliseconds
# to update the time display as needed
# could use >200 ms, but display gets jerky
self.timeLbl.after(200, self.tick)
class Weather(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.temperature = ''
self.forecast = ''
self.location = ''
self.currently = ''
self.icon = ''
self.degreeFrm = Frame(self, bg="black")
self.degreeFrm.pack(side=TOP, anchor=W)
self.temperatureLbl = Label(self.degreeFrm, font=('Helvetica', xlarge_text_size), fg="white", bg="black")
self.temperatureLbl.pack(side=LEFT, anchor=N)
self.iconLbl = Label(self.degreeFrm, bg="black")
self.iconLbl.pack(side=LEFT, anchor=N, padx=20)
self.currentlyLbl = Label(self, font=('Helvetica', medium_text_size), fg="white", bg="black")
self.currentlyLbl.pack(side=TOP, anchor=W)
self.forecastLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
self.forecastLbl.pack(side=TOP, anchor=W)
self.locationLbl = Label(self, font=('Helvetica', small_text_size), fg="white", bg="black")
self.locationLbl.pack(side=TOP, anchor=W)
self.get_weather()
def get_ip(self):
try:
ip_url = "http://jsonip.com/"
req = requests.get(ip_url)
ip_json = json.loads(req.text)
return ip_json['ip']
except Exception as e:
traceback.print_exc()
return "Error: %s. Cannot get ip." % e
def get_weather(self):
try:
if latitude is None and longitude is None:
# get location
location_req_url = "http://freegeoip.net/json/%s" % self.get_ip()
r = requests.get(location_req_url)
location_obj = json.loads(r.text)
lat = location_obj['latitude']
lon = location_obj['longitude']
location2 = "%s, %s" % (location_obj['city'], location_obj['region_code'])
# get weather
weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, lat,lon,weather_lang,weather_unit)
else:
location2 = ""
# get weather
weather_req_url = "https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s" % (weather_api_token, latitude, longitude, weather_lang, weather_unit)
r = requests.get(weather_req_url)
weather_obj = json.loads(r.text)
degree_sign= u'\N{DEGREE SIGN}'
temperature2 = "%s%s" % (str(int(weather_obj['currently']['temperature'])), degree_sign)
currently2 = weather_obj['currently']['summary']
forecast2 = weather_obj["hourly"]["summary"]
icon_id = weather_obj['currently']['icon']
icon2 = None
if icon_id in icon_lookup:
icon2 = icon_lookup[icon_id]
if icon2 is not None:
if self.icon != icon2:
self.icon = icon2
image = Image.open(icon2)
image = image.resize((100, 100), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLbl.config(image=photo)
self.iconLbl.image = photo
else:
# remove image
self.iconLbl.config(image='')
if self.currently != currently2:
self.currently = currently2
self.currentlyLbl.config(text=currently2)
if self.forecast != forecast2:
self.forecast = forecast2
self.forecastLbl.config(text=forecast2)
if self.temperature != temperature2:
self.temperature = temperature2
self.temperatureLbl.config(text=temperature2)
if self.location != location2:
if location2 == ", ":
self.location = "Cannot Pinpoint Location"
self.locationLbl.config(text="Cannot Pinpoint Location")
else:
self.location = location2
self.locationLbl.config(text=location2)
except Exception as e:
traceback.print_exc()
print "Error: %s. Cannot get weather." % e
self.after(600000, self.get_weather)
@staticmethod
def convert_kelvin_to_fahrenheit(kelvin_temp):
return 1.8 * (kelvin_temp - 273) + 32
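# Worked example for the conversion above (illustrative only):
# convert_kelvin_to_fahrenheit(300) -> 1.8 * (300 - 273) + 32 = 80.6
# Note the helper uses 273 rather than 273.15, so results are offset by about
# 0.27 F from the exact conversion.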
class News(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self.config(bg='black')
self.title = 'News' # 'News' is more internationally generic
self.newsLbl = Label(self, text=self.title, font=('Helvetica', medium_text_size), fg="white", bg="black")
self.newsLbl.pack(side=TOP, anchor=W)
self.headlinesContainer = Frame(self, bg="black")
self.headlinesContainer.pack(side=TOP)
self.get_headlines()
def get_headlines(self):
try:
# remove all children
for widget in self.headlinesContainer.winfo_children():
widget.destroy()
if news_country_code is None:
headlines_url = "https://news.google.com/news?ned=us&output=rss"
else:
headlines_url = "https://news.google.com/news?ned=%s&output=rss" % news_country_code
feed = feedparser.parse(headlines_url)
for post in feed.entries[0:5]:
headline = NewsHeadline(self.headlinesContainer, post.title)
headline.pack(side=TOP, anchor=W)
except Exception as e:
traceback.print_exc()
print "Error: %s. Cannot get news." % e
self.after(600000, self.get_headlines)
class NewsHeadline(Frame):
def __init__(self, parent, event_name=""):
Frame.__init__(self, parent, bg='black')
image = Image.open("assets/Newspaper.png")
image = image.resize((25, 25), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLbl = Label(self, bg='black', image=photo)
self.iconLbl.image = photo
self.iconLbl.pack(side=LEFT, anchor=N)
self.eventName = event_name
self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLbl.pack(side=LEFT, anchor=N)
class Calendar(Frame):
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.title = 'Calendar Events'
self.calendarLbl = Label(self, text=self.title, font=('Helvetica', medium_text_size), fg="white", bg="black")
self.calendarLbl.pack(side=TOP, anchor=E)
self.calendarEventContainer = Frame(self, bg='black')
self.calendarEventContainer.pack(side=TOP, anchor=E)
self.get_events()
def get_events(self):
#TODO: implement this method
# reference https://developers.google.com/google-apps/calendar/quickstart/python
# remove all children
for widget in self.calendarEventContainer.winfo_children():
widget.destroy()
calendar_event = CalendarEvent(self.calendarEventContainer)
calendar_event.pack(side=TOP, anchor=E)
pass
class CalendarEvent(Frame):
def __init__(self, parent, event_name="Event 1"):
Frame.__init__(self, parent, bg='black')
self.eventName = event_name
self.eventNameLbl = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLbl.pack(side=TOP, anchor=E)
class FullscreenWindow:
def __init__(self):
self.tk = Tk()
self.tk.configure(background='black')
self.topFrame = Frame(self.tk, background = 'black')
self.bottomFrame = Frame(self.tk, background = 'black')
self.topFrame.pack(side = TOP, fill=BOTH, expand = YES)
self.bottomFrame.pack(side = BOTTOM, fill=BOTH, expand = YES)
self.state = False
self.tk.bind("<Return>", self.toggle_fullscreen)
self.tk.bind("<Escape>", self.end_fullscreen)
# clock
self.clock = Clock(self.topFrame)
self.clock.pack(side=RIGHT, anchor=N, padx=100, pady=60)
# weather
self.weather = Weather(self.topFrame)
self.weather.pack(side=LEFT, anchor=N, padx=100, pady=60)
# news
self.news = News(self.bottomFrame)
self.news.pack(side=LEFT, anchor=S, padx=100, pady=60)
# calendar - removing for now
# self.calender = Calendar(self.bottomFrame)
# self.calender.pack(side = RIGHT, anchor=S, padx=100, pady=60)
def toggle_fullscreen(self, event=None):
self.state = not self.state # Just toggling the boolean
self.tk.attributes("-fullscreen", self.state)
return "break"
def end_fullscreen(self, event=None):
self.state = False
self.tk.attributes("-fullscreen", False)
return "break"
if __name__ == '__main__':
w = FullscreenWindow()
w.tk.mainloop()
|
py | 7dffdd9dbdb4c45afce975f7da337cb645bce4e7 | escala=[
[0.000, 'rgb(235, 245, 251)'],
[0.055, 'rgb(214, 234, 248)'],
[0.111, 'rgb(174, 214, 241)'],
[0.155, 'rgb(133, 193, 233)'],
[0.225, 'rgb(93, 173, 226)'],
[0.335, 'rgb(52, 152, 219)'],
[0.555, 'rgb(46, 134, 193)'],
[0.777, 'rgb(40, 116, 166)'],
[0.888, 'rgb(27, 79, 114)'],
[1.000, 'rgb(33, 97, 140)']
]
escala_tabla=[
[0,'#364AF5'],
[0.5,'#B8CCE7'],
[1,'#ffffff']
]
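# Hedged usage sketch (not part of the original file): lists of
# [position, 'rgb(...)'] pairs in this shape are the format Plotly accepts as
# custom colorscales. Assuming that is the intent here, they could be used as:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Heatmap(z=[[1, 2], [3, 4]], colorscale=escala))
#   fig.show()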
|
py | 7dffdde7673ca5320e8ff3fc6038145af06a6e87 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
class _GetAttrMeta(type):
# https://stackoverflow.com/questions/33727217/subscriptable-objects-in-class
def __getitem__(cls, x):
return getattr(cls, x)
def __iter__(cls):
""" Getting subclasses which usually represent resolutions """
for attr in vars(cls):
if not attr.startswith("_"):
yield attr
class DatasetTreeCore(metaclass=_GetAttrMeta):
pass
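# Illustrative sketch (hypothetical subclass, not part of this module): the
# _GetAttrMeta metaclass makes any DatasetTreeCore subclass subscriptable and
# iterable over its public attribute names, e.g.:
#
#   class Resolution(DatasetTreeCore):
#       HOURLY = "hourly"
#       DAILY = "daily"
#
#   Resolution["HOURLY"]   # -> "hourly" (__getitem__ delegates to getattr)
#   list(Resolution)       # -> ["HOURLY", "DAILY"] (__iter__ yields attribute names)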
|
py | 7dffdee56c9cb1012b670f58fb911297b833434f | '''GenoMEL-Bionimbus Protected Data Cloud SLURM runner'''
import argparse
import utils.workflow
def is_nat(pos):
'''Checks that a value is a natural number.'''
if int(pos) > 0:
return int(pos)
raise argparse.ArgumentTypeError('{} must be positive, non-zero'.format(pos))
def get_args():
'''Loads the parser'''
# Main parser
parser = argparse.ArgumentParser(prog='GenoMEL-Bionimbus Protected Data Cloud SLURM runner.', \
add_help=False)
# Required parser
required = parser.add_argument_group("Required input parameters")
required.add_argument('--basedir', \
required=True, \
help='Base local work directory')
required.add_argument('--project', \
required=True, \
help='Project')
required.add_argument('--batch_id', \
required=True, \
help='Batch id')
required.add_argument('--job_uuid', \
required=True, \
help='Job uuid')
required.add_argument('--input_table', \
required=True, \
help='PSQL input table')
required.add_argument('--psql_conf', \
required=True, \
help='Local PSQL config file')
required.add_argument('--gvcf_files_manifest', \
required=True, \
help='Manifest of all gvcf files')
required.add_argument('--gatk4_genotyping_thread_count', \
required=True, \
type=is_nat, \
help='Threads used for GATK4 genotyping')
required.add_argument('--number_of_chunks_for_gatk', \
required=True, \
type=is_nat, \
help='Number of chunks for GATK4 on each node')
required.add_argument('--bam_files_manifest', \
required=True, \
help='Manifest of all bam files')
required.add_argument('--freebayes_thread_count', \
required=True, \
type=is_nat, \
help='Threads used for Freebayes')
required.add_argument('--number_of_chunks_for_freebayes', \
required=True, \
type=is_nat, \
help='Number of chunks for Freebayes on each node')
required.add_argument('--upload_s3_bucket', \
required=True, \
help='S3 bucket for uploads')
required.add_argument('--cromwell_jar_path', \
required=True, \
help='Cromwell jar path')
return parser.parse_args()
if __name__ == '__main__':
# Get args
args = get_args()
# Run pipeline
utils.workflow.run_cohort_genotyping(args)
|
py | 7dffe0af140e898e023d9f401734828912553e6f |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union
from deeppavlov.core.common.registry import register
from deeppavlov.core.models.component import Component
StrTokenReverserInfo = Union[List[str], List['StrTokenReverserInfo']]
@register('str_token_reverser')
class StrTokenReverser(Component):
"""Component for converting strings to strings with reversed token positions
Args:
tokenized: The parameter is only needed to reverse tokenized strings.
"""
def __init__(self, tokenized: bool = False, *args, **kwargs) -> None:
self.tokenized = tokenized
@staticmethod
def _reverse_str(raw_string):
splitted = raw_string.split()
splitted.reverse()
string = ' '.join(splitted)
return string
@staticmethod
def _reverse_tokens(raw_tokens):
raw_tokens.reverse()
return raw_tokens
def __call__(self, batch: Union[str, list, tuple]) -> StrTokenReverserInfo:
"""Recursively search for strings in a list and convert them to strings with reversed token positions
Args:
batch: a string or a list containing strings
Returns:
the same structure where all strings tokens are reversed
"""
if isinstance(batch, (list, tuple)):
batch = batch.copy()
if self.tokenized:
if isinstance(batch, (list, tuple)):
if isinstance(batch[-1], str):
return self._reverse_tokens(batch)
else:
return [self(line) for line in batch]
raise RuntimeError(f'The objects passed to the reverser are not a list or tuple! '
f'They are {type(batch)}.'
f' If you want to pass a str directly, use the option tokenized=False.')
else:
if isinstance(batch, (list, tuple)):
return [self(line) for line in batch]
else:
return self._reverse_str(batch)
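# Usage sketch (illustrative, mirrors the docstrings above):
#
#   reverser = StrTokenReverser()
#   reverser('one two three')                    # -> 'three two one'
#   reverser(['a b', ['c d e']])                 # -> ['b a', ['e d c']]
#
#   token_reverser = StrTokenReverser(tokenized=True)
#   token_reverser([['one', 'two'], ['three']])  # -> [['two', 'one'], ['three']]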
|
py | 7dffe13b02ad8f179bf51c4f3dfcdea26b8b51a7 | # Generated by Django 2.0.6 on 2018-06-16 16:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='claps',
new_name='downvote',
),
migrations.AddField(
model_name='post',
name='upvote',
field=models.PositiveIntegerField(default=0),
),
]
|
py | 7dffe231c31a4be0d60223846b10ded3b3382b58 | import numpy as np
import cv2
import os
import caffe
from scipy.spatial.distance import cosine
image_folder = './images'
output_folder = './features'
model = './train/code/sphereface_deploy.prototxt'
weights = './train/result/sphereface_model.caffemodel'
net = caffe.Net(model, weights, caffe.TEST)
def extract_deep_feature(filename, net):
img = cv2.imread(filename)
if img is None:
return None
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = (img - 127.5)/128
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
img = transformer.preprocess('data', img)
img = np.transpose(img, [2, 0, 1])
data = np.concatenate([np.expand_dims(img, axis=0), np.expand_dims(np.flip(img, axis=0), axis=0)], axis=0)
net.blobs['data'].reshape(2, 3, 112, 96)
net.blobs['data'].data[...] = data
res = net.forward()['fc5']
feature = np.concatenate([res[0], res[1]])
return feature
def save_feature_vectors():
list_images = os.listdir(image_folder)
os.makedirs(output_folder, exist_ok=True)
for image_name in list_images:
feature_vector = extract_deep_feature(os.path.join(image_folder, image_name), net)
np.savetxt(os.path.join(output_folder, image_name.split('.')[0]), feature_vector)
def detect(feature):
list_features = os.listdir(output_folder)
scores = []
for image_name in list_features:
feature2 = np.loadtxt(os.path.join(output_folder, image_name))
score = 1 - cosine(feature,feature2)
scores.append(score)
scores = np.array(scores)
return list_features[np.argmax(scores)]
def detect_from_img(img_path):
img_feature = extract_deep_feature(img_path, net)
return detect(img_feature)
if __name__ == '__main__':
#save_feature_vectors()
print(detect_from_img('./Aaron_Peirsol_0003.jpg'))
#img_feature = extract_deep_feature('./Aaron_Peirsol_0003.jpg', net)
|
py | 7dffe23c70d0958809a77338bd1279f1061c027a | from django.urls import path, include
urlpatterns = [
path('example/', include('project.apps.example.urls', namespace='example')),
]
|
py | 7dffe251b6758c6abed55017a56dc480ea2ca298 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the JointDistributionSequential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions import joint_distribution_sequential
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
tfb = tfp.bijectors
tfd = tfp.distributions
# Defer creating test dists (by hiding them in functions) until we know what
# execution regime (eager/graph/tf-function) the test will run under.
def basic_model_fn():
return [
tfd.Normal(0., 1., name='a'),
tfd.Independent(tfd.Exponential(rate=[100, 120]),
reinterpreted_batch_ndims=1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])
]
def nested_lists_model_fn():
return [
tfd.JointDistributionSequential([
tfd.MultivariateNormalDiag([0., 0.], [1., 1.]),
tfd.JointDistributionSequential([
tfd.StudentT(3., -2., 5.),
tfd.Exponential(4.)])], name='abc'),
lambda abc: tfd.JointDistributionSequential([ # pylint: disable=g-long-lambda
tfd.Normal(abc[0] * abc[1][0], abc[1][1]),
tfd.Normal(abc[0] + abc[1][0], abc[1][1])], name='de')
]
class Dummy(object):
"""Dummy object to ensure `tf_inspect.getfullargspec` works for `__init__`."""
# To ensure no code is keying on the unspecial name "self", we use "me".
def __init__(me, arg1, arg2, arg3=None, **named): # pylint: disable=no-self-argument
pass
@test_util.test_all_tf_execution_regimes
class JointDistributionSequentialTest(test_util.TestCase):
def test_sample_log_prob(self):
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.Normal(loc=0, scale=2.),
tfd.Normal, # Or, `lambda loc, scale: tfd.Normal(loc, scale)`.
lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12),
],
validate_args=True)
self.assertEqual(
(
('e', ()),
('scale', ('e',)),
('loc', ()),
('m', ('loc', 'scale')),
('x', ('m',)),
),
d.resolve_graph())
xs = d.sample(seed=test_util.test_seed())
self.assertLen(xs, 5)
# We'll verify the shapes work as intended when we plumb these back into the
# respective log_probs.
ds, _ = d.sample_distributions(value=xs, seed=test_util.test_seed())
self.assertLen(ds, 5)
self.assertIsInstance(ds[0], tfd.Independent)
self.assertIsInstance(ds[1], tfd.Gamma)
self.assertIsInstance(ds[2], tfd.Normal)
self.assertIsInstance(ds[3], tfd.Normal)
self.assertIsInstance(ds[4], tfd.Sample)
# Static properties.
self.assertAllEqual(
[tf.float32, tf.float32, tf.float32, tf.float32, tf.int32],
d.dtype)
for expected, actual_tensorshape, actual_shapetensor in zip(
[[2], [], [], [], [12]],
d.event_shape,
self.evaluate(d.event_shape_tensor())):
self.assertAllEqual(expected, actual_tensorshape)
self.assertAllEqual(expected, actual_shapetensor)
for expected, actual_tensorshape, actual_shapetensor in zip(
[[], [], [], []],
d.batch_shape,
self.evaluate(d.batch_shape_tensor())):
self.assertAllEqual(expected, actual_tensorshape)
self.assertAllEqual(expected, actual_shapetensor)
expected_jlp = sum(d_.log_prob(x) for d_, x in zip(ds, xs))
actual_jlp = d.log_prob(xs)
self.assertAllEqual(*self.evaluate([expected_jlp, actual_jlp]))
def test_kl_divergence(self):
d0 = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
tfd.Normal(loc=0, scale=2.),
],
validate_args=True)
d1 = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[10, 12]), 1),
tfd.Normal(loc=1, scale=1.),
],
validate_args=True)
expected_kl = sum(tfd.kl_divergence(d0_, d1_) for d0_, d1_
in zip(d0.model, d1.model))
actual_kl = tfd.kl_divergence(d0, d1)
other_actual_kl = d0.kl_divergence(d1)
expected_kl_, actual_kl_, other_actual_kl_ = self.evaluate([
expected_kl, actual_kl, other_actual_kl])
self.assertNear(expected_kl_, actual_kl_, err=1e-5)
self.assertNear(expected_kl_, other_actual_kl_, err=1e-5)
def test_cross_entropy(self):
d0 = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
tfd.Normal(loc=0, scale=2.),
],
validate_args=True)
d1 = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[10, 12]), 1),
tfd.Normal(loc=1, scale=1.),
],
validate_args=True)
expected_xent = sum(
d0_.cross_entropy(d1_) for d0_, d1_
in zip(d0.model, d1.model))
actual_xent = d0.cross_entropy(d1)
expected_xent_, actual_xent_ = self.evaluate([expected_xent, actual_xent])
self.assertNear(actual_xent_, expected_xent_, err=1e-5)
def test_norequired_args_maker(self):
"""Test that only non-default args are passed through."""
with self.assertRaisesWithPredicateMatch(
ValueError, 'Must pass probs or logits, but not both.'):
tfd.JointDistributionSequential([tfd.Normal(0., 1.), tfd.Bernoulli])
def test_graph_resolution(self):
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.HalfNormal(2.5),
lambda s: tfd.Normal(loc=0, scale=s),
tfd.Exponential(2),
lambda df, loc, _, scale: tfd.StudentT(df, loc, scale),
],
validate_args=True)
self.assertEqual(
(('e', ()),
('scale', ('e',)),
('s', ()),
('loc', ('s',)),
('df', ()),
('x', ('df', 'loc', '_', 'scale'))),
d.resolve_graph())
@parameterized.parameters('mean', 'mode', 'stddev', 'variance')
def test_summary_statistic(self, attr):
d = tfd.JointDistributionSequential(
[tfd.Normal(0., 1.), tfd.Bernoulli(logits=0.)],
validate_args=True)
expected = tuple(getattr(d_, attr)() for d_ in d.model)
actual = getattr(d, attr)()
self.assertAllEqual(*self.evaluate([expected, actual]))
@parameterized.parameters(('covariance',))
def test_notimplemented_summary_statistic(self, attr):
d = tfd.JointDistributionSequential([tfd.Normal(0., 1.),
tfd.Bernoulli(probs=0.5)],
validate_args=True)
with self.assertRaisesWithPredicateMatch(
NotImplementedError,
attr + ' is not implemented: JointDistributionSequential'):
getattr(d, attr)()
@parameterized.parameters(
'log_cdf', 'cdf', 'log_survival_function', 'survival_function')
def test_notimplemented_evaluative_statistic(self, attr):
d = tfd.JointDistributionSequential([tfd.Normal(0., 1.),
tfd.Bernoulli(probs=0.5)],
validate_args=True)
with self.assertRaisesWithPredicateMatch(
NotImplementedError,
attr + ' is not implemented: JointDistributionSequential'):
getattr(d, attr)([0.]*len(d.model))
def test_notimplemented_quantile(self):
d = tfd.JointDistributionSequential([tfd.Normal(0., 1.),
tfd.Bernoulli(probs=0.5)],
validate_args=True)
with self.assertRaisesWithPredicateMatch(
NotImplementedError,
'quantile is not implemented: JointDistributionSequential'):
d.quantile(0.5)
def test_copy(self):
pgm = [tfd.Normal(0., 1.), tfd.Bernoulli(probs=0.5)]
d = tfd.JointDistributionSequential(pgm, validate_args=True)
d_copy = d.copy()
self.assertAllEqual(
{'model': pgm,
'validate_args': True,
'name': None},
d_copy.parameters)
def test_batch_slicing(self):
d = tfd.JointDistributionSequential(
[
tfd.Exponential(rate=[10, 12, 14]),
lambda s: tfd.Normal(loc=0, scale=s),
lambda: tfd.Beta(concentration0=[3, 2, 1], concentration1=1),
],
validate_args=True)
d0, d1 = d[:1], d[1:]
x0 = d0.sample(seed=test_util.test_seed())
x1 = d1.sample(seed=test_util.test_seed())
self.assertLen(x0, 3)
self.assertEqual([1], x0[0].shape)
self.assertEqual([1], x0[1].shape)
self.assertEqual([1], x0[2].shape)
self.assertLen(x1, 3)
self.assertEqual([2], x1[0].shape)
self.assertEqual([2], x1[1].shape)
self.assertEqual([2], x1[2].shape)
def test_sample_shape_propagation_default_behavior(self):
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.HalfNormal(2.5),
lambda s: tfd.Normal(loc=0, scale=s),
tfd.Exponential(2),
lambda df, loc, _, scale: tfd.StudentT(df, loc, scale),
],
validate_args=True)
x = d.sample([2, 3], seed=test_util.test_seed())
self.assertLen(x, 6)
self.assertEqual((2, 3, 2), x[0].shape)
self.assertEqual((2, 3), x[1].shape)
self.assertEqual((2, 3), x[2].shape)
self.assertEqual((2, 3), x[3].shape)
self.assertEqual((2, 3), x[4].shape)
self.assertEqual((2, 3), x[5].shape)
lp = d.log_prob(x)
self.assertEqual((2, 3), lp.shape)
def test_sample_shape_propagation_nondefault_behavior(self):
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1), # 0
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]), # 1
tfd.HalfNormal(2.5), # 2
lambda s: tfd.Normal(loc=0, scale=s), # 3
tfd.Exponential(2), # 4
lambda df, loc, _, scale: tfd.StudentT(df, loc, scale), # 5
],
validate_args=False) # So log_prob doesn't complain.
# The following enables the nondefault sample shape behavior.
d._always_use_specified_sample_shape = True
sample_shape = (2, 3)
x = d.sample(sample_shape, seed=test_util.test_seed())
self.assertLen(x, 6)
self.assertEqual(sample_shape + (2,), x[0].shape)
self.assertEqual(sample_shape * 2, x[1].shape) # Has 1 arg.
self.assertEqual(sample_shape * 1, x[2].shape) # Has 0 args.
self.assertEqual(sample_shape * 2, x[3].shape) # Has 1 arg.
self.assertEqual(sample_shape * 1, x[4].shape) # Has 0 args.
# Has 3 args, one being scalar.
self.assertEqual(sample_shape * 3, x[5].shape)
lp = d.log_prob(x)
self.assertEqual(sample_shape * 3, lp.shape)
def test_argspec(self):
argspec = tf_inspect.getfullargspec(Dummy)
self.assertAllEqual(['me', 'arg1', 'arg2', 'arg3'], argspec.args)
self.assertIs(None, argspec.varargs)
self.assertIs('named', argspec.varkw)
self.assertAllEqual((None,), argspec.defaults)
def test_invalid_structure_raises_error(self):
with self.assertRaisesWithPredicateMatch(
TypeError, 'Unable to unflatten like `model` with type "model".'):
tfd.JointDistributionSequential(
collections.namedtuple('model',
'a b')(a=tfd.Normal(0, 1), b=tfd.Normal(1, 2)),
validate_args=True)
def test_simple_example_with_dynamic_shapes(self):
dist = tfd.JointDistributionSequential([
tfd.Normal(tf1.placeholder_with_default(0., shape=None),
tf1.placeholder_with_default(1., shape=None)),
lambda a: tfd.Normal(a, 1.)], validate_args=True)
lp = dist.log_prob(dist.sample(5, seed=test_util.test_seed()))
self.assertAllEqual(self.evaluate(lp).shape, [5])
def test_dist_fn_takes_varargs(self):
dist = tfd.JointDistributionSequential(
[
tfb.Scale(-1.)(tfd.Exponential(1.)), # Negative.
lambda *args: tfd.Exponential(tf.exp(args[0])), # Positive.
lambda *args: tfd.Normal(loc=args[1], # pylint: disable=g-long-lambda
scale=args[0], # Must be positive.
validate_args=True)
], validate_args=True)
lp = dist.log_prob(dist.sample(5, seed=test_util.test_seed()))
self.assertAllEqual(lp.shape, [5])
@parameterized.named_parameters(
('basic', basic_model_fn),
('nested_lists', nested_lists_model_fn))
def test_can_call_log_prob_with_args_and_kwargs(self, model_fn):
d = tfd.JointDistributionSequential(
model_fn(), validate_args=True)
# Destructure vector-valued Tensors into Python lists, to mimic the values
# a user might type.
def _convert_ndarray_to_list(x):
if isinstance(x, np.ndarray) and x.ndim > 0:
return list(x)
return x
value = tf.nest.map_structure(
_convert_ndarray_to_list,
self.evaluate(d.sample(seed=test_util.test_seed())))
value_with_names = list(zip(d._flat_resolve_names(), value))
lp_value_positional = self.evaluate(d.log_prob(value))
lp_value_named = self.evaluate(d.log_prob(value=value))
self.assertAllEqual(lp_value_positional, lp_value_named)
lp_args = self.evaluate(d.log_prob(*value))
self.assertAllEqual(lp_value_positional, lp_args)
lp_kwargs = self.evaluate(d.log_prob(**dict(value_with_names)))
self.assertAllEqual(lp_value_positional, lp_kwargs)
lp_args_then_kwargs = self.evaluate(d.log_prob(
*value[:1], **dict(value_with_names[1:])))
self.assertAllEqual(lp_value_positional, lp_args_then_kwargs)
with self.assertRaisesRegexp(
ValueError, r'Joint distribution expected values for [0-9] components'):
d.log_prob(badvar=27.)
with self.assertRaisesRegexp(TypeError, 'unexpected keyword argument'):
d.log_prob(*value, extra_arg=27.)
def test_can_call_prob_with_args_and_kwargs(self):
d = tfd.JointDistributionSequential(basic_model_fn(), validate_args=True)
a, e, x = self.evaluate(d.sample([2, 3], seed=test_util.test_seed()))
prob_value_positional = self.evaluate(d.prob([a, e, x]))
prob_value_named = self.evaluate(d.prob(value=[a, e, x]))
self.assertAllEqual(prob_value_positional, prob_value_named)
prob_args = self.evaluate(d.prob(a, e, x))
self.assertAllEqual(prob_value_positional, prob_args)
prob_kwargs = self.evaluate(d.prob(a=a, e=e, x=x))
self.assertAllEqual(prob_value_positional, prob_kwargs)
prob_args_then_kwargs = self.evaluate(d.prob(a, e=e, x=x))
self.assertAllEqual(prob_value_positional, prob_args_then_kwargs)
def test_uses_structure_to_convert_nested_lists(self):
joint = tfd.JointDistributionSequential([
tfd.MultivariateNormalDiag([0., 0.], [1., 1.]),
lambda a: tfd.JointDistributionSequential([ # pylint: disable=g-long-lambda
tfd.JointDistributionSequential([
tfd.Normal(a[..., 0], 1.)]),
tfd.Normal(a[..., 1], 1.)])
])
x = [tf.convert_to_tensor([4., 2.]), [[1.], 3.]]
x_with_tensor_as_list = [[4., 2.], [[1.], 3.]]
lp = self.evaluate(joint.log_prob(x))
lp_with_tensor_as_list = self.evaluate(
joint.log_prob(x_with_tensor_as_list))
self.assertAllClose(lp, lp_with_tensor_as_list, rtol=3e-7, atol=5e-7)
def test_matrix_factorization(self):
# A matrix factorization model based on
# Probabilistic Matrix Factorization by
# Ruslan Salakhutdinov and Andriy Mnih
# https://papers.nips.cc/paper/3208-probabilistic-matrix-factorization.pdf
#
# users
# +-----3-+
# | U |
# | | +-------+
# | | | | |
# | +-->R<--+ |
# | | | | |
# +---|---+ | |
# | V |
# +-5-----+
# items
n_users = 3
n_items = 5
n_factors = 2
user_trait_prior_scale = 10.
item_trait_prior_scale = 10.
observation_noise_prior_scale = 1.
dist = tfd.JointDistributionSequential([
tfd.Sample(tfd.Normal(loc=0.,
scale=user_trait_prior_scale),
sample_shape=[n_factors, n_users]), # U
tfd.Sample(tfd.Normal(loc=0.,
scale=item_trait_prior_scale),
sample_shape=[n_factors, n_items]), # V
lambda item_traits, user_traits: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(loc=tf.matmul(user_traits, item_traits, # R
adjoint_a=True),
scale=observation_noise_prior_scale),
reinterpreted_batch_ndims=2)], validate_args=True)
self.assertAllEqual(dist.event_shape, [[2, 3], [2, 5], [3, 5]])
z = dist.sample(seed=test_util.test_seed())
self.assertAllEqual(tf.shape(z[0]), [2, 3])
self.assertAllEqual(tf.shape(z[1]), [2, 5])
self.assertAllEqual(tf.shape(z[2]), [3, 5])
lp = dist.log_prob(z)
self.assertEqual(lp.shape, [])
z = dist.sample((7, 9), seed=test_util.test_seed())
self.assertAllEqual(tf.shape(z[0]), [7, 9, 2, 3])
self.assertAllEqual(tf.shape(z[1]), [7, 9, 2, 5])
self.assertAllEqual(tf.shape(z[2]), [7, 9, 3, 5])
lp = dist.log_prob(z)
self.assertEqual(lp.shape, [7, 9])
@test_util.jax_disable_variable_test
def test_latent_dirichlet_allocation(self):
"""Tests Latent Dirichlet Allocation joint model.
The LDA generative process can be written as:
```none
N[i] ~ Poisson(xi)
theta[i] ~ Dirichlet(alpha)
Z[i] ~ Multinomial(N[i], theta[i])
for k in 1...K:
X[i,k] ~ Multinomial(Z[i, k], beta[j])
```
Typically `xi` is specified and `alpha`, `beta` are fit using type-II
maximum likelihood estimators.
Reference: http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
"""
# Hyperparameters.
num_topics = 3
num_words = 10
avg_doc_length = 5
u = tfd.Uniform(low=-1., high=1.)
alpha = tfp.util.TransformedVariable(
u.sample([num_topics], seed=test_util.test_seed()),
tfb.Softplus(), name='alpha')
beta = tf.Variable(u.sample([num_topics, num_words],
seed=test_util.test_seed()), name='beta')
# LDA Model.
# Note near 1:1 with mathematical specification. The main distinction is the
# use of Independent--this lets us easily aggregate multinomials across
# topics (and in any "shape" of documents).
lda = tfd.JointDistributionSequential(
[
tfd.Poisson(rate=avg_doc_length), # n
tfd.Dirichlet(concentration=alpha), # theta
lambda theta, n: tfd.Multinomial(total_count=n, probs=theta), # z
lambda z: tfd.Independent( # x pylint: disable=g-long-lambda
tfd.Multinomial(total_count=z, logits=beta),
reinterpreted_batch_ndims=1),
],
validate_args=True)
# Now, let's sample some "documents" and compute the log-prob of each.
docs_shape = [2, 4] # That is, 8 docs in the shape of [2, 4].
[n, theta, z, x] = lda.sample(docs_shape, seed=test_util.test_seed())
log_probs = lda.log_prob([n, theta, z, x])
self.assertEqual(docs_shape, log_probs.shape)
# Verify we correctly track trainable variables.
self.assertLen(lda.trainable_variables, 2)
self.assertIs(alpha.pretransformed_input, lda.trainable_variables[0])
self.assertIs(beta, lda.trainable_variables[1])
# Ensure we can compute gradients.
with tf.GradientTape() as tape:
# Note: The samples are not taped, hence implicitly "stop_gradient."
negloglik = -lda.log_prob([n, theta, z, x])
grads = tape.gradient(negloglik, lda.trainable_variables)
self.assertLen(grads, 2)
self.assertAllEqual((alpha.pretransformed_input.shape, beta.shape),
(grads[0].shape, grads[1].shape))
self.assertAllNotNone(grads)
def test_poisson_switchover_graphical_model(self):
# Build a pretend dataset.
seed = test_util.test_seed_stream(salt='poisson')
n = [43, 31]
count_data = tf.cast(
tf.concat([
tfd.Poisson(rate=15.).sample(n[0], seed=seed()),
tfd.Poisson(rate=25.).sample(n[1], seed=seed()),
], axis=0),
dtype=tf.float32)
count_data = self.evaluate(count_data)
n = np.sum(n)
# Make model.
gather = lambda tau, lambda_: tf.gather( # pylint: disable=g-long-lambda
lambda_,
indices=tf.cast(
tau[..., tf.newaxis] < tf.linspace(0., 1., n),
dtype=tf.int32),
# TODO(b/139204153): Remove static value hack after bug closed.
batch_dims=int(tf.get_static_value(tf.rank(tau))))
alpha = tf.math.reciprocal(tf.reduce_mean(count_data))
joint = tfd.JointDistributionSequential([
tfd.Sample(tfd.Exponential(rate=alpha),
sample_shape=[2]),
tfd.Uniform(),
lambda tau, lambda_: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Poisson(rate=gather(tau, lambda_)),
reinterpreted_batch_ndims=1),
], validate_args=True)
# Verify model correctly "compiles".
batch_shape = [3, 4]
self.assertEqual(
batch_shape,
joint.log_prob(
joint.sample(batch_shape, seed=test_util.test_seed())).shape)
def test_default_event_space_bijector(self):
# Define dist parameters that also parameterize the event space
# bijector outside of the distribution constructor to ensure that
# bijector caching works.
low = tf.constant([0., 0.], dtype=tf.float32)
dist_fns = [
tfd.LogNormal(0., 1., validate_args=True),
lambda h: tfd.Independent(tfd.Uniform(low, h, validate_args=True)),
lambda s: tfd.Normal(0., s, validate_args=True)
]
jd = tfd.JointDistributionSequential(dist_fns, validate_args=True)
joint_bijector = jd.experimental_default_event_space_bijector()
# define a sample in the unconstrained space and construct the component
# distributions
x = [tf.constant(w) for w in [-0.2, [0.3, 0.1], -1.]]
bijectors = []
y = []
b = dist_fns[0].experimental_default_event_space_bijector()
bijectors.append(b)
y.append(b(x[0]))
for i in range(1, 3):
b = dist_fns[i](y[i - 1]).experimental_default_event_space_bijector()
y.append(b(x[i]))
bijectors.append(b)
# Test forward and inverse values.
self.assertAllClose(joint_bijector.forward(x), y)
self.assertAllClose(joint_bijector.inverse(y), x)
# Test forward log det Jacobian via finite differences.
event_ndims = [0, 1, 0]
delta = 0.01
fldj = joint_bijector.forward_log_det_jacobian(x, event_ndims)
forward_plus = [b.forward(x[i] + delta) for i, b in enumerate(bijectors)]
forward_minus = [b.forward(x[i] - delta) for i, b in enumerate(bijectors)]
fldj_fd = tf.reduce_sum(
[tf.reduce_sum(tf.math.log((p - m) / (2. * delta)))
for p, m in zip(forward_plus, forward_minus)])
self.assertAllClose(self.evaluate(fldj), self.evaluate(fldj_fd), rtol=1e-5)
# Test inverse log det Jacobian via finite differences.
delta = 0.001
y = [tf.constant(w) for w in [0.8, [0.4, 0.3], -0.05]]
ildj = joint_bijector.inverse_log_det_jacobian(y, event_ndims)
bijectors = []
bijectors.append(dist_fns[0].experimental_default_event_space_bijector())
for i in range(1, 3):
bijectors.append(
dist_fns[i](y[i - 1]).experimental_default_event_space_bijector())
inverse_plus = [b.inverse(y[i] + delta) for i, b in enumerate(bijectors)]
inverse_minus = [b.inverse(y[i] - delta) for i, b in enumerate(bijectors)]
ildj_fd = tf.reduce_sum(
[tf.reduce_sum(tf.math.log((p - m) / (2. * delta)))
for p, m in zip(inverse_plus, inverse_minus)])
self.assertAllClose(self.evaluate(ildj), self.evaluate(ildj_fd), rtol=1e-4)
# test event shapes
event_shapes = [[2, None], [2], [4]]
self.assertAllEqual(
[shape.as_list()
for shape in joint_bijector.forward_event_shape(event_shapes)],
[bijectors[i].forward_event_shape(event_shapes[i]).as_list()
for i in range(3)])
self.assertAllEqual(
[shape.as_list()
for shape in joint_bijector.inverse_event_shape(event_shapes)],
[bijectors[i].inverse_event_shape(event_shapes[i]).as_list()
for i in range(3)])
event_shapes = [[3], [3, 2], []]
forward_joint_event_shape = joint_bijector.forward_event_shape_tensor(
event_shapes)
inverse_joint_event_shape = joint_bijector.inverse_event_shape_tensor(
event_shapes)
for i in range(3):
self.assertAllEqual(
self.evaluate(forward_joint_event_shape[i]),
self.evaluate(
bijectors[i].forward_event_shape_tensor(event_shapes[i])))
self.assertAllEqual(
self.evaluate(inverse_joint_event_shape[i]),
self.evaluate(
bijectors[i].inverse_event_shape_tensor(event_shapes[i])))
# test shared cache
joint_bijector_2 = jd.experimental_default_event_space_bijector()
y_1 = joint_bijector.forward(x)
y_2 = joint_bijector_2.forward(x)
for a, b in zip(y_1, y_2):
self.assertIs(a, b)
x_1 = joint_bijector.inverse(y_1)
x_2 = joint_bijector_2.inverse(y_1)
for a, b in zip(x_1, x_2):
self.assertIs(a, b)
def test_sample_kwargs(self):
joint = tfd.JointDistributionSequential([
tfd.Normal(0., 1.),
lambda a: tfd.Normal(a, 1.),
lambda b, a: tfd.Normal(a + b, 1.)
])
seed = test_util.test_seed()
tf.random.set_seed(seed)
samples = joint.sample(seed=seed, a=1.)
# Check the first value is actually 1.
self.assertEqual(1., self.evaluate(samples[0]))
# Check the sample is reproducible using the `value` argument.
tf.random.set_seed(seed)
samples_tuple = joint.sample(seed=seed, value=[1., None, None])
self.assertAllEqual(self.evaluate(samples), self.evaluate(samples_tuple))
# Make sure to throw an exception if strange keywords are passed.
expected_error = (
'Found unexpected keyword arguments. Distribution names are\n'
'a, b, x\n'
'but received\n'
'z\n'
'These names were invalid:\n'
'z')
with self.assertRaisesRegex(ValueError, expected_error):
joint.sample(seed=seed, z=2.)
# Also raise if value and keywords are passed
with self.assertRaisesRegex(
ValueError, r'Supplied both `value` and keyword arguments .*'):
joint.sample(seed=seed, a=1., value=[1., None, None])
def test_creates_valid_coroutine(self):
joint = tfd.JointDistributionSequential(
[
tfd.Poisson(rate=100.),
tfd.Dirichlet(concentration=[1., 1.]),
lambda theta, n: tfd.Multinomial(total_count=n, probs=theta),
lambda z: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Multinomial(total_count=z, logits=[[0., 1., 2.],
[3., 4., 5.]]),
reinterpreted_batch_ndims=1),
],
validate_args=True)
sample_shapes = [
x.shape for x in joint._model_flatten(
joint.sample([5], seed=test_util.test_seed()))]
jdc = tfd.JointDistributionCoroutine(joint._model_coroutine)
jdc_sample_shapes = [
x.shape for x in jdc._model_flatten(
jdc.sample([5], seed=test_util.test_seed()))]
self.assertAllEqualNested(sample_shapes, jdc_sample_shapes)
class ResolveDistributionNamesTest(test_util.TestCase):
def test_dummy_names_are_unique(self):
dist_names = joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, None, None],
dist_names=None,
leaf_name='x',
instance_names=[None, None, None])
self.assertAllEqual(dist_names, ['x2', 'x1', 'x'])
dist_names = joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, None, None],
dist_names=None,
leaf_name='x',
instance_names=['x', 'x1', None])
self.assertAllEqual(dist_names, ['x', 'x1', 'x2'])
def test_ignores_trivial_names(self):
# Should ignore a trivial reference downstream of the real name `z`.
dist_names = joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, ['z'], ['w', '_']],
dist_names=None,
leaf_name='y',
instance_names=[None, None, None])
self.assertAllEqual(dist_names, ['z', 'w', 'y'])
# Trivial reference upstream of the real name `z`.
dist_names = joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, ['_'], ['w', 'z']],
dist_names=None,
leaf_name='y',
instance_names=[None, None, None])
self.assertAllEqual(dist_names, ['z', 'w', 'y'])
# The only direct reference is trivial, but we have an instance name.
dist_names = joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, ['_']],
dist_names=None,
leaf_name='y',
instance_names=['z', None])
self.assertAllEqual(dist_names, ['z', 'y'])
def test_inconsistent_names_raise_error(self):
with self.assertRaisesRegexp(ValueError, 'Inconsistent names'):
# Refers to first variable as both `z` and `x`.
joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, ['z'], ['x', 'w']],
dist_names=None,
leaf_name='y',
instance_names=[None, None, None])
with self.assertRaisesRegexp(ValueError, 'Inconsistent names'):
# Refers to first variable as `x`, but it was explicitly named `z`.
joint_distribution_sequential._resolve_distribution_names(
dist_fn_args=[None, ['x']],
dist_names=None,
leaf_name='y',
instance_names=['z', None])
@test_util.jax_disable_test_missing_functionality('stateful samplers')
@test_util.numpy_disable_test_missing_functionality('stateful samplers')
def test_legacy_dists(self):
class StatefulNormal(tfd.Normal):
def _sample_n(self, n, seed=None):
return self.loc + self.scale * tf.random.normal(
tf.concat([[n], self.batch_shape_tensor()], axis=0),
seed=seed)
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
StatefulNormal(loc=0, scale=2.),
tfd.Normal, # Or, `lambda loc, scale: tfd.Normal(loc, scale)`.
lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12),
],
validate_args=True)
warnings.simplefilter('always')
with warnings.catch_warnings(record=True) as w:
d.sample(seed=test_util.test_seed())
self.assertRegexpMatches(
str(w[0].message),
r'Falling back to stateful sampling for distribution #2.*of type.*'
r'StatefulNormal.*component name "loc" and `dist.name` "Normal"',
msg=w)
@test_util.jax_disable_test_missing_functionality('stateful samplers')
@test_util.numpy_disable_test_missing_functionality('stateful samplers')
def test_legacy_dists_stateless_seed_raises(self):
class StatefulNormal(tfd.Normal):
def _sample_n(self, n, seed=None):
return self.loc + self.scale * tf.random.normal(
tf.concat([[n], self.batch_shape_tensor()], axis=0),
seed=seed)
d = tfd.JointDistributionSequential(
[
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
StatefulNormal(loc=0, scale=2.),
tfd.Normal, # Or, `lambda loc, scale: tfd.Normal(loc, scale)`.
lambda m: tfd.Sample(tfd.Bernoulli(logits=m), 12),
],
validate_args=True)
with self.assertRaisesRegexp(TypeError, r'Expected int for argument'):
d.sample(seed=samplers.zeros_seed())
if __name__ == '__main__':
tf.test.main()
|
py | 7dffe275425da6bf191fa16ff917f952c783dcf5 | from jjcli import *
import re
import unidecode
def load_names(filename: str) -> [str]:
pass
def load_names_and_emails(filename: str) -> [(str, str)]:
with open(filename, 'r', encoding='utf-8') as f:
#nomes_file = unidecode.unidecode(f.read())
result = []
for linha in f:
linha = linha.strip()
if search(r'::', linha):
nome,email = split(r'\s*::\s*', linha)
result.append((nome,email))
return result
def avalia1(solucao: [(str,str)], calc) -> float:
nomes = [x for x,y in solucao]
soma = 0
for n,e in solucao:
r = calc(e, nomes)
if n in r:
i = r.index(n)
a = 1/(i+1)
soma += a
return soma / len(solucao)
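# Note (added commentary): avalia1 is a mean-reciprocal-rank style score. For
# each (name, email) pair it ranks the true name inside the candidate list
# returned by calc(email, nomes) and credits 1/(rank+1), then averages over all
# pairs: always ranked first gives 1.0, always second gives 0.5, never found
# gives 0.0.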
def calcular_nome1(email: str, nomes: [str]) -> [str]:
stripped_email = sub("[^a-zA-Z]+", "", email)
re_e_final = sub(r"(\w)", r"\1(.* |)", stripped_email)
#print(re_e_final)
resultados_email = []
for nome in nomes:
nome_aux = unidecode.unidecode(nome)
if re.search(rf'{re_e_final}', nome_aux, flags=re.IGNORECASE):
resultados_email.append(nome)
return resultados_email
def calcular_nome2(email: str, nomes: [str]) -> [(str,float)]:
pass
def calcular_nome3(email: str, nomes: [str]) -> str:
pass
def main():
s = load_names_and_emails('nomes.txt')
nomes = [x for x,y in s]
#print(s)
e = calcular_nome1('jno', nomes)
#print(e)
a = avalia1( s , calcular_nome1)
print(a)
main()
|
py | 7dffe30e613eb0145a116d406ad6c9d973c47047 | '''
New integration test: create 1,000 simple VM start schedulers.
@author: quarkonics
'''
import os
import time
import sys
import threading
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
test_stub = test_lib.lib_get_test_stub()
vm = None
schd_jobs = []
schd_triggers = []
def create_start_vm_scheduler(vm_uuid, start_date, ops_id):
global schd_jobs
global schd_triggers
schd_job = schd_ops.create_scheduler_job('simple_start_vm_scheduler_%s' % (ops_id), 'simple_stop_vm_scheduler', vm_uuid, 'startVm', None)
schd_trigger = schd_ops.create_scheduler_trigger('simple_stop_vm_scheduler', start_date+100+ops_id, None, 1000, 'simple')
schd_ops.add_scheduler_job_to_trigger(schd_trigger.uuid, schd_job.uuid)
schd_jobs.append(schd_job)
schd_triggers.append(schd_trigger)
#schds.append(vm_ops.start_vm_scheduler(vm_uuid, 'simple', 'simple_start_vm_scheduler_%s' % (ops_id), start_date+100+ops_id, 1000))
def delete_scheduler_job(schd_job_uuid):
schd_ops.del_scheduler_job(schd_job_uuid)
def delete_scheduler_trigger(schd_trigger_uuid):
schd_ops.del_scheduler_trigger(schd_trigger_uuid)
def test():
global vm
global schds
vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
start_date = int(time.time())
test_util.test_logger('Setup start VM scheduler')
for ops_id in range(1000):
thread = threading.Thread(target=create_start_vm_scheduler, args=(vm.get_vm().uuid, start_date, ops_id, ))
while threading.active_count() > 10:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
time.sleep(0.1)
test_stub.sleep_util(start_date+200)
start_msg_mismatch = 0
for i in range(0, 100):
if not test_lib.lib_find_in_local_management_server_log(start_date+100+i, '[msg send]: {"org.zstack.header.vm.StartVmInstanceMsg', vm.get_vm().uuid):
start_msg_mismatch += 1
test_util.test_warn('StartVmInstanceMsg is expected to execute at %s' % (start_date+100+i))
if start_msg_mismatch > 5:
test_util.test_fail('%s of 100 StartVmInstanceMsg not executed at expected timestamp' % (start_msg_mismatch))
for schd_job in schd_jobs:
thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
while threading.active_count() > 10:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
time.sleep(0.1)
for schd_trigger in schd_triggers:
thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
while threading.active_count() > 10:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
time.sleep(0.1)
try:
vm.destroy()
except:
test_util.test_logger('expected exception when destroy VM since too many queued task')
test_util.test_pass('Create 1000 Simple VM Start Scheduler Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global schd_jobs
global schd_triggers
for schd_job in schd_jobs:
thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
while threading.active_count() > 10:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
time.sleep(0.1)
for schd_trigger in schd_triggers:
thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
while threading.active_count() > 10:
time.sleep(0.5)
exc = sys.exc_info()
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
time.sleep(0.1)
if vm:
try:
vm.destroy()
except:
test_util.test_logger('expected exception when destroy VM since too many queued task')
|
py | 7dffe369f6b207b347a62a3651c3fbf3c0c3fcd0 | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API search object."""
import datetime
import json
import logging
import pandas
from . import error
from . import resource
from . import searchtemplate
logger = logging.getLogger('timesketch_api.search')
class Chip:
"""Class definition for a query filter chip."""
# The type of a chip that is defined.
CHIP_TYPE = ''
# The chip value defines what property or attribute of the
# chip class will be used to generate the chip value.
CHIP_VALUE = ''
# The value of the chip field.
CHIP_FIELD = ''
def __init__(self):
"""Initialize the chip."""
self._active = True
self._operator = 'must'
self._chip_field = self.CHIP_FIELD
@property
def active(self):
"""A property that returns whether the chip is active or not."""
return self._active
@active.setter
def active(self, active):
"""Decide whether the chip is active or disabled."""
self._active = bool(active)
@property
def chip(self):
"""A property that returns the chip value."""
return {
'field': self._chip_field,
'type': self.CHIP_TYPE,
'operator': self._operator,
'active': self._active,
'value': getattr(self, self.CHIP_VALUE, ''),
}
def from_dict(self, chip_dict):
"""Configure the chip from a dictionary."""
raise NotImplementedError
def set_include(self):
"""Configure the chip so the content needs to be included in results."""
self._operator = 'must'
def set_exclude(self):
"""Configure the chip so content needs to be excluded in results."""
self._operator = 'must_not'
def set_optional(self):
"""Configure the chip so the content is optional in results."""
self._operator = 'should'
def set_active(self):
"""Set the chip as active."""
self._active = True
def set_disable(self):
"""Disable the chip."""
self._active = False
class DateIntervalChip(Chip):
"""A date interval chip."""
CHIP_TYPE = 'datetime_interval'
CHIP_VALUE = 'interval'
_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def __init__(self):
"""Initialize the chip."""
super().__init__()
self._date = None
self._before = 5
self._after = 5
self._unit = 'm'
def add_interval(self, before, after=None, unit='m'):
"""Set the interval of the chip.
Args:
before (int): the number of units that should be included
before the date.
after (int): optional number of units after the date. If not
provided the value of before is used.
unit (str): optional string with the unit of interval. This can
be s for seconds, m for minutes, d for days and h for hours.
The default value is m (minutes).
Raises:
ValueError if the unit is not correctly formed.
"""
if after is None:
after = before
self.unit = unit
self._before = before
self._after = after
@property
def after(self):
"""Property that returns the time interval after the date."""
return self._after
@after.setter
def after(self, after):
"""Make changes to the time interval after the date."""
self._after = after
@property
def before(self):
"""Property that returns the time interval before the date."""
return self._before
@before.setter
def before(self, before):
"""Make changes to the time interval before the date."""
self._before = before
@property
def date(self):
"""Property that returns back the date."""
if not self._date:
return ''
return self._date.strftime(self._DATE_FORMAT)
@date.setter
def date(self, date):
"""Make changes to the date."""
try:
dt = datetime.datetime.strptime(date, self._DATE_FORMAT)
except ValueError as exc:
logger.error(
'Unable to add date chip, wrong date format', exc_info=True)
raise ValueError('Wrong date format') from exc
self._date = dt
def from_dict(self, chip_dict):
"""Configure the chip from a dictionary."""
value = chip_dict.get('value')
if not value:
return
date, before, after = value.split()
self.unit = before[-1]
self.date = date
self.before = int(before[1:-1])
self.after = int(after[1:-1])
@property
def interval(self):
"""A property that returns back the full interval."""
return (
f'{self.date} -{self.before}{self.unit} +{self.after}{self.unit}')
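# Example of the serialized form (illustrative): with date
# '2021-06-01T10:00:00', before=5, after=10 and unit 'm', the interval value is
# '2021-06-01T10:00:00 -5m +10m', which from_dict() above splits back into its
# date, before and after parts.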
@property
def unit(self):
"""Property that returns back the unit used."""
return self._unit
@unit.setter
def unit(self, unit):
"""Make changes to the unit."""
if unit not in ('s', 'm', 'd', 'h'):
raise ValueError(
'Unable to add interval, needs to be one of: '
's (seconds), m (minutes), h (hours) or d (days)')
self._unit = unit
class DateRangeChip(Chip):
"""A date range chip."""
CHIP_TYPE = 'datetime_range'
CHIP_VALUE = 'date_range'
_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def __init__(self):
"""Initialize the date range."""
super().__init__()
self._start_date = None
self._end_date = None
def add_end_time(self, end_time):
"""Add an end time to the range.
Args:
end_time (str): date string using the format '%Y-%m-%dT%H:%M:%S'
Raises:
ValueError: if the date format is incorrectly formatted.
"""
try:
dt = datetime.datetime.strptime(end_time, self._DATE_FORMAT)
except ValueError as exc:
logger.error(
'Unable to add date chip, wrong date format', exc_info=True)
raise ValueError('Wrong date format') from exc
self._end_date = dt
def add_start_time(self, start_time):
"""Add a start time to the range.
Args:
start_time (str): date string using the format '%Y-%m-%dT%H:%M:%S'
Raises:
ValueError: if the date format is incorrectly formatted.
"""
try:
dt = datetime.datetime.strptime(start_time, self._DATE_FORMAT)
except ValueError as exc:
logger.error(
'Unable to add date chip, wrong date format', exc_info=True)
raise ValueError('Wrong date format') from exc
self._start_date = dt
@property
def end_time(self):
"""Property that returns the end time of a range."""
if not self._end_date:
return ''
return self._end_date.strftime(self._DATE_FORMAT)
@end_time.setter
def end_time(self, end_time):
"""Sets the new end time."""
self.add_end_time(end_time)
@property
def date_range(self):
"""Property that returns back the range."""
return f'{self.start_time},{self.end_time}'
@date_range.setter
def date_range(self, date_range):
"""Sets the new range of the date range chip."""
start_time, end_time = date_range.split(',')
self.add_start_time(start_time)
self.add_end_time(end_time)
def from_dict(self, chip_dict):
"""Configure the chip from a dictionary."""
chip_value = chip_dict.get('value')
if not chip_value:
return
start, end = chip_value.split(',')
self.start_time = start
self.end_time = end
@property
def start_time(self):
"""Property that returns the start time of a range."""
if not self._start_date:
return ''
return self._start_date.strftime(self._DATE_FORMAT)
@start_time.setter
def start_time(self, start_time):
"""Sets the new start time of a range."""
self.add_start_time(start_time)
class LabelChip(Chip):
"""Label chip."""
CHIP_TYPE = 'label'
CHIP_VALUE = 'label'
def __init__(self):
"""Initialize the chip."""
super().__init__()
self._label = ''
def from_dict(self, chip_dict):
"""Configure the chip from a dictionary."""
chip_value = chip_dict.get('value')
if not chip_value:
return
self.label = chip_value
@property
def label(self):
"""Property that returns back the label."""
return self._label
@label.setter
def label(self, label):
"""Make changes to the label."""
self._label = label
def use_comment_label(self):
"""Use the comment label."""
self._label = '__ts_comment'
def use_star_label(self):
"""Use the star label."""
self._label = '__ts_star'
class TermChip(Chip):
"""Term chip definition."""
CHIP_TYPE = 'term'
CHIP_VALUE = 'query'
def __init__(self):
"""Initialize the chip."""
super().__init__()
self._query = ''
@property
def field(self):
"""Property that returns back the field used to match against."""
return self._chip_field
@field.setter
def field(self, field):
"""Make changes to the field used to match against."""
self._chip_field = field
def from_dict(self, chip_dict):
"""Configure the term chip from a dictionary."""
chip_value = chip_dict.get('value')
if not chip_value:
return
self.field = chip_dict.get('field')
self.query = chip_value
@property
def query(self):
"""Property that returns back the query."""
return self._query
@query.setter
def query(self, query):
"""Make changes to the query."""
self._query = query
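# Usage sketch (illustrative; assumes a `sketch` object from the Timesketch API
# client): chips are built stand-alone and attached to a Search via add_chip(),
# for example:
#
#   search_obj = Search(sketch)
#   chip = TermChip()
#   chip.field = 'domain'
#   chip.query = 'example.com'
#   chip.set_exclude()          # match events where the term must NOT appear
#   search_obj.add_chip(chip)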
class Search(resource.SketchResource):
"""Search object."""
DEFAULT_SIZE_LIMIT = 10000
def __init__(self, sketch):
resource_uri = f'sketches/{sketch.id}/explore/'
super().__init__(sketch=sketch, resource_uri=resource_uri)
self._aggregations = ''
self._chips = []
self._created_at = ''
self._description = ''
self._indices = '_all'
self._max_entries = self.DEFAULT_SIZE_LIMIT
self._name = ''
self._query_dsl = ''
self._query_filter = {}
self._query_string = ''
self._raw_response = None
self._return_fields = ''
self._scrolling = None
self._searchtemplate = ''
self._total_elastic_size = 0
self._updated_at = ''
def _extract_chips(self, query_filter):
"""Extract chips from a query_filter."""
chips = query_filter.get('chips', [])
if not chips:
return
for chip_dict in chips:
chip_type = chip_dict.get('type')
if not chip_type:
continue
chip = None
if chip_type == 'datetime_interval':
chip = DateIntervalChip()
elif chip_type == 'datetime_range':
chip = DateRangeChip()
elif chip_type == 'label':
chip = LabelChip()
elif chip_type == 'term':
chip = TermChip()
if not chip:
continue
chip.from_dict(chip_dict)
active = chip_dict.get('active', True)
chip.active = active
operator = chip_dict.get('operator', 'must')
if operator == 'must':
chip.set_include()
elif operator == 'must_not':
chip.set_exclude()
self.add_chip(chip)
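# An illustrative chip dictionary (hypothetical values, not taken from the
# Timesketch codebase) showing the keys _extract_chips() reads above
# ('type', 'value', 'field', 'active', 'operator'):
#
#   query_filter = {
#       'chips': [
#           {'type': 'label', 'value': '__ts_star',
#            'active': True, 'operator': 'must'},
#           {'type': 'term', 'field': 'domain',
#            'value': 'example.com', 'operator': 'must_not'},
#       ]
#   }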
def _execute_query(self, file_name='', count=False):
"""Execute a search request and store the results.
Args:
file_name (str): optional path to a file where all the
results will be saved. If not provided the results
will be stored in the search object.
count (bool): optional boolean that determines whether
we want to execute the query or only count the
number of events that the query would produce.
"""
query_filter = self.query_filter
if not isinstance(query_filter, dict):
raise ValueError(
'Unable to query with a query filter that isn\'t a dict.')
stop_size = self._max_entries
scrolling = not bool(stop_size and (
stop_size < self.DEFAULT_SIZE_LIMIT))
if self.scrolling is not None:
scrolling = self.scrolling
form_data = {
'query': self._query_string,
'filter': query_filter,
'dsl': self._query_dsl,
'count': count,
'fields': self._return_fields,
'enable_scroll': scrolling,
'file_name': file_name,
}
response = self.api.session.post(
f'{self.api.api_root}/{self.resource_uri}', json=form_data)
if not error.check_return_status(response, logger):
error.error_message(
response, message='Unable to query results',
error=ValueError)
if file_name:
with open(file_name, 'wb') as fw:
fw.write(response.content)
return
response_json = error.get_response_json(response, logger)
if count:
meta = response_json.get('meta', {})
self._total_elastic_size = meta.get('total_count', 0)
return
scroll_id = response_json.get('meta', {}).get('scroll_id', '')
form_data['scroll_id'] = scroll_id
count = len(response_json.get('objects', []))
total_count = count
while count > 0:
if self._max_entries and total_count >= self._max_entries:
break
if not scroll_id:
logger.debug('No scroll ID, will stop.')
break
more_response = self.api.session.post(
f'{self.api.api_root}/{self.resource_uri}', json=form_data)
if not error.check_return_status(more_response, logger):
error.error_message(
response, message='Unable to query results',
error=ValueError)
more_response_json = error.get_response_json(more_response, logger)
count = len(more_response_json.get('objects', []))
total_count += count
response_json['objects'].extend(
more_response_json.get('objects', []))
more_meta = more_response_json.get('meta', {})
added_time = more_meta.get('es_time', 0)
response_json['meta']['es_time'] += added_time
self._total_elastic_size = response_json.get(
'meta', {}).get('es_total_count', 0)
if self._total_elastic_size != total_count:
logger.info(
'%d results were returned, but '
'%d records matched the search query',
total_count, self._total_elastic_size)
self._raw_response = response_json
def add_chip(self, chip):
"""Add a chip to the ..."""
self._chips.append(chip)
self.commit()
def add_date_range(self, start_time, end_time):
"""Add a date range chip to the search query.
Args:
start_time (str): a string with the start time of the range,
the format should be '%Y-%m-%dT%H:%M:%S'
end_time (str): a string with the end time of the range,
the format should be '%Y-%m-%dT%H:%M:%S'
"""
chip = DateRangeChip()
chip.start_time = start_time
chip.end_time = end_time
self.add_chip(chip)
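# Minimal usage sketch (hypothetical values) matching the documented
# '%Y-%m-%dT%H:%M:%S' format:
#
#   search_obj.add_date_range('2021-06-01T00:00:00', '2021-06-01T23:59:59')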
@property
def chips(self):
"""Property that returns all the chips in the search object."""
return self._chips
def commit(self):
"""Commit changes to the search object."""
self._raw_response = None
super().commit()
@property
def created_at(self):
"""Property that returns back the creation time of a search."""
return self._created_at
def delete(self):
"""Deletes the saved search from the store."""
if not self._resource_id:
logger.warning(
'Unable to delete the saved search, it does not appear to be '
'saved in the first place.')
return False
resource_url = (
f'{self.api.api_root}/sketches/{self._sketch.id}/views/'
f'{self._resource_id}/')
response = self.api.session.delete(resource_url)
return error.check_return_status(response, logger)
@property
def description(self):
"""Property that returns back the description of the saved search."""
return self._description
@description.setter
def description(self, description):
"""Make changes to the saved search description field."""
self._description = description
self.commit()
@property
def expected_size(self):
"""Property that returns the expected size of the search query."""
if self._total_elastic_size:
return self._total_elastic_size
self._execute_query(count=True)
return self._total_elastic_size
def from_manual( # pylint: disable=arguments-differ
self,
query_string=None,
query_dsl=None,
query_filter=None,
return_fields=None,
max_entries=None,
**kwargs):
"""Explore the sketch.
Args:
query_string (str): Elasticsearch query string.
query_dsl (str): Elasticsearch query DSL as JSON string.
query_filter (dict): Filter for the query as a dict.
return_fields (str): A comma separated string with a list of fields
that should be included in the response. Optional and defaults
to None.
max_entries (int): Optional integer denoting a best effort to limit
the output size to the number of events. Events are read in
10k batches, so the response may contain more events than
this number denotes; this is a best effort.
kwargs (dict[str, object]): Depending on the resource they may
require different sets of arguments to be able to run a raw
API request.
Raises:
ValueError: if unable to query for the results.
RuntimeError: if the query is missing needed values, or if the
sketch is archived.
"""
super().from_manual(**kwargs)
if not (query_string or query_filter or query_dsl):
raise RuntimeError('You need to supply a query')
self._username = self.api.current_user.username
self._name = 'From Explore'
self._description = 'From Explore'
if query_filter:
self.query_filter = query_filter
self._query_string = query_string
self.query_dsl = query_dsl
self._return_fields = return_fields
if max_entries:
self._max_entries = max_entries
# TODO: Make use of search templates and aggregations.
#self._searchtemplate = data.get('searchtemplate', 0)
#self._aggregations = data.get('aggregation', 0)
self._created_at = datetime.datetime.now(
datetime.timezone.utc).isoformat()
self._updated_at = self._created_at
self.resource_data = {}
def from_saved(self, search_id): # pylint: disable=arguments-differ
"""Initialize the search object from a saved search.
Args:
search_id: integer value for the saved
search (primary key).
"""
resource_uri = f'sketches/{self._sketch.id}/views/{search_id}/'
resource_data = self.api.fetch_resource_data(resource_uri)
data = resource_data.get('objects', [None])[0]
if not data:
logger.error('Unable to get any data back from a saved search.')
return
label_string = data.get('label_string', '')
if label_string:
self._labels = json.loads(label_string)
else:
self._labels = []
self._aggregations = data.get('aggregation', 0)
self._created_at = data.get('created_at', '')
self._description = data.get('description', '')
self._name = data.get('name', '')
self.query_dsl = data.get('query_dsl', '')
query_filter = data.get('query_filter', '')
if query_filter:
filter_dict = json.loads(query_filter)
if 'fields' in filter_dict:
fields = filter_dict.pop('fields')
return_fields = [x.get('field') for x in fields]
self.return_fields = ','.join(return_fields)
self.query_filter = filter_dict
self._query_string = data.get('query_string', '')
self._resource_id = search_id
self._searchtemplate = data.get('searchtemplate', 0)
self._updated_at = data.get('updated_at', '')
self._username = data.get('user', {}).get('username', 'System')
self.resource_data = data
@property
def indices(self):
"""Return the current set of indices used in the search."""
return self._indices
@indices.setter
def indices(self, indices):
"""Make changes to the current set of indices."""
if indices == '_all':
self._indices = '_all'
self.commit()
return
def _is_string_or_int(item):
return isinstance(item, (str, int))
if not isinstance(indices, list):
logger.warning(
'Indices need to be a list of strings (the indices that were '
'passed in were not a list).')
return
if not all(map(_is_string_or_int, indices)):
logger.warning(
'Indices need to be a list of strings or ints; not all '
'entries in the indices list are valid strings or ints.')
return
if len(indices) == 1 and indices[0] == '_all':
self._indices = '_all'
self.commit()
return
# Indices here can be either a list of timeline names, IDs or a list
# of search indices. We need to verify that these exist before saving
# them.
valid_ids = set()
timeline_indices = {} # Dict[index] = List[name]
timeline_names = {} # Dict[name] = id
for timeline_object in self._sketch.list_timelines():
timeline_indices.setdefault(timeline_object.index_name, [])
timeline_indices[timeline_object.index_name].append(
timeline_object.name)
valid_ids.add(timeline_object.id)
timeline_names[timeline_object.name] = timeline_object.id
new_indices = []
for index in indices:
# Is this an index name, include all timeline IDs.
if index in timeline_indices:
new_indices.extend(timeline_indices[index])
continue
# Is this a timeline ID?
if isinstance(index, int):
if index in valid_ids:
new_indices.append(str(index))
continue
if index.isdigit():
if int(index) in valid_ids:
new_indices.append(index)
continue
# Is this a timeline name?
if index in timeline_names:
new_indices.append(timeline_names[index])
if not new_indices:
logger.warning('No valid indices found, not changing the value.')
return
self._indices = new_indices
self.commit()
@property
def max_entries(self):
"""Return the maximum number of entries in the return value."""
return self._max_entries
@max_entries.setter
def max_entries(self, max_entries):
"""Make changes to the max entries of return values."""
self._max_entries = max_entries
if max_entries < self.DEFAULT_SIZE_LIMIT:
_ = self.query_filter
self._query_filter['size'] = max_entries
self._query_filter['terminate_after'] = max_entries
self.commit()
@property
def name(self):
"""Property that returns the query name."""
return self._name
@name.setter
def name(self, name):
"""Make changes to the saved search name."""
self._name = name
self.commit()
def order_ascending(self):
"""Set the order of objects returned back ascending."""
# Trigger a creation of a query filter if it does not exist.
_ = self.query_filter
self._query_filter['order'] = 'asc'
self.commit()
def order_descending(self):
"""Set the order of objects returned back descending."""
# Trigger a creation of a query filter if it does not exist.
_ = self.query_filter
self._query_filter['order'] = 'desc'
self.commit()
@property
def query_dsl(self):
"""Property that returns back the query DSL."""
return self._query_dsl
@query_dsl.setter
def query_dsl(self, query_dsl):
"""Make changes to the query DSL of the search."""
if query_dsl and isinstance(query_dsl, str):
query_dsl = json.loads(query_dsl)
# Special condition of an empty DSL.
if query_dsl == '""':
query_dsl = ''
self._query_dsl = query_dsl
self.commit()
@property
def query_filter(self):
"""Property that returns the query filter."""
if not self._query_filter:
self._query_filter = {
'size': self.DEFAULT_SIZE_LIMIT,
'terminate_after': self.DEFAULT_SIZE_LIMIT,
'indices': self.indices,
'order': 'asc',
'chips': [],
}
query_filter = self._query_filter
query_filter['chips'] = [x.chip for x in self._chips]
query_filter['indices'] = self.indices
return query_filter
@query_filter.setter
def query_filter(self, query_filter):
"""Make changes to the query filter."""
if isinstance(query_filter, str):
try:
query_filter = json.loads(query_filter)
except json.JSONDecodeError as exc:
raise ValueError('Unable to parse the string as JSON') from exc
if not isinstance(query_filter, dict):
raise ValueError('Query filter needs to be a dict.')
self._query_filter = query_filter
self._extract_chips(query_filter)
self.commit()
@property
def query_string(self):
"""Property that returns back the query string."""
return self._query_string
@query_string.setter
def query_string(self, query_string):
"""Make changes to the query string of a saved search."""
self._query_string = query_string
self.commit()
def remove_chip(self, chip_index):
"""Remove a chip from the saved search."""
chip_len = len(self._chips)
if chip_index >= chip_len:
raise ValueError(
f'Unable to remove chip, only {chip_len} chips stored '
f'(no index {chip_index})')
try:
_ = self._chips.pop(chip_index)
except IndexError as exc:
raise ValueError(
f'Unable to remove index {chip_index}, out of range') from exc
self.commit()
@property
def return_fields(self):
"""Property that returns the return_fields."""
return self._return_fields
@return_fields.setter
def return_fields(self, return_fields):
"""Make changes to the return fields."""
self._return_fields = return_fields
self.commit()
@property
def return_size(self):
"""Return the maximum number of entries in the return value."""
return self._max_entries
@return_size.setter
def return_size(self, return_size):
"""Make changes to the maximum number of entries in the return."""
self._max_entries = return_size
self.commit()
def save(self):
"""Save the search in the database.
Returns:
String with the identification of the saved search.
Raises:
ValueError: if there are values missing in order to save the query.
RuntimeError: if the search could not be saved.
"""
if not self.name:
raise ValueError(
'No name for the query saved. Please select a name first.')
if not (self.query_string or self.query_dsl):
raise ValueError(
'Need to have either a query DSL or a query string to be '
'able to save the search.')
if not self.description:
logger.warning(
'No description selected for search, saving without one')
if self._resource_id:
resource_url = (
f'{self.api.api_root}/sketches/{self._sketch.id}/views/'
f'{self._resource_id}/')
else:
resource_url = (
f'{self.api.api_root}/sketches/{self._sketch.id}/views/')
query_filter = self.query_filter
if self.return_fields:
sketch_data = self._sketch.data
sketch_meta = sketch_data.get('meta', {})
mappings = sketch_meta.get('mappings', [])
use_mappings = []
for field in self.return_fields.split(','):
field = field.strip().lower()
for map_entry in mappings:
if map_entry.get('field', '').lower() == field:
use_mappings.append(map_entry)
query_filter['fields'] = use_mappings
data = {
'name': self.name,
'description': self.description,
'query': self.query_string,
'filter': query_filter,
'dsl': self.query_dsl,
'labels': json.dumps(self.labels),
}
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
error.error_message(
response, 'Unable to save search', error=RuntimeError)
response_json = error.get_response_json(response, logger)
search_dict = response_json.get('objects', [{}])[0]
self._resource_id = search_dict.get('id', 0)
return f'Saved search to ID: {self._resource_id}'
def save_as_template(self):
"""Save the search as a search template.
Returns:
A search template object (searchtemplate.SearchTemplate).
"""
if not self._resource_id:
logger.warning('Search has not been saved yet, saving now.')
return_string = self.save()
logger.info(return_string)
template = searchtemplate.SearchTemplate(self.api)
template.from_search_object(self)
print(template.save())
self._searchtemplate = template.id
return template
@property
def scrolling(self):
"""Returns whether scrolling is enabled or not."""
return self._scrolling
def scrolling_disable(self):
""""Disables scrolling."""
self._scrolling = False
def scrolling_enable(self):
"""Enable scrolling."""
self._scrolling = True
def to_dict(self):
"""Returns a dict with the respone of the query."""
if not self._raw_response:
self._execute_query()
return self._raw_response
def to_file(self, file_name):
"""Saves the content of the query to a file.
Args:
file_name (str): Full path to a file that will store the results
of the query to as a ZIP file. The ZIP file will contain a
METADATA file and a CSV with the results from the query.
Returns:
Boolean that determines if it was successful.
"""
old_scrolling = self.scrolling
self._scrolling = True
self._execute_query(file_name=file_name)
self._scrolling = old_scrolling
return True
def to_pandas(self):
"""Returns a pandas DataFrame with the response of the query."""
if not self._raw_response:
self._execute_query()
return_list = []
timelines = {
t.id: t.name for t in self._sketch.list_timelines()}
return_field_list = []
return_fields = self._return_fields
if return_fields:
if return_fields.startswith('\''):
return_fields = return_fields[1:]
if return_fields.endswith('\''):
return_fields = return_fields[:-1]
return_field_list = return_fields.split(',')
for result in self._raw_response.get('objects', []):
source = result.get('_source', {})
if not return_fields or '_id' in return_field_list:
source['_id'] = result.get('_id')
if not return_fields or '_type' in return_field_list:
source['_type'] = result.get('_type')
if not return_fields or '_index' in return_field_list:
source['_index'] = result.get('_index')
if not return_fields or '_source' in return_field_list:
source['_source'] = timelines.get(
result.get('__ts_timeline_id'))
if not return_fields or '__ts_timeline_id' in return_field_list:
source['_source'] = timelines.get(
result.get('__ts_timeline_id'))
return_list.append(source)
data_frame = pandas.DataFrame(return_list)
if 'datetime' in data_frame:
try:
data_frame['datetime'] = pandas.to_datetime(data_frame.datetime)
except pandas.errors.OutOfBoundsDatetime:
pass
elif 'timestamp' in data_frame:
try:
data_frame['datetime'] = pandas.to_datetime(
data_frame.timestamp / 1e6, utc=True, unit='s')
except pandas.errors.OutOfBoundsDatetime:
pass
return data_frame
@property
def updated_at(self):
"""Property that returns back the updated time of a search."""
return self._updated_at
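# A minimal end-to-end sketch (hypothetical sketch object and query string)
# tying the class above together: build a search, scope it in time and pull
# the results into pandas.
#
#   search_obj = Search(sketch=my_sketch)
#   search_obj.from_manual(query_string='data_type:"apache:access"')
#   search_obj.add_date_range('2021-06-01T00:00:00', '2021-06-01T23:59:59')
#   data_frame = search_obj.to_pandas()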
|
py | 7dffe52500615eac47a034d4cd7ce7d0ab3aedcb | from _utils import test_on
import sys
sys.path.append('.')
from flops_counter import nn
from flops_counter.tensorsize import TensorSize
######
# test on EltAdd
######
eltadd = {
'layers': [
nn.EltAdd() # same shape
],
'ins': [
TensorSize([64, 112, 112])
],
'out_shape': [
TensorSize([64, 112, 112])
],
'out_flops': [
802816
]
}
test_on(eltadd)
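# The expected FLOP count follows directly from the tensor size: an
# element-wise add performs one operation per output element, so
# 64 * 112 * 112 = 802816. The same arithmetic gives 802816 for the
# EltMul case below (the extra leading batch dimension of 1 does not
# change the element count).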
######
# test on EltMul
######
eltmul = {
'layers': [
nn.EltMul() # same shape
],
'ins': [
TensorSize([1, 64, 112, 112])
],
'out_shape': [
TensorSize([1, 64, 112, 112])
],
'out_flops': [
802816
]
}
test_on(eltmul) |
py | 7dffe60ffcbf571df9c1fcab10e7f4489d0af199 | # SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Adds electrical generators and existing hydro storage units to a base network.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
USD2013_to_EUR2013:
discountrate:
emission_prices:
electricity:
max_hours:
marginal_cost:
capital_cost:
conventional_carriers:
co2limit:
extendable_carriers:
include_renewable_capacities_from_OPSD:
estimate_renewable_capacities_from_capacity_stats:
load:
scaling_factor:
renewable:
hydro:
carriers:
hydro_max_hours:
hydro_capital_cost:
lines:
length_factor:
.. seealso::
Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,
:ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
Inputs
------
- ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity.
- ``data/bundle/hydro_capacities.csv``: Hydropower plant store/discharge power capacities, energy storage capacity, and average hourly inflow by country.
.. image:: ../img/hydrocapacities.png
:scale: 34 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!
- ``resources/opsd_load.csv`` Hourly per-country load profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
- ``resources/profile_{}.nc``: all technologies in ``config["renewables"].keys()``, confer :ref:`renewableprofiles`.
- ``networks/base.nc``: confer :ref:`base`
Outputs
-------
- ``networks/elec.nc``:
.. image:: ../img/elec.png
:scale: 33 %
Description
-----------
The rule :mod:`add_electricity` ties all the different data inputs from the preceding rules together into a detailed PyPSA network that is stored in ``networks/elec.nc``. It includes:
- today's transmission topology and transfer capacities (optionally including lines which are under construction according to the config settings ``lines: under_construction`` and ``links: under_construction``),
- today's thermal and hydro power generation capacities (for the technologies listed in the config setting ``electricity: conventional_carriers``), and
- today's load time-series (upsampled in a top-down approach according to population and gross domestic product)
It further adds extendable ``generators`` with **zero** capacity for
- photovoltaic, onshore and AC- as well as DC-connected offshore wind installations with today's locational, hourly wind and solar capacity factors (but **no** current capacities),
- additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)
"""
import logging
from _helpers import configure_logging, update_p_nom_max
import pypsa
import pandas as pd
import numpy as np
import xarray as xr
import geopandas as gpd
import powerplantmatching as pm
from powerplantmatching.export import map_country_bus
from vresutils.costdata import annuity
from vresutils import transfer as vtransfer
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def normed(s): return s/s.sum()
def _add_missing_carriers_from_costs(n, costs, carriers):
missing_carriers = pd.Index(carriers).difference(n.carriers.index)
if missing_carriers.empty: return
emissions_cols = costs.columns.to_series()\
.loc[lambda s: s.str.endswith('_emissions')].values
suptechs = missing_carriers.str.split('-').str[0]
emissions = costs.loc[suptechs, emissions_cols].fillna(0.)
emissions.index = missing_carriers
n.import_components_from_dataframe(emissions, 'Carrier')
def load_costs(tech_costs, config, elec_config, Nyears=1.):
# set all asset costs and other parameters
costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()
# correct units to MW and EUR
costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3
costs.loc[costs.unit.str.contains("USD"),"value"] *= config['USD2013_to_EUR2013']
costs = (costs.loc[idx[:,config['year'],:], "value"]
.unstack(level=2).groupby("technology").sum(min_count=1))
costs = costs.fillna({"CO2 intensity" : 0,
"FOM" : 0,
"VOM" : 0,
"discount rate" : config['discountrate'],
"efficiency" : 1,
"fuel" : 0,
"investment" : 0,
"lifetime" : 25})
costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) +
costs["FOM"]/100.) *
costs["investment"] * Nyears)
costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel']
costs.at['CCGT', 'fuel'] = costs.at['gas', 'fuel']
costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency']
costs = costs.rename(columns={"CO2 intensity": "co2_emissions"})
costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] +
costs.at['solar-utility', 'capital_cost'])
def costs_for_storage(store, link1, link2=None, max_hours=1.):
capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
if link2 is not None:
capital_cost += link2['capital_cost']
return pd.Series(dict(capital_cost=capital_cost,
marginal_cost=0.,
co2_emissions=0.))
max_hours = elec_config['max_hours']
costs.loc["battery"] = \
costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
max_hours=max_hours['battery'])
costs.loc["H2"] = \
costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"],
costs.loc["electrolysis"], max_hours=max_hours['H2'])
for attr in ('marginal_cost', 'capital_cost'):
overwrites = config.get(attr)
if overwrites is not None:
overwrites = pd.Series(overwrites)
costs.loc[overwrites.index, attr] = overwrites
return costs
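# Rough worked example of the capital_cost line above (hypothetical numbers,
# and assuming the usual vresutils annuity definition
# annuity(n, r) = r / (1 - (1 + r) ** -n)): a technology with a 25-year
# lifetime, a 7% discount rate, 5% FOM and an investment of 1000 EUR/MW gets
# (0.0858 + 0.05) * 1000 ~= 136 EUR/MW/a for Nyears = 1.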
def load_powerplants(ppl_fn):
carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',
'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}
return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
.powerplant.to_pypsa_names()
.rename(columns=str.lower).drop(columns=['efficiency'])
.replace({'carrier': carrier_dict}))
def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.):
substation_lv_i = n.buses.index[n.buses['substation_lv']]
regions = (gpd.read_file(regions).set_index('name')
.reindex(substation_lv_i))
opsd_load = (pd.read_csv(load, index_col=0, parse_dates=True)
.filter(items=countries))
logger.info(f"Load data scaled with scalling factor {scaling}.")
opsd_load *= scaling
nuts3 = gpd.read_file(nuts3_shapes).set_index('index')
def upsample(cntry, group):
l = opsd_load[cntry]
if len(group) == 1:
return pd.DataFrame({group.index[0]: l})
else:
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry,
normed=False).T.tocsr()
gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values),
index=group.index)
pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values),
index=group.index)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame(factors.values * l.values[:,np.newaxis],
index=l.index, columns=factors.index)
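# Because normed() makes the per-bus factors sum to one within each
# country, the disaggregated bus loads add back up to the original
# country-level OPSD time series.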
load = pd.concat([upsample(cntry, group) for cntry, group
in regions.geometry.groupby(regions.country)], axis=1)
n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
# TODO: line length factor of lines is applied to lines and links.
# Separate the function to distinguish.
n.lines['capital_cost'] = (n.lines['length'] * length_factor *
costs.at['HVAC overhead', 'capital_cost'])
if n.links.empty: return
dc_b = n.links.carrier == 'DC'
# If there are no dc links, then the 'underwater_fraction' column
# may be missing. Therefore we have to return here.
if n.links.loc[dc_b].empty: return
if simple_hvdc_costs:
costs = (n.links.loc[dc_b, 'length'] * length_factor *
costs.at['HVDC overhead', 'capital_cost'])
else:
costs = (n.links.loc[dc_b, 'length'] * length_factor *
((1. - n.links.loc[dc_b, 'underwater_fraction']) *
costs.at['HVDC overhead', 'capital_cost'] +
n.links.loc[dc_b, 'underwater_fraction'] *
costs.at['HVDC submarine', 'capital_cost']) +
costs.at['HVDC inverter pair', 'capital_cost'])
n.links.loc[dc_b, 'capital_cost'] = costs
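# Worked example of the else-branch above (hypothetical numbers): a 100 km
# DC link with underwater_fraction = 0.25 is costed as
#   100 * length_factor * (0.75 * costs.at['HVDC overhead', 'capital_cost']
#                          + 0.25 * costs.at['HVDC submarine', 'capital_cost'])
#   + costs.at['HVDC inverter pair', 'capital_cost']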
def attach_wind_and_solar(n, costs, input_profiles, technologies, line_length_factor=1):
# TODO: rename tech -> carrier, technologies -> carriers
for tech in technologies:
if tech == 'hydro': continue
n.add("Carrier", name=tech)
with xr.open_dataset(getattr(input_profiles, 'profile_' + tech)) as ds:
if ds.indexes['bus'].empty: continue
suptech = tech.split('-', 2)[0]
if suptech == 'offwind':
underwater_fraction = ds['underwater_fraction'].to_pandas()
connection_cost = (line_length_factor *
ds['average_distance'].to_pandas() *
(underwater_fraction *
costs.at[tech + '-connection-submarine', 'capital_cost'] +
(1. - underwater_fraction) *
costs.at[tech + '-connection-underground', 'capital_cost']))
capital_cost = (costs.at['offwind', 'capital_cost'] +
costs.at[tech + '-station', 'capital_cost'] +
connection_cost)
logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}"
.format(connection_cost.min(), connection_cost.max(), tech))
else:
capital_cost = costs.at[tech, 'capital_cost']
n.madd("Generator", ds.indexes['bus'], ' ' + tech,
bus=ds.indexes['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom_max=ds['p_nom_max'].to_pandas(),
weight=ds['weight'].to_pandas(),
marginal_cost=costs.at[suptech, 'marginal_cost'],
capital_cost=capital_cost,
efficiency=costs.at[suptech, 'efficiency'],
p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())
def attach_conventional_generators(n, costs, ppl, carriers):
_add_missing_carriers_from_costs(n, costs, carriers)
ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
.rename(index=lambda s: 'C' + str(s)))
logger.info('Adding {} generators with capacities [MW] \n{}'
.format(len(ppl), ppl.groupby('carrier').p_nom.sum()))
n.madd("Generator", ppl.index,
carrier=ppl.carrier,
bus=ppl.bus,
p_nom=ppl.p_nom,
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
capital_cost=0)
logger.warning('Capital costs for conventional generators set to 0 EUR/MW.')
def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **config):
_add_missing_carriers_from_costs(n, costs, carriers)
ppl = ppl.query('carrier == "hydro"').reset_index(drop=True)\
.rename(index=lambda s: str(s) + ' hydro')
ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"')
hydro = ppl.query('technology == "Reservoir"')
country = ppl['bus'].map(n.buses.country).rename("country")
inflow_idx = ror.index.union(hydro.index)
if not inflow_idx.empty:
dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)
with xr.open_dataarray(profile_hydro) as inflow:
inflow_countries = pd.Index(country[inflow_idx])
missing_c = (inflow_countries.unique()
.difference(inflow.indexes['countries']))
assert missing_c.empty, (f"'{profile_hydro}' is missing "
f"inflow time-series for at least one country: {', '.join(missing_c)}")
inflow_t = (inflow.sel(countries=inflow_countries)
.rename({'countries': 'name'})
.assign_coords(name=inflow_idx)
.transpose('time', 'name')
.to_pandas()
.multiply(dist_key, axis=1))
if 'ror' in carriers and not ror.empty:
n.madd("Generator", ror.index,
carrier='ror',
bus=ror['bus'],
p_nom=ror['p_nom'],
efficiency=costs.at['ror', 'efficiency'],
capital_cost=costs.at['ror', 'capital_cost'],
weight=ror['p_nom'],
p_max_pu=(inflow_t[ror.index]
.divide(ror['p_nom'], axis=1)
.where(lambda df: df<=1., other=1.)))
if 'PHS' in carriers and not phs.empty:
# fill missing max hours to config value and
# assume no natural inflow due to lack of data
max_hours = config.get('PHS_max_hours', 6)
phs = phs.replace({'max_hours': {0: max_hours}})
n.madd('StorageUnit', phs.index,
carrier='PHS',
bus=phs['bus'],
p_nom=phs['p_nom'],
capital_cost=costs.at['PHS', 'capital_cost'],
max_hours=phs['max_hours'],
efficiency_store=np.sqrt(costs.at['PHS','efficiency']),
efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),
cyclic_state_of_charge=True)
if 'hydro' in carriers and not hydro.empty:
hydro_max_hours = config.get('hydro_max_hours')
assert hydro_max_hours is not None, "No path for hydro capacities given."
hydro_stats = pd.read_csv(hydro_capacities,
comment="#", na_values='-', index_col=0)
e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6
e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum()
e_missing = e_target - e_installed
missing_mh_i = hydro.query('max_hours == 0').index
if hydro_max_hours == 'energy_capacity_totals_by_country':
# watch out some p_nom values like IE's are totally underrepresented
max_hours_country = e_missing / \
hydro.loc[missing_mh_i].groupby('country').p_nom.sum()
elif hydro_max_hours == 'estimate_by_large_installations':
max_hours_country = hydro_stats['E_store[TWh]'] * 1e3 / \
hydro_stats['p_nom_discharge[GW]']
missing_countries = (pd.Index(hydro['country'].unique())
.difference(max_hours_country.dropna().index))
if not missing_countries.empty:
logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}"
.format(", ".join(missing_countries)))
hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0,
hydro.country.map(max_hours_country)).fillna(6)
n.madd('StorageUnit', hydro.index, carrier='hydro',
bus=hydro['bus'],
p_nom=hydro['p_nom'],
max_hours=hydro_max_hours,
capital_cost=costs.at['hydro', 'capital_cost'],
marginal_cost=costs.at['hydro', 'marginal_cost'],
p_max_pu=1., # dispatch
p_min_pu=0., # store
efficiency_dispatch=costs.at['hydro', 'efficiency'],
efficiency_store=0.,
cyclic_state_of_charge=True,
inflow=inflow_t.loc[:, hydro.index])
def attach_extendable_generators(n, costs, ppl, carriers):
_add_missing_carriers_from_costs(n, costs, carriers)
for tech in carriers:
if tech.startswith('OCGT'):
ocgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
n.madd('Generator', ocgt.index,
suffix=' OCGT',
bus=ocgt['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom=0.,
capital_cost=costs.at['OCGT', 'capital_cost'],
marginal_cost=costs.at['OCGT', 'marginal_cost'],
efficiency=costs.at['OCGT', 'efficiency'])
elif tech.startswith('CCGT'):
ccgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
n.madd('Generator', ccgt.index,
suffix=' CCGT',
bus=ccgt['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom=0.,
capital_cost=costs.at['CCGT', 'capital_cost'],
marginal_cost=costs.at['CCGT', 'marginal_cost'],
efficiency=costs.at['CCGT', 'efficiency'])
elif tech.startswith('nuclear'):
nuclear = ppl.query("carrier == 'nuclear'").groupby('bus', as_index=False).first()
n.madd('Generator', nuclear.index,
suffix=' nuclear',
bus=nuclear['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom=0.,
capital_cost=costs.at['nuclear', 'capital_cost'],
marginal_cost=costs.at['nuclear', 'marginal_cost'],
efficiency=costs.at['nuclear', 'efficiency'])
else:
raise NotImplementedError(f"Adding extendable generators for carrier "
"'{tech}' is not implemented, yet. "
"Only OCGT, CCGT and nuclear are allowed at the moment.")
def attach_OPSD_renewables(n, techs):
available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB']
tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'}
countries = set(available) & set(n.buses.country)
tech_map = {k: v for k, v in tech_map.items() if v in techs}
if not tech_map:
return
logger.info(f'Using OPSD renewable capacities in {", ".join(countries)} '
f'for technologies {", ".join(tech_map.values())}.')
df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries])
technology_b = ~df.Technology.isin(['Onshore', 'Offshore'])
df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology)
df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2()
for fueltype, carrier_like in tech_map.items():
gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)]
buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby('bus').p_nom.count()
caps = map_country_bus(df.query('Fueltype == @fueltype'), buses)
caps = caps.groupby(['bus']).Capacity.sum()
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
n.generators.p_nom.update(gens.bus.map(caps).dropna())
n.generators.p_nom_min.update(gens.bus.map(caps).dropna())
def estimate_renewable_capacities(n, tech_map):
if len(tech_map) == 0: return
capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2()
[lambda df: df.Energy_Source_Level_2]
.set_index(['Fueltype', 'Country']).sort_index())
countries = n.buses.country.unique()
if len(countries) == 0: return
logger.info('heuristics applied to distribute renewable capacities [MW] \n{}'
.format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1')
.groupby('Country').agg({'Capacity': 'sum'})))
for ppm_fueltype, techs in tech_map.items():
tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\
.reindex(countries, fill_value=0.)
#tech_i = n.generators.query('carrier in @techs').index
tech_i = (n.generators.query('carrier in @techs')
[n.generators.query('carrier in @techs')
.bus.map(n.buses.country).isin(countries)].index)
n.generators.loc[tech_i, 'p_nom'] = (
(n.generators_t.p_max_pu[tech_i].mean() *
n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation
.groupby(n.generators.bus.map(n.buses.country))
.transform(lambda s: normed(s) * tech_capacities.at[s.name])
.where(lambda s: s>0.1, 0.)) # only capacities above 100kW
n.generators.loc[tech_i, 'p_nom_min'] = n.generators.loc[tech_i, 'p_nom']
def add_nice_carrier_names(n, config):
carrier_i = n.carriers.index
nice_names = (pd.Series(config['plotting']['nice_names'])
.reindex(carrier_i).fillna(carrier_i.to_series().str.title()))
n.carriers['nice_name'] = nice_names
colors = pd.Series(config['plotting']['tech_colors']).reindex(carrier_i)
if colors.isna().any():
missing_i = list(colors.index[colors.isna()])
logger.warning(f'tech_colors for carriers {missing_i} not defined in config.')
n.carriers['color'] = colors
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('add_electricity')
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.base_network)
Nyears = n.snapshot_weightings.objective.sum() / 8760.
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
ppl = load_powerplants(snakemake.input.powerplants)
attach_load(n, snakemake.input.regions, snakemake.input.load, snakemake.input.nuts3_shapes,
snakemake.config['countries'], snakemake.config['load']['scaling_factor'])
update_transmission_costs(n, costs, snakemake.config['lines']['length_factor'])
carriers = snakemake.config['electricity']['conventional_carriers']
attach_conventional_generators(n, costs, ppl, carriers)
carriers = snakemake.config['renewable']
attach_wind_and_solar(n, costs, snakemake.input, carriers, snakemake.config['lines']['length_factor'])
if 'hydro' in snakemake.config['renewable']:
carriers = snakemake.config['renewable']['hydro'].pop('carriers', [])
attach_hydro(n, costs, ppl, snakemake.input.profile_hydro, snakemake.input.hydro_capacities,
carriers, **snakemake.config['renewable']['hydro'])
carriers = snakemake.config['electricity']['extendable_carriers']['Generator']
attach_extendable_generators(n, costs, ppl, carriers)
tech_map = snakemake.config['electricity'].get('estimate_renewable_capacities_from_capacity_stats', {})
estimate_renewable_capacities(n, tech_map)
techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', [])
attach_OPSD_renewables(n, techs)
update_p_nom_max(n)
add_nice_carrier_names(n, snakemake.config)
n.export_to_netcdf(snakemake.output[0])
|
py | 7dffe635f55d3f7c553169e0c438704c41b7b177 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'SkuArgs',
]
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input['SkuName']):
"""
The resource model definition representing SKU
:param pulumi.Input['SkuName'] name: The name of the HealthBot SKU
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input['SkuName']:
"""
The name of the HealthBot SKU
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input['SkuName']):
pulumi.set(self, "name", value)
|
py | 7dffe76743a03783c9ad5a26839a28acbc800a43 | import os
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
from imageio import imread, imsave
import shutil
from googletrans import Translator, constants
import pywt
from pytorch_wavelets import DWTForward, DWTInverse
from pytorch_wavelets import DTCWTForward, DTCWTInverse
import torch
import torchvision
import torch.nn.functional as F
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from sentence_transformers import SentenceTransformer
import lpips
from utils import slice_imgs, derivat, basename, img_list, img_read, plot_text, txt_clean, checkout
import transforms
try: # progress bar for notebooks
get_ipython().__class__.__name__
from progress_bar import ProgressIPy as ProgressBar
except: # normal console
from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN101', 'RN50x16', 'RN50x4', 'RN50']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--in_img', default=None, help='input image')
parser.add_argument('-t', '--in_txt', default=None, help='input text')
parser.add_argument('-t2', '--in_txt2', default=None, help='input text - style')
parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
parser.add_argument( '--out_dir', default='_out')
parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
parser.add_argument('-r', '--resume', default=None, help='Path to saved FFT snapshots, to resume from')
parser.add_argument( '--fstep', default=1, type=int, help='Saving step')
parser.add_argument('-tr', '--translate', action='store_true', help='Translate text with Google Translate')
parser.add_argument('-ml', '--multilang', action='store_true', help='Use SBERT multilanguage model for text')
parser.add_argument( '--save_pt', action='store_true', help='Save FFT snapshots for further use')
parser.add_argument('-v', '--verbose', default=True, type=bool)
# training
parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
parser.add_argument( '--steps', default=200, type=int, help='Total iterations')
parser.add_argument( '--samples', default=200, type=int, help='Samples to evaluate')
parser.add_argument( '--lrate', default=0.05, type=float, help='Learning rate')
parser.add_argument('-p', '--prog', action='store_true', help='Enable progressive lrate growth (up to double a.lrate)')
# wavelet
parser.add_argument( '--dwt', action='store_true', help='Use DWT instead of FFT')
parser.add_argument('-w', '--wave', default='coif2', help='wavelets: db[1..], coif[1..], haar, dmey')
# tweaks
parser.add_argument('-a', '--align', default='uniform', choices=['central', 'uniform', 'overscan'], help='Sampling distribution')
parser.add_argument('-tf', '--transform', action='store_true', help='use augmenting transforms?')
parser.add_argument( '--contrast', default=0.9, type=float)
parser.add_argument( '--colors', default=1.5, type=float)
parser.add_argument( '--decay', default=1.5, type=float)
parser.add_argument('-sh', '--sharp', default=0.3, type=float)
parser.add_argument('-mm', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
parser.add_argument('-e', '--enhance', default=0, type=float, help='Enhance consistency, boosts training')
parser.add_argument('-n', '--noise', default=0, type=float, help='Add noise to suppress accumulation') # < 0.05 ?
parser.add_argument('-nt', '--notext', default=0, type=float, help='Subtract typed text as image (avoiding graffiti?), [0..1]')
parser.add_argument('-c', '--sync', default=0, type=float, help='Sync output to input image')
parser.add_argument( '--invert', action='store_true', help='Invert criteria')
a = parser.parse_args()
if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
if len(a.size)==1: a.size = a.size * 2
if a.in_img is not None and a.sync > 0: a.align = 'overscan'
if a.multilang is True: a.model = 'ViT-B/32' # sbert model is trained with ViT
a.diverse = -a.enhance
a.expand = abs(a.enhance)
return a
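# Illustrative invocation (the file name and prompt are hypothetical):
#   python clip_fft.py -t "a watercolor landscape" -s 1280-720 --steps 200
# Frames are written to <out_dir>/<cleaned-prompt>/ and assembled into an
# mp4 next to that folder (see main() below).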
### FFT from Lucent library ### https://github.com/greentfrapp/lucent
def to_valid_rgb(image_f, colors=1., decorrelate=True):
color_correlation_svd_sqrt = np.asarray([[0.26, 0.09, 0.02],
[0.27, 0.00, -0.05],
[0.27, -0.09, 0.03]]).astype("float32")
color_correlation_svd_sqrt /= np.asarray([colors, 1., 1.]) # saturate, empirical
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))
color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt
def _linear_decorrelate_color(tensor):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
t_permute = tensor.permute(0,2,3,1)
t_permute = torch.matmul(t_permute, torch.tensor(color_correlation_normalized.T).to(device))
tensor = t_permute.permute(0,3,1,2)
return tensor
def inner(*args, **kwargs):
image = image_f(*args, **kwargs)
if decorrelate:
image = _linear_decorrelate_color(image)
return torch.sigmoid(image)
return inner
def init_dwt(resume=None, shape=None, wave=None, colors=None):
size = None
wp_fake = pywt.WaveletPacket2D(data=np.zeros(shape[2:]), wavelet='db1', mode='symmetric')
xfm = DWTForward(J=wp_fake.maxlevel, wave=wave, mode='symmetric').cuda()
# xfm = DTCWTForward(J=lvl, biort='near_sym_b', qshift='qshift_b').cuda() # 4x more params, biort ['antonini','legall','near_sym_a','near_sym_b']
ifm = DWTInverse(wave=wave, mode='symmetric').cuda() # symmetric zero periodization
# ifm = DTCWTInverse(biort='near_sym_b', qshift='qshift_b').cuda() # 4x more params, biort ['antonini','legall','near_sym_a','near_sym_b']
if resume is None: # random init
Yl_in, Yh_in = xfm(torch.zeros(shape).cuda())
Ys = [torch.randn(*Y.shape).cuda() for Y in [Yl_in, *Yh_in]]
elif isinstance(resume, str):
if os.path.isfile(resume):
if os.path.splitext(resume)[1].lower()[1:] in ['jpg','png','tif','bmp']:
img_in = imread(resume)
Ys = img2dwt(img_in, wave=wave, colors=colors)
print(' loaded image', resume, img_in.shape, 'level', len(Ys)-1)
size = img_in.shape[:2]
wp_fake = pywt.WaveletPacket2D(data=np.zeros(size), wavelet='db1', mode='symmetric')
xfm = DWTForward(J=wp_fake.maxlevel, wave=wave, mode='symmetric').cuda()
else:
Ys = torch.load(resume)
Ys = [y.detach().cuda() for y in Ys]
else: print(' Snapshot not found:', resume); exit()
else:
Ys = [y.cuda() for y in resume]
# print('level', len(Ys)-1, 'low freq', Ys[0].cpu().numpy().shape)
return Ys, xfm, ifm, size
def dwt_image(shape, wave='coif2', sharp=0.3, colors=1., resume=None):
Ys, _, ifm, size = init_dwt(resume, shape, wave, colors)
Ys = [y.requires_grad_(True) for y in Ys]
scale = dwt_scale(Ys, sharp)
def inner(shift=None, contrast=1.):
image = ifm((Ys[0], [Ys[i+1] * float(scale[i]) for i in range(len(Ys)-1)]))
image = image * contrast / image.std() # keep contrast, empirical *1.33
return image
return Ys, inner, size
def dwt_scale(Ys, sharp):
scale = []
[h0,w0] = Ys[1].shape[3:5]
for i in range(len(Ys)-1):
[h,w] = Ys[i+1].shape[3:5]
scale.append( ((h0*w0)/(h*w)) ** (1.-sharp) )
# print(i+1, Ys[i+1].shape)
return scale
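# Example of the scaling above: a sub-band at half the base resolution in
# each dimension has (h0 * w0) / (h * w) = 4, so with the default
# sharp = 0.3 its coefficients are scaled by 4 ** 0.7 ~= 2.64.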
def img2dwt(img_in, wave='coif2', sharp=0.3, colors=1.):
if not isinstance(img_in, torch.Tensor):
img_in = torch.Tensor(img_in).cuda().permute(2,0,1).unsqueeze(0).float() / 255.
img_in = un_rgb(img_in, colors=colors)
with torch.no_grad():
wp_fake = pywt.WaveletPacket2D(data=np.zeros(img_in.shape[2:]), wavelet='db1', mode='zero')
lvl = wp_fake.maxlevel
# print(img_in.shape, lvl)
xfm = DWTForward(J=lvl, wave=wave, mode='symmetric').cuda()
Yl_in, Yh_in = xfm(img_in.cuda())
Ys = [Yl_in, *Yh_in]
scale = dwt_scale(Ys, sharp)
for i in range(len(Ys)-1):
Ys[i+1] /= scale[i]
return Ys
def pixel_image(shape, resume=None, sd=1., *noargs, **nokwargs):
size = None
if resume is None:
tensor = torch.randn(*shape) * sd
elif isinstance(resume, str):
if os.path.isfile(resume):
img_in = imread(resume) / 255.
tensor = torch.Tensor(img_in).permute(2,0,1).unsqueeze(0).float()
tensor = un_rgb(tensor-0.5, colors=2.) # experimental
size = img_in.shape[:2]
print(resume, size)
else: print(' Image not found:', resume); exit()
else:
if isinstance(resume, list): resume = resume[0]
tensor = resume
tensor = tensor.cuda().requires_grad_(True)
def inner(shift=None, contrast=1.): # *noargs, **nokwargs
image = tensor * contrast / tensor.std()
return image
return [tensor], inner, size # lambda: tensor
# From https://github.com/tensorflow/lucid/blob/master/lucid/optvis/param/spatial.py
def rfft2d_freqs(h, w):
"""Computes 2D spectrum frequencies."""
fy = np.fft.fftfreq(h)[:, None]
# when we have an odd input dimension we need to keep one additional frequency and later cut off 1 pixel
w2 = (w+1)//2 if w%2 == 1 else w//2+1
fx = np.fft.fftfreq(w)[:w2]
return np.sqrt(fx * fx + fy * fy)
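# Example: for h = w = 4 the kept width is w2 = 4 // 2 + 1 = 3, so the
# returned grid has shape (4, 3); its [0, 0] entry is 0 (the DC term),
# which is why callers clamp it with np.maximum before dividing.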
def resume_fft(resume=None, shape=None, decay=None, colors=1.6, sd=0.01):
size = None
if resume is None: # random init
params_shape = [*shape[:3], shape[3]//2+1, 2] # [1,3,512,257,2] for 512x512 (2 for imaginary and real components)
params = 0.01 * torch.randn(*params_shape).cuda()
elif isinstance(resume, str):
if os.path.isfile(resume):
if os.path.splitext(resume)[1].lower()[1:] in ['jpg','png','tif','bmp']:
img_in = imread(resume)
params = img2fft(img_in, decay, colors)
size = img_in.shape[:2]
else:
params = torch.load(resume)
if isinstance(params, list): params = params[0]
params = params.detach().cuda()
params *= sd
else: print(' Snapshot not found:', resume); exit()
else:
if isinstance(resume, list): resume = resume[0]
params = resume.cuda()
return params, size
def fft_image(shape, sd=0.01, decay_power=1.0, resume=None): # decay ~ blur
params, size = resume_fft(resume, shape, decay_power, sd=sd)
spectrum_real_imag_t = params.requires_grad_(True)
if size is not None: shape[2:] = size
[h,w] = list(shape[2:])
freqs = rfft2d_freqs(h,w)
scale = 1. / np.maximum(freqs, 4./max(h,w)) ** decay_power
scale *= np.sqrt(h*w)
scale = torch.tensor(scale).float()[None, None, ..., None].cuda()
def inner(shift=None, contrast=1.):
scaled_spectrum_t = scale * spectrum_real_imag_t
if shift is not None:
scaled_spectrum_t += scale * shift
if float(torch.__version__[:3]) < 1.8:
image = torch.irfft(scaled_spectrum_t, 2, normalized=True, signal_sizes=(h, w))
else:
if type(scaled_spectrum_t) is not torch.complex64:
scaled_spectrum_t = torch.view_as_complex(scaled_spectrum_t)
image = torch.fft.irfftn(scaled_spectrum_t, s=(h, w), norm='ortho')
image = image * contrast / image.std() # keep contrast, empirical
return image
return [spectrum_real_imag_t], inner, size
def inv_sigmoid(x):
eps = 1.e-12
x = torch.clamp(x.double(), eps, 1-eps)
y = torch.log(x/(1-x))
return y.float()
def un_rgb(image, colors=1.):
color_correlation_svd_sqrt = np.asarray([[0.26, 0.09, 0.02], [0.27, 0.00, -0.05], [0.27, -0.09, 0.03]]).astype("float32")
color_correlation_svd_sqrt /= np.asarray([colors, 1., 1.])
max_norm_svd_sqrt = np.max(np.linalg.norm(color_correlation_svd_sqrt, axis=0))
color_correlation_normalized = color_correlation_svd_sqrt / max_norm_svd_sqrt
color_uncorrelate = np.linalg.inv(color_correlation_normalized)
image = inv_sigmoid(image)
t_permute = image.permute(0,2,3,1)
t_permute = torch.matmul(t_permute, torch.tensor(color_uncorrelate.T).cuda())
image = t_permute.permute(0,3,1,2)
return image
def un_spectrum(spectrum, decay_power):
h = spectrum.shape[2]
w = (spectrum.shape[3]-1)*2
freqs = rfft2d_freqs(h, w)
scale = 1.0 / np.maximum(freqs, 1.0 / max(w, h)) ** decay_power
scale *= np.sqrt(w*h)
scale = torch.tensor(scale).float()[None, None, ..., None].cuda()
return spectrum / scale
def img2fft(img_in, decay=1., colors=1.):
h, w = img_in.shape[0], img_in.shape[1]
img_in = torch.Tensor(img_in).cuda().permute(2,0,1).unsqueeze(0) / 255.
img_in = un_rgb(img_in, colors=colors)
with torch.no_grad():
if float(torch.__version__[:3]) < 1.8:
spectrum = torch.rfft(img_in, 2, normalized=True) # 1.7
else:
spectrum = torch.fft.rfftn(img_in, s=(h, w), dim=[2,3], norm='ortho') # 1.8
spectrum = torch.view_as_real(spectrum)
spectrum = un_spectrum(spectrum, decay_power=decay)
spectrum *= 500000. # [sic!!!]
return spectrum
def main():
a = get_args()
prev_enc = 0
def train(i):
loss = 0
noise = a.noise * torch.rand(1, 1, *params[0].shape[2:4], 1).cuda() if a.noise > 0 else None
img_out = image_f(noise)
img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, macro=a.macro)[0]
out_enc = model_clip.encode_image(img_sliced)
if a.in_txt is not None: # input text
loss += sign * torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()
if a.notext > 0:
loss -= sign * a.notext * torch.cosine_similarity(txt_plot_enc, out_enc, dim=-1).mean()
if a.in_txt2 is not None: # input text - style
loss += sign * 0.5 * torch.cosine_similarity(txt_enc2, out_enc, dim=-1).mean()
if a.in_txt0 is not None: # subtract text
loss += -sign * torch.cosine_similarity(txt_enc0, out_enc, dim=-1).mean()
if a.in_img is not None and os.path.isfile(a.in_img): # input image
loss += sign * 0.5 * torch.cosine_similarity(img_enc, out_enc, dim=-1).mean()
if a.sync > 0 and a.in_img is not None and os.path.isfile(a.in_img): # image composition
prog_sync = (a.steps // a.fstep - i) / (a.steps // a.fstep)
loss += prog_sync * a.sync * sim_loss(F.interpolate(img_out, sim_size).float(), img_in, normalize=True).squeeze()
if a.sharp != 0 and a.dwt is not True: # scharr|sobel|default
loss -= a.sharp * derivat(img_out, mode='sobel')
# loss -= a.sharp * derivat(img_sliced, mode='scharr')
if a.diverse != 0:
img_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, macro=a.macro)[0]
out_enc2 = model_clip.encode_image(img_sliced)
loss += a.diverse * torch.cosine_similarity(out_enc, out_enc2, dim=-1).mean()
del out_enc2; torch.cuda.empty_cache()
if a.expand > 0:
global prev_enc
if i > 0:
loss += a.expand * torch.cosine_similarity(out_enc, prev_enc, dim=-1).mean()
prev_enc = out_enc.detach()
del img_out, img_sliced, out_enc; torch.cuda.empty_cache()
assert not isinstance(loss, int), ' Loss not defined, check the inputs'
if a.prog is True:
lr_cur = lr0 + (i / a.steps) * (lr1 - lr0)
for g in optimizer.param_groups:
g['lr'] = lr_cur
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % a.fstep == 0:
with torch.no_grad():
img = image_f(contrast=a.contrast).cpu().numpy()[0]
# empirical tone mapping
if (a.sync > 0 and a.in_img is not None):
img = img **1.3
elif a.sharp != 0:
img = img ** (1 + a.sharp/2.)
checkout(img, os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)), verbose=a.verbose)
pbar.upd()
# Load CLIP models
use_jit = True if float(torch.__version__[:3]) < 1.8 else False
model_clip, _ = clip.load(a.model, jit=use_jit)
try:
a.modsize = model_clip.visual.input_resolution
except:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.multilang is True:
model_lang = SentenceTransformer('clip-ViT-B-32-multilingual-v1').cuda()
def enc_text(txt):
if a.multilang is True:
emb = model_lang.encode([txt], convert_to_tensor=True, show_progress_bar=False)
else:
emb = model_clip.encode_text(clip.tokenize(txt).cuda())
return emb.detach().clone()
if a.diverse != 0:
a.samples = int(a.samples * 0.5)
if a.sync > 0:
a.samples = int(a.samples * 0.5)
if a.transform is True:
# trform_f = transforms.transforms_custom
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
out_name = []
if a.in_txt is not None:
if a.verbose is True: print(' topic text: ', basename(a.in_txt))
if a.translate:
translator = Translator()
a.in_txt = translator.translate(a.in_txt, dest='en').text
if a.verbose is True: print(' translated to:', a.in_txt)
txt_enc = enc_text(a.in_txt)
out_name.append(txt_clean(a.in_txt))
if a.notext > 0:
txt_plot = torch.from_numpy(plot_text(a.in_txt, a.modsize)/255.).unsqueeze(0).permute(0,3,1,2).cuda()
txt_plot_enc = model_clip.encode_image(txt_plot).detach().clone()
if a.in_txt2 is not None:
if a.verbose is True: print(' style text:', basename(a.in_txt2))
a.samples = int(a.samples * 0.75)
if a.translate:
translator = Translator()
a.in_txt2 = translator.translate(a.in_txt2, dest='en').text
if a.verbose is True: print(' translated to:', a.in_txt2)
txt_enc2 = enc_text(a.in_txt2)
out_name.append(txt_clean(a.in_txt2))
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
a.samples = int(a.samples * 0.75)
if a.translate:
translator = Translator()
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
if a.verbose is True: print(' translated to:', a.in_txt0)
txt_enc0 = enc_text(a.in_txt0)
out_name.append('off-' + txt_clean(a.in_txt0))
if a.multilang is True: del model_lang
if a.in_img is not None and os.path.isfile(a.in_img):
if a.verbose is True: print(' ref image:', basename(a.in_img))
img_in = torch.from_numpy(img_read(a.in_img)/255.).unsqueeze(0).permute(0,3,1,2).cuda()
img_in = img_in[:,:3,:,:] # fix rgb channels
in_sliced = slice_imgs([img_in], a.samples, a.modsize, transforms.normalize(), a.align)[0]
img_enc = model_clip.encode_image(in_sliced).detach().clone()
if a.sync > 0:
sim_loss = lpips.LPIPS(net='vgg', verbose=False).cuda()
sim_size = [s//2 for s in a.size]
img_in = F.interpolate(img_in, sim_size).float()
else:
del img_in
del in_sliced; torch.cuda.empty_cache()
out_name.append(basename(a.in_img).replace(' ', '_'))
shape = [1, 3, *a.size]
if a.dwt is True:
params, image_f, sz = dwt_image(shape, a.wave, a.sharp, a.colors, a.resume)
else:
params, image_f, sz = fft_image(shape, 0.01, a.decay, a.resume)
if sz is not None: a.size = sz
image_f = to_valid_rgb(image_f, colors = a.colors)
if a.prog is True:
lr1 = a.lrate * 2
lr0 = lr1 * 0.01
else:
lr0 = a.lrate
optimizer = torch.optim.AdamW(params, lr0, weight_decay=0.01, amsgrad=True)
sign = 1. if a.invert is True else -1.
if a.verbose is True: print(' samples:', a.samples)
out_name = '-'.join(out_name)
out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
tempdir = os.path.join(a.out_dir, out_name)
os.makedirs(tempdir, exist_ok=True)
pbar = ProgressBar(a.steps // a.fstep)
for i in range(a.steps):
train(i)
    # build the frame pattern with os.path.join so the ffmpeg input path also works on non-Windows systems
    os.system('ffmpeg -v warning -y -i "%s" "%s.mp4"' % (os.path.join(tempdir, '%04d.jpg'), os.path.join(a.out_dir, out_name)))
shutil.copy(img_list(tempdir)[-1], os.path.join(a.out_dir, '%s-%d.jpg' % (out_name, a.steps)))
if a.save_pt is True:
torch.save(params, '%s.pt' % os.path.join(a.out_dir, out_name))
if __name__ == '__main__':
main()
|
py | 7dffe8af69318aa8c4e30c41b5ca342f23036ffb | # -*- coding: utf-8 -*-
"""Implementation of NTN."""
from typing import Any, ClassVar, Mapping, Optional
import torch
from torch import nn
from ..base import EntityEmbeddingModel
from ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE
from ...losses import Loss
from ...nn.emb import EmbeddingSpecification
from ...regularizers import Regularizer
from ...triples import CoreTriplesFactory
from ...typing import DeviceHint, Hint, Initializer
__all__ = [
'NTN',
]
class NTN(EntityEmbeddingModel):
r"""An implementation of NTN from [socher2013]_.
NTN uses a bilinear tensor layer instead of a standard linear neural network layer:
.. math::
f(h,r,t) = \textbf{u}_{r}^{T} \cdot \tanh(\textbf{h} \mathfrak{W}_{r} \textbf{t}
+ \textbf{V}_r [\textbf{h};\textbf{t}] + \textbf{b}_r)
where $\mathfrak{W}_r \in \mathbb{R}^{d \times d \times k}$ is the relation specific tensor, and the weight
matrix $\textbf{V}_r \in \mathbb{R}^{k \times 2d}$, and the bias vector $\textbf{b}_r$ and
the weight vector $\textbf{u}_r \in \mathbb{R}^k$ are the standard
parameters of a neural network, which are also relation specific. The result of the tensor product
$\textbf{h} \mathfrak{W}_{r} \textbf{t}$ is a vector $\textbf{x} \in \mathbb{R}^k$ where each entry $x_i$ is
computed based on the slice $i$ of the tensor $\mathfrak{W}_{r}$:
$\textbf{x}_i = \textbf{h}\mathfrak{W}_{r}^{i} \textbf{t}$. As indicated by the interaction model, NTN defines
for each relation a separate neural network which makes the model very expressive, but at the same time
computationally expensive.
.. seealso::
- Original Implementation (Matlab): `<https://github.com/khurram18/NeuralTensorNetworks>`_
- TensorFlow: `<https://github.com/dddoss/tensorflow-socher-ntn>`_
       - Keras: `<https://github.com/dapurv5/keras-neural-tensor-layer>`_
---
citation:
author: Socher
year: 2013
link: https://dl.acm.org/doi/10.5555/2999611.2999715
github: khurram18/NeuralTensorNetworks
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default: ClassVar[Mapping[str, Any]] = dict(
embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,
num_slices=dict(type=int, low=2, high=4),
)
def __init__(
self,
triples_factory: CoreTriplesFactory,
embedding_dim: int = 100,
num_slices: int = 4,
loss: Optional[Loss] = None,
preferred_device: DeviceHint = None,
random_seed: Optional[int] = None,
non_linearity: Optional[nn.Module] = None,
regularizer: Optional[Regularizer] = None,
entity_initializer: Hint[Initializer] = None,
) -> None:
r"""Initialize NTN.
:param embedding_dim: The entity embedding dimension $d$. Is usually $d \in [50, 350]$.
        :param num_slices: The number of tensor slices $k$ per relation.
:param non_linearity: A non-linear activation function. Defaults to the hyperbolic
tangent :class:`torch.nn.Tanh`.
"""
super().__init__(
triples_factory=triples_factory,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
entity_representations=EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=entity_initializer,
),
)
self.num_slices = num_slices
self.w = nn.Parameter(data=torch.empty(
triples_factory.num_relations,
num_slices,
embedding_dim,
embedding_dim,
device=self.device,
), requires_grad=True)
self.vh = nn.Parameter(data=torch.empty(
triples_factory.num_relations,
num_slices,
embedding_dim,
device=self.device,
), requires_grad=True)
self.vt = nn.Parameter(data=torch.empty(
triples_factory.num_relations,
num_slices,
embedding_dim,
device=self.device,
), requires_grad=True)
self.b = nn.Parameter(data=torch.empty(
triples_factory.num_relations,
num_slices,
device=self.device,
), requires_grad=True)
self.u = nn.Parameter(data=torch.empty(
triples_factory.num_relations,
num_slices,
device=self.device,
), requires_grad=True)
if non_linearity is None:
non_linearity = nn.Tanh()
self.non_linearity = non_linearity
def _reset_parameters_(self): # noqa: D102
super()._reset_parameters_()
nn.init.normal_(self.w)
nn.init.normal_(self.vh)
nn.init.normal_(self.vt)
nn.init.normal_(self.b)
nn.init.normal_(self.u)
def _score(
self,
h_indices: Optional[torch.LongTensor] = None,
r_indices: Optional[torch.LongTensor] = None,
t_indices: Optional[torch.LongTensor] = None,
slice_size: int = None,
) -> torch.FloatTensor:
"""
Compute scores for NTN.
:param h_indices: shape: (batch_size,)
:param r_indices: shape: (batch_size,)
:param t_indices: shape: (batch_size,)
:return: shape: (batch_size, num_entities)
"""
assert r_indices is not None
#: shape: (batch_size, num_entities, d)
h_all = self.entity_embeddings.get_in_canonical_shape(indices=h_indices)
t_all = self.entity_embeddings.get_in_canonical_shape(indices=t_indices)
if slice_size is None:
return self._interaction_function(h=h_all, t=t_all, r_indices=r_indices)
if h_all.shape[1] > t_all.shape[1]:
h_was_split = True
split_tensor = torch.split(h_all, slice_size, dim=1)
constant_tensor = t_all
else:
h_was_split = False
split_tensor = torch.split(t_all, slice_size, dim=1)
constant_tensor = h_all
scores_arr = []
for split in split_tensor:
if h_was_split:
h = split
t = constant_tensor
else:
h = constant_tensor
t = split
score = self._interaction_function(h=h, t=t, r_indices=r_indices)
scores_arr.append(score)
return torch.cat(scores_arr, dim=1)
def _interaction_function(
self,
h: torch.FloatTensor,
t: torch.FloatTensor,
r_indices: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
#: Prepare h: (b, e, d) -> (b, e, 1, 1, d)
h_for_w = h.unsqueeze(dim=-2).unsqueeze(dim=-2)
#: Prepare t: (b, e, d) -> (b, e, 1, d, 1)
t_for_w = t.unsqueeze(dim=-2).unsqueeze(dim=-1)
#: Prepare w: (R, k, d, d) -> (b, k, d, d) -> (b, 1, k, d, d)
w_r = self.w.index_select(dim=0, index=r_indices).unsqueeze(dim=1)
# h.T @ W @ t, shape: (b, e, k, 1, 1)
hwt = (h_for_w @ w_r @ t_for_w)
#: reduce (b, e, k, 1, 1) -> (b, e, k)
hwt = hwt.squeeze(dim=-1).squeeze(dim=-1)
#: Prepare vh: (R, k, d) -> (b, k, d) -> (b, 1, k, d)
vh_r = self.vh.index_select(dim=0, index=r_indices).unsqueeze(dim=1)
#: Prepare h: (b, e, d) -> (b, e, d, 1)
h_for_v = h.unsqueeze(dim=-1)
# V_h @ h, shape: (b, e, k, 1)
vhh = vh_r @ h_for_v
#: reduce (b, e, k, 1) -> (b, e, k)
vhh = vhh.squeeze(dim=-1)
#: Prepare vt: (R, k, d) -> (b, k, d) -> (b, 1, k, d)
vt_r = self.vt.index_select(dim=0, index=r_indices).unsqueeze(dim=1)
#: Prepare t: (b, e, d) -> (b, e, d, 1)
t_for_v = t.unsqueeze(dim=-1)
# V_t @ t, shape: (b, e, k, 1)
vtt = vt_r @ t_for_v
#: reduce (b, e, k, 1) -> (b, e, k)
vtt = vtt.squeeze(dim=-1)
#: Prepare b: (R, k) -> (b, k) -> (b, 1, k)
b = self.b.index_select(dim=0, index=r_indices).unsqueeze(dim=1)
# a = f(h.T @ W @ t + Vh @ h + Vt @ t + b), shape: (b, e, k)
pre_act = hwt + vhh + vtt + b
act = self.non_linearity(pre_act)
# prepare u: (R, k) -> (b, k) -> (b, 1, k, 1)
u = self.u.index_select(dim=0, index=r_indices).unsqueeze(dim=1).unsqueeze(dim=-1)
# prepare act: (b, e, k) -> (b, e, 1, k)
act = act.unsqueeze(dim=-2)
# compute score, shape: (b, e, 1, 1)
score = act @ u
# reduce
score = score.squeeze(dim=-1).squeeze(dim=-1)
return score
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor: # noqa: D102
return self._score(h_indices=hrt_batch[:, 0], r_indices=hrt_batch[:, 1], t_indices=hrt_batch[:, 2])
def score_t(self, hr_batch: torch.LongTensor, slice_size: int = None) -> torch.FloatTensor: # noqa: D102
return self._score(h_indices=hr_batch[:, 0], r_indices=hr_batch[:, 1], slice_size=slice_size)
def score_h(self, rt_batch: torch.LongTensor, slice_size: int = None) -> torch.FloatTensor: # noqa: D102
return self._score(r_indices=rt_batch[:, 0], t_indices=rt_batch[:, 1], slice_size=slice_size)
|
py | 7dffe918e3c23cfd2f2ec66aa61da713cbd80756 | # Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
import logging
import pathlib
import re
import sys
ACTION_APPLY = 0
ACTION_ROLLBACK = 1
DDL_RE = re.compile(r"[0-9]+\.sql")
DDL_ROLLBACK_RE_FORMAT = r"0*{}\-rollback\.sql"
class DDLActions:
def apply_ddl(self, ddl_file, version, action=ACTION_APPLY):
"""Action the DDL file as the given version and action type.
Action types are apply or rollback. Once the DDL is applied,
also record the version, date/time and action type in the DDL
history."""
raise NotImplementedError
def read_ddl_info(self, ddl_path):
"""Return DDL action history - list of (version, date/time of action, action).
This method should read the DDL history table. If not
present, it should create it and return an empty history."""
raise NotImplementedError
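# Illustrative only (not part of the original module): a minimal sketch of a
# concrete DDLActions backend, assuming an SQLite connection and a hypothetical
# ddl_history table, to show how apply_ddl() and read_ddl_info() are expected
# to behave for the helpers below.
import datetime
import sqlite3
class SQLiteDDLActions(DDLActions):
    def __init__(self, conn: sqlite3.Connection):
        self.conn = conn
    def apply_ddl(self, ddl_file, version, action=ACTION_APPLY):
        # Run the DDL script, then record (version, timestamp, action) in the history.
        with self.conn:
            self.conn.executescript(ddl_file.read_text())
            self.conn.execute(
                'INSERT INTO ddl_history (version, applied, action) VALUES (?, ?, ?)',
                (version, datetime.datetime.now().isoformat(), action))
    def read_ddl_info(self, ddl_path):
        # Create the history table on first use; return rows oldest-first.
        self.conn.execute('CREATE TABLE IF NOT EXISTS ddl_history '
                          '(version INTEGER, applied TEXT, action INTEGER)')
        return list(self.conn.execute(
            'SELECT version, applied, action FROM ddl_history ORDER BY applied'))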
def ddl_files_matching_re(ddl_path, pattern):
    # Globbing is too imprecise to pick the exact files.
    return sorted([f for f in ddl_path.glob('*.sql') if pattern.fullmatch(f.name)])
def ddl_files(ddl_path):
return ddl_files_matching_re(ddl_path, DDL_RE)
def ddl_rollback_files(ddl_path, version):
return ddl_files_matching_re(ddl_path, re.compile(DDL_ROLLBACK_RE_FORMAT.format(version)))
def active_ddls(ddl_info):
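    """Reduce the DDL history to the currently applied versions, as a dict
    mapping version -> date/time applied; a rollback entry removes the
    matching earlier apply entry."""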
res = {}
for ddl in ddl_info:
if ddl[2]:
del res[ddl[0]]
else:
res[ddl[0]] = ddl[1]
return res
def apply(ddl_actions, ddl_info, ddl_path, args):
active = active_ddls(ddl_info)
for ddl_file in ddl_files(ddl_path):
ver = int(ddl_file.stem)
if ver not in active:
if not args.dry_run:
ddl_actions.apply_ddl(ddl_file, ver, ACTION_APPLY)
if not args.quiet:
print('Applied {}.'.format(ver))
else:
if not args.quiet:
print('Dry run - apply {}.'.format(ver))
else:
if args.verbose and not args.quiet:
print('Skipping {}, already applied.'.format(ver))
return 0
def rollback(ddl_actions, ddl_info, ddl_path, args):
active = active_ddls(ddl_info)
if active:
top = sorted(active.keys())[-1]
ddl_files = ddl_rollback_files(ddl_path, top)
if ddl_files:
for ddl_file in ddl_files:
if not args.dry_run:
ddl_actions.apply_ddl(ddl_file, top, ACTION_ROLLBACK)
if not args.quiet:
print('Rolled back {}.'.format(top))
else:
if not args.quiet:
print('Dry run - roll back {}.'.format(top))
return 0
else:
if not args.quiet:
print('No rollback available for {}.'.format(top))
return 1
else:
if not args.quiet:
print('No DDLs applied.')
return 0
def status(ddl_info, ddl_path, args):
active = active_ddls(ddl_info)
if not args.quiet:
if args.verbose:
for ddl in ddl_info:
print('{ddl:5} {verb} at {time}'.format(
ddl=ddl[0],
verb='rollback' if ddl[2] else ' applied',
time=ddl[1]))
else:
ddls = sorted(active.keys())
if len(ddls) > args.last:
print('Limiting output to last {} items.'.format(args.last))
ddls = ddls[-args.last:]
for ddl in ddls:
print('{ddl:5} applied at {time}'.format(
ddl=ddl,
time=active[ddl]))
res = 0
for ddl_file in ddl_files(ddl_path):
ver = int(ddl_file.stem)
if ver not in active:
res = 1
if not args.quiet:
print('{:5} to be applied.'.format(ver))
return res
def add_args(parser):
parser.add_argument('-r', '--update-required',
dest='req_update', action='store_true', default=False,
help='just check whether an update is required')
parser.add_argument('-v', '--verbose',
action='store_true', default=False,
help='produce more detailed output messages')
parser.add_argument('-q', '--quiet',
action='store_true', default=False,
help='disable output messages')
parser.add_argument('-a', '--action',
dest='action', action='store', default='update',
choices=['update', 'rollback', 'status'],
help='update, rollback or status',
metavar='ACTION')
parser.add_argument('-l', '--last',
dest='last', action='store', type=int, default=10,
help='list last N items only, default 10',
metavar='N')
parser.add_argument('-n', '--dry-run',
dest='dry_run', action='store_true', default=False,
help='perform a trial run')
parser.add_argument('ddl_path',
nargs='?', default=None,
help='path to directory with DDL files',
metavar='DDLPATH')
def main(args, ddl_actions):
ddl_path = pathlib.Path(args.ddl_path)
if args.req_update:
if args.action != 'update':
print("--update-required only valid with action 'update'.", file=sys.stderr)
return 2
args.action = 'status'
args.quiet = True
try:
ddl_info = ddl_actions.read_ddl_info(ddl_path)
if args.action == 'update':
return apply(ddl_actions, ddl_info, ddl_path, args)
elif args.action == 'rollback':
return rollback(ddl_actions, ddl_info, ddl_path, args)
else:
return status(ddl_info, ddl_path, args)
except Exception as e:
logging.error('Exception {exc} ({args})'.format(
exc=type(e).__name__,
args=str(e)))
print('Error {exc} ({args}).'.format(
exc=type(e).__name__,
args=str(e)), file=sys.stderr)
return 2
|
py | 7dffebd0371de8c73f8ad704d2e57d6d8668141b | import pytest
from ogusa import SS, TPI
import time
import os
from ogusa.execute import runner
from ogusa.utils import safe_read_pickle
import ogusa.output_tables as ot
SS.ENFORCE_SOLUTION_CHECKS = False
TPI.ENFORCE_SOLUTION_CHECKS = False
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
def run_micro_macro(iit_reform, og_spec, guid):
guid = ''
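    # NOTE: guid is reset to an empty string here, so the OUTPUT_* directory
    # names below carry no suffix regardless of the argument passed in.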
start_time = time.time()
REFORM_DIR = "./OUTPUT_REFORM_" + guid
BASELINE_DIR = "./OUTPUT_BASELINE_" + guid
tax_func_path_baseline = os.path.join(CUR_PATH, 'OUTPUT_BASELINE',
'TxFuncEst_baseline.pkl')
tax_func_path_reform = os.path.join(CUR_PATH, 'OUTPUT_REFORM',
'TxFuncEst_policy.pkl')
# Add start year from reform to user parameters
start_year = sorted(iit_reform.keys())[0]
og_spec['start_year'] = start_year
with open("log_{}.log".format(guid), 'w') as f:
f.write("guid: {}\n".format(guid))
f.write("iit_reform: {}\n".format(iit_reform))
f.write("og_spec: {}\n".format(og_spec))
'''
------------------------------------------------------------------------
Run baseline
------------------------------------------------------------------------
'''
output_base = BASELINE_DIR
kwargs = {'output_base': output_base, 'baseline_dir': BASELINE_DIR,
'test': True, 'time_path': True, 'baseline': True,
'og_spec': og_spec, 'run_micro': False,
'tax_func_path': tax_func_path_baseline, 'guid': guid}
runner(**kwargs)
'''
------------------------------------------------------------------------
Run reform
------------------------------------------------------------------------
'''
output_base = REFORM_DIR
kwargs = {'output_base': output_base, 'baseline_dir': BASELINE_DIR,
'test': True, 'time_path': True, 'baseline': False,
'iit_reform': iit_reform, 'og_spec': og_spec,
'guid': guid, 'run_micro': False,
'tax_func_path': tax_func_path_reform}
runner(**kwargs)
time.sleep(0.5)
base_tpi = safe_read_pickle(
os.path.join(BASELINE_DIR, 'TPI', 'TPI_vars.pkl'))
base_params = safe_read_pickle(
os.path.join(BASELINE_DIR, 'model_params.pkl'))
reform_tpi = safe_read_pickle(
os.path.join(REFORM_DIR, 'TPI', 'TPI_vars.pkl'))
reform_params = safe_read_pickle(
os.path.join(REFORM_DIR, 'model_params.pkl'))
ans = ot.macro_table(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['Y', 'C', 'K', 'L', 'r', 'w'], output_type='pct_diff',
num_years=10, start_year=og_spec['start_year'])
print("total time was ", (time.time() - start_time))
return ans
@pytest.mark.full_run
def test_run_micro_macro():
iit_reform = {
2018: {
'_II_rt1': [.09],
'_II_rt2': [.135],
'_II_rt3': [.225],
'_II_rt4': [.252],
'_II_rt5': [.297],
'_II_rt6': [.315],
'_II_rt7': [0.3564],
}, }
run_micro_macro(iit_reform=iit_reform, og_spec={
'frisch': 0.44, 'g_y_annual': 0.021}, guid='abc')
|
py | 7dffece55f8cc0a4dc34cdd6954f81b2aeba6f28 | """Migration database v2.0 schema
Revision ID: 61cd8752746f
Revises:
Create Date: 2021-04-03 17:16:50.858940
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "61cd8752746f"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"request",
"productcategoryid",
existing_type=sa.SMALLINT(),
nullable=True,
existing_server_default=sa.text(
"nextval('request_productcategoryid_seq'::regclass)"
),
)
op.drop_constraint("request_product_category_fk", "request", type_="foreignkey")
op.create_foreign_key(
None,
"request",
"product_category",
["productcategoryid"],
["productcategoryid"],
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, "request", type_="foreignkey")
op.create_foreign_key(
"request_product_category_fk",
"request",
"product_category",
["productcategoryid"],
["productcategoryid"],
onupdate="RESTRICT",
ondelete="RESTRICT",
)
op.alter_column(
"request",
"productcategoryid",
existing_type=sa.SMALLINT(),
nullable=False,
existing_server_default=sa.text(
"nextval('request_productcategoryid_seq'::regclass)"
),
)
# ### end Alembic commands ###
|
py | 7dffed06b907377c0c417f6f0bfb7a09a2751c85 | import lyricsgenius
import os
from bs4 import BeautifulSoup
import re
from lyricsgenius.api import Genius
from lyricsgenius.song import Song
from lyricsgenius.artist import Artist
from lyricsgenius.utils import sanitize_filename
import dandelion
import nltk
from nltk.corpus import cmudict
from nltk.corpus import timit
import random
from random import randint
from nltk import compat
from nltk.tree import Tree
from nltk.internals import import_from_stdlib
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
nltk.download("timit")
timitdict = nltk.corpus.timit.transcription_dict()
def rhyme(inp, level):
entries = nltk.corpus.cmudict.entries()
syllables = [(word, syl) for word, syl in entries if word == inp]
rhymes = []
for (word, syllable) in syllables:
rhymes += [word for word, pron in entries if pron[-level:] == syllable[-level:]]
return set(rhymes)
def doTheyRhyme(word1, word2):
    # first, we don't want to report 'glue' and 'unglue' as rhyming words
    # those kind of rhymes are LAME
    # (suffix test via endswith; the original find()-based comparison also
    # returned False for unrelated words whenever word2 was exactly one
    # character longer than word1, because str.find() returns -1 on a miss)
    if word1.endswith(word2) or word2.endswith(word1):
        return False
    return word1 in rhyme(word2, 1)
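# Illustrative usage of the helpers above (requires the CMU pronouncing
# dictionary, e.g. via nltk.download('cmudict')):
#   doTheyRhyme('cat', 'hat')     -> True
#   doTheyRhyme('glue', 'unglue') -> False (suffix "rhymes" are rejected)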
#######################################
#Sql setup
import psycopg2
connection = psycopg2.connect(user = "postgres",
password = "password",
host = "localhost",
port = "5432",
database = "LilPump1")
connection.autocommit = True
cursor = connection.cursor()
cur2 = connection.cursor()
# Print PostgreSQL Connection properties
print ( connection.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
##########################################
#Take all words into a dictionary
#wordDict = { "Test": 1}
#find every item of the list and add it to the dictionary
query = "select lyrics, songID, lineNumber from lyrics"
print ("Writing Query: " + query)
cursor.execute(query)
dictionaryList = []
wordDict = {}
offset = 0
def swiperNoSwearing(string):
string = string.replace("Bitch", "B****")
string = string.replace("bitch", "b****")
#string = string.replace("-", " ")
string = string.replace("Fuck", "F***")
string = string.replace("fuck", "f***")
string = string.replace("Dick", "D***")
string = string.replace("dick", "d***")
string = string.replace(" ass", " a**")
string = string.replace("Ass ", "A** ")
string = string.replace("nigga", "buddy")
string = string.replace("Nigga", "Buddy")
return string
curID = 1
tempLine = "^" #for unassigned line
phoneticList = []
adList = []
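# For each lyric line: split off ad-libs in parentheses, strip punctuation,
# censor profanity, lowercase, then look up the last word in the TIMIT
# transcription dict and store a key built from its last two phones in
# the lyrics.rhyme column.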
for lyrics, songID, lineNumber in cursor:
#print(lyrics, " END ")
string = str(lyrics)
string = string.replace(")", "^") #Ad-libs should be separate
string = string.replace(" (", "^")
l = (string.split("^")) #[0]
if len(l) > 1:
adList.append(l[1])
string = l[0]
string = string.replace("\n", "")
string = string.replace(" ", "")
#string = string.replace("-", " ")
string = string.replace("!", "")
string = string.replace("?", "")
string = string.replace("]", "")
string = string.replace("[", "")
string = string.replace(",", "")
string = swiperNoSwearing(string) #comment in/out to remove swearing
string = string.lower() #to avoid uppercases changing distinct words
#lyrics[0] = str(lyrics[0].strip('\n') )
string.strip('\n')
sentence = string.split(" ")
#print("Sentence: ", sentence)
try:
#print(str(timitdict[sentence[len(sentence) - 1] ]) )
t = timitdict[sentence[len(sentence) - 1] ]
dd = str(t[len(t) - 1]) + str(t[len(t) - 2]) #--the rhyme of the sentence
query = ("UPDATE lyrics SET rhyme = '" + dd + "' WHERE songID = " + str(songID) + " and lineNumber = " + str(lineNumber) + " ;")
print("QUERY IS:::: ", query)
cur2.execute(query)
except:
        pass  # word missing from the TIMIT dictionary (or other lookup error); skip this line
#print("WWWWWWWWWWWW") |
py | 7dffee29455c54ee2b3a03e69cca8293f5f2bf65 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Test the disaster recovery service"""
import os.path
import sys
import unittest
from typing import Callable, List
from unittest import mock
import tests.pgsmo_tests.utils as pg_utils
from ossdbtoolsservice.connection import ConnectionInfo, ConnectionService
from ossdbtoolsservice.connection.contracts import ConnectionDetails
from ossdbtoolsservice.disaster_recovery import (DisasterRecoveryService,
disaster_recovery_service)
from ossdbtoolsservice.disaster_recovery.contracts import (BackupParams,
RestoreParams)
from ossdbtoolsservice.tasks import Task, TaskService, TaskStatus
from ossdbtoolsservice.utils import constants
from tests import utils
class TestDisasterRecoveryService(unittest.TestCase):
"""Methods for testing the disaster recovery service"""
def setUp(self):
"""Set up the tests with a disaster recovery service and connection service with mock connection info"""
self.disaster_recovery_service = DisasterRecoveryService()
self.connection_service = ConnectionService()
self.task_service = TaskService()
self.disaster_recovery_service._service_provider = utils.get_mock_service_provider({
constants.CONNECTION_SERVICE_NAME: self.connection_service,
constants.TASK_SERVICE_NAME: self.task_service})
# Create connection information for use in the tests
self.connection_details = ConnectionDetails()
self.host = 'test_host'
self.dbname = 'test_db'
self.username = 'user'
self.connection_details.options = {
'host': self.host,
'dbname': self.dbname,
'user': self.username,
'port': 5432
}
self.test_uri = 'test_uri'
self.connection_info = ConnectionInfo(self.test_uri, self.connection_details)
# Create backup parameters for the tests
self.request_context = utils.MockRequestContext()
self.backup_path = 'mock/path/test.sql'
self.backup_type = 'sql'
self.data_only = False
self.no_owner = True
self.schema = 'test_schema'
self.backup_params = BackupParams.from_dict({
'ownerUri': self.test_uri,
'backupInfo': {
'type': self.backup_type,
'path': self.backup_path,
'data_only': self.data_only,
'no_owner': self.no_owner,
'schema': self.schema
}
})
self.restore_path = 'mock/path/test.dump'
self.restore_params = RestoreParams.from_dict({
'ownerUri': self.test_uri,
'options': {
'path': self.restore_path,
'data_only': self.data_only,
'no_owner': self.no_owner,
'schema': self.schema
}
})
self.pg_dump_exe = 'pg_dump'
self.pg_restore_exe = 'pg_restore'
# Create the mock task for the tests
self.mock_action = mock.Mock()
self.mock_task = Task(None, None, None, None, None, self.request_context, self.mock_action)
self.mock_task.start = mock.Mock()
def test_get_pg_exe_path_local_linux(self):
"""Test the get_pg_exe_path function for linux when the service is running from source code"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/linux', ('10', '11'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/linux/10', ('bin', 'lib'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/linux/11', ('bin', 'lib'), ())
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code directly from source
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/ossdbtoolsservice_main.py')
# If I get the executable path on Linux
sys.platform = 'linux'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (10, 0))
# Then the path uses the linux directory and does not have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/linux/10/bin/pg_dump'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_local_mac(self):
"""Test the get_pg_exe_path function for mac when the service is running from source code"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/mac', ('9.5', '9.6'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/mac/9.5', ('bin', 'lib'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/mac/9.6', ('bin', 'lib'), ())
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code directly from source
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/ossdbtoolsservice_main.py')
# If I get the executable path on Mac
sys.platform = 'darwin'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (9, 6))
# Then the path uses the mac directory and does not have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/mac/9.6/bin/pg_dump'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_local_win(self):
"""Test the get_pg_exe_path function for windows when the service is running from source code"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win', ('11', '12'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/11', (), ('pg_dump', 'pg_restore')),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/12', (), ('pg_dump', 'pg_restore'))
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code directly from source
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/ossdbtoolsservice_main.py')
# If I get the executable path on Windows
sys.platform = 'win32'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (11, 0))
# Then the path uses the win directory and does have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/11/pg_dump.exe'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_frozen_linux(self):
"""Test the get_pg_exe_path function for linux when the service is running from a cx_freeze build"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/linux', ('10', '11'), ()),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/linux/10', ('bin', 'lib'), ()),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/linux/11', ('bin', 'lib'), ())
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code from a cx_freeze build
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/ossdbtoolsservice_main')
# If I get the executable path on Linux
sys.platform = 'linux'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (10, 0))
# Then the path uses the linux directory and does not have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/linux/10/bin/pg_dump'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_frozen_mac(self):
"""Test the get_pg_exe_path function for mac when the service is running from a cx_freeze build"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/mac', ('9.5', '9.6'), ()),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/mac/9.5', ('bin', 'lib'), ()),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/mac/9.6', ('bin', 'lib'), ())
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code from a cx_freeze build
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/ossdbtoolsservice_main')
# If I get the executable path on Mac
sys.platform = 'darwin'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (9, 6))
# Then the path uses the mac directory and does not have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/mac/9.6/bin/pg_dump'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_frozen_win(self):
"""Test the get_pg_exe_path function for windows when the service is running from a cx_freeze build"""
# Back up these values so that the test can overwrite them
old_arg0 = sys.argv[0]
old_platform = sys.platform
return_value = [
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/win', ('11', '12'), ()),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/win/11', (), ('pg_dump', 'pg_restore')),
('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/win/12', (), ('pg_dump', 'pg_restore'))
]
try:
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=True)):
# Override sys.argv[0] to simulate running the code from a cx_freeze build
sys.argv[0] = os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/ossdbtoolsservice_main')
# If I get the executable path on Windows
sys.platform = 'win32'
path = disaster_recovery_service._get_pg_exe_path(self.pg_dump_exe, (11, 0))
# Then the path uses the win directory and does have a trailing .exe
self.assertEqual(path, os.path.normpath('/ossdbtoolsservice/build/ossdbtoolsservice/pg_exes/win/11/pg_dump.exe'))
finally:
sys.argv[0] = old_arg0
sys.platform = old_platform
def test_get_pg_exe_path_does_not_exist(self):
"""Test that the get_pg_exe_path function throws an error if the executable being searched for does not exist"""
return_value = [
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win', ('11', '12'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/11', (), ('pg_dump', 'pg_restore')),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/12', (), ('pg_dump', 'pg_restore'))
]
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)):
with mock.patch('os.path.exists', new=mock.Mock(return_value=False)), self.assertRaises(ValueError):
disaster_recovery_service._get_pg_exe_path('not_pg_dump', (11, 0))
def test_get_pg_server_folder_does_not_exist(self):
"""Test that the get_pg_exe_path function throws an error if the server version folder being searched for does not exist"""
return_value = [
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win', ('11', '12'), ()),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/11', (), ('pg_dump', 'pg_restore')),
('/ossdbtoolsservice/ossdbtoolsservice/pg_exes/win/12', (), ('pg_dump', 'pg_restore'))
]
with mock.patch('os.walk', new=mock.Mock(return_value=return_value)), self.assertRaises(ValueError):
disaster_recovery_service._get_pg_exe_path('pg_dump', (9, 6))
def test_perform_backup(self):
"""Test that the perform_backup method passes the correct parameters to pg_dump"""
exe_name = self.pg_dump_exe
test_method = disaster_recovery_service._perform_backup
test_params = self.backup_params
expected_args = [
f'--file={self.backup_path}',
'--format=p',
f'--dbname={self.dbname}',
f'--host={self.host}',
f'--username={self.username}',
'--no-owner',
f'--schema={self.schema}',
            '--port=5432'
]
self._test_perform_backup_restore_internal(exe_name, test_method, test_params, expected_args)
def test_perform_restore(self):
"""Test that the perform_restore method passes the correct parameters to pg_restore"""
exe_name = self.pg_restore_exe
test_method = disaster_recovery_service._perform_restore
test_params = self.restore_params
expected_args = [
f'{self.restore_path}',
f'--dbname={self.dbname}',
f'--host={self.host}',
f'--username={self.username}',
'--no-owner',
f'--schema={self.schema}',
            '--port=5432'
]
self._test_perform_backup_restore_internal(exe_name, test_method, test_params, expected_args)
def _test_perform_backup_restore_internal(self, exe_name: str, test_method: Callable, test_params, expected_args: List[str]):
mock_pg_path = f'mock/{exe_name}'
mock_process = mock.Mock()
mock_process.returncode = 0
mock_process.communicate = mock.Mock(return_value=(b'', b''))
mockConnection = pg_utils.MockPGServerConnection(None)
with mock.patch('ossdbtoolsservice.disaster_recovery.disaster_recovery_service._get_pg_exe_path', new=mock.Mock(return_value=mock_pg_path)) \
as mock_get_path, mock.patch('subprocess.Popen', new=mock.Mock(return_value=mock_process)) as mock_popen:
# If I perform a backup/restore
with mock.patch('ossdbtoolsservice.connection.ConnectionInfo.get_connection', new=mock.Mock(return_value=mockConnection)):
task_result = test_method(self.connection_info, test_params, self.mock_task)
# Then the code got the path of the executable
mock_get_path.assert_called_once_with(exe_name, mockConnection.server_version)
# And ran the executable as a subprocess
mock_popen.assert_called_once()
# And then called communicate on the process
mock_process.communicate.assert_called_once_with()
# And the arguments for the subprocess.Popen call were the expected values
actual_args = mock_popen.call_args[0][0]
self.assertEqual(actual_args[0], mock_pg_path)
pg_exe_flags = actual_args[1:]
for expected_arg in expected_args:
self.assertIn(expected_arg, pg_exe_flags)
self.assertEqual(len(expected_args), len(pg_exe_flags))
# And the task returns a successful result
self.assertIs(task_result.status, TaskStatus.SUCCEEDED)
def test_perform_backup_fails(self):
"""Test that the perform_backup method handles failures by recording pg_dump's stderr output and marking the task failed"""
self._test_perform_backup_restore_fails_internal(self.pg_dump_exe, disaster_recovery_service._perform_backup, self.backup_params)
def test_perform_restore_fails(self):
"""Test that the perform_restore method handles failures by recording pg_dump's stderr output and marking the task failed"""
self._test_perform_backup_restore_fails_internal(self.pg_restore_exe, disaster_recovery_service._perform_restore, self.restore_params)
def _test_perform_backup_restore_fails_internal(self, exe_name: str, test_method: Callable, test_params):
mock_pg_path = f'mock/{exe_name}'
mock_process = mock.Mock()
mock_process.returncode = 1
test_error_message = b'test error message'
mock_process.communicate = mock.Mock(return_value=(b'', test_error_message))
with mock.patch('ossdbtoolsservice.disaster_recovery.disaster_recovery_service._get_pg_exe_path',
new=mock.Mock(return_value=mock_pg_path)), mock.patch('subprocess.Popen', new=mock.Mock(return_value=mock_process)):
with mock.patch('ossdbtoolsservice.connection.ConnectionInfo.get_connection', new=mock.Mock(return_value=pg_utils.MockPGServerConnection(None))):
# If I perform a backup/restore that fails
task_result = test_method(self.connection_info, test_params, self.mock_task)
# Then the task returns a failed result
self.assertIs(task_result.status, TaskStatus.FAILED)
# And the task contains the error message from stderr
self.assertEqual(task_result.error_message, str(test_error_message, 'utf-8'))
def test_perform_backup_no_exe(self):
"""Test that the perform_backup task fails when the pg_dump exe is not found"""
self._test_perform_backup_restore_no_exe_internal(disaster_recovery_service._perform_backup, self.backup_params)
def test_perform_restore_no_exe(self):
"""Test that the perform_restore task fails when the pg_restore exe is not found"""
self._test_perform_backup_restore_no_exe_internal(disaster_recovery_service._perform_restore, self.restore_params)
def _test_perform_backup_restore_no_exe_internal(self, test_method: Callable, test_params):
mockConnection = pg_utils.MockPGServerConnection(None)
with mock.patch('os.path.exists', new=mock.Mock(return_value=False)), mock.patch('subprocess.Popen') as mock_popen:
with mock.patch('ossdbtoolsservice.connection.ConnectionInfo.get_connection', new=mock.Mock(return_value=mockConnection)):
# If I perform a restore when the pg_restore executable cannot be found
task_result = test_method(self.connection_info, test_params, mock.Mock())
# Then the task fails and does try to kick off a new process
self.assertIs(task_result.status, TaskStatus.FAILED)
mock_popen.assert_not_called()
def test_handle_backup_request(self):
"""Test that the backup request handler responds properly and kicks off a task to perform the backup"""
self._test_handle_backup_restore_internal(self.disaster_recovery_service.handle_backup_request, disaster_recovery_service._perform_backup,
self.backup_params)
def test_handle_restore_request(self):
"""Test that the restore request handler responds properly and kicks off a task to perform the restore"""
self._test_handle_backup_restore_internal(self.disaster_recovery_service.handle_restore_request, disaster_recovery_service._perform_restore,
self.restore_params)
def _test_handle_backup_restore_internal(self, test_handler: Callable, test_method: Callable, test_params):
# Set up the connection service to return the test's connection information
self.connection_service.owner_to_connection_map[self.test_uri] = self.connection_info
# Set up a mock task so that the restore code does not actually run in a separate thread
with mock.patch('ossdbtoolsservice.disaster_recovery.disaster_recovery_service.Task', new=mock.Mock(return_value=self.mock_task)) \
as mock_task_constructor, mock.patch('functools.partial', new=mock.Mock(return_value=self.mock_action)) as mock_partial:
# When I call the backup/restore request handler
test_handler(self.request_context, test_params)
# Then a mock task is created and started
mock_task_constructor.assert_called_once()
self.mock_task.start.assert_called_once()
# And the mock task was initialized with the expected parameters
parameters = mock_task_constructor.call_args[0]
self.assertEqual(parameters[2], constants.PG_PROVIDER_NAME)
self.assertEqual(parameters[3], self.host)
self.assertEqual(parameters[4], self.dbname)
self.assertIs(parameters[6], self.mock_action)
mock_partial.assert_called_once_with(test_method, self.connection_info, test_params)
# And the handler sends an empty response to indicate success
self.assertEqual(self.request_context.last_response_params, {})
def test_handle_backup_request_no_connection(self):
"""Test that the backup request handler responds with an error if there is no connection for the given owner URI"""
self._test_handle_backup_restore_request_no_connection(self.disaster_recovery_service.handle_backup_request, self.backup_params)
def test_handle_restore_request_no_connection(self):
"""Test that the restore request handler responds with an error if there is no connection for the given owner URI"""
self._test_handle_backup_restore_request_no_connection(self.disaster_recovery_service.handle_restore_request, self.restore_params)
def _test_handle_backup_restore_request_no_connection(self, test_handler: Callable, test_params):
# Set up a mock task so that the restore code does not actually run in a separate thread
with mock.patch('ossdbtoolsservice.disaster_recovery.disaster_recovery_service.Task', new=mock.Mock(return_value=self.mock_task)) \
as mock_task_constructor, mock.patch('functools.partial', new=mock.Mock(return_value=self.mock_action)):
# If I call the request handler and there is no connection corresponding to the given owner URI
test_handler(self.request_context, test_params)
# Then a mock task is not created
mock_task_constructor.assert_not_called()
# And the handler sends an error response to indicate failure
self.assertIsNotNone(self.request_context.last_error_message)
def test_canceled_task_does_not_spawn_process(self):
"""Test that the pg_dump/pg_restore process is not created if the task has been canceled"""
# Set up the task to be canceled
self.mock_task.canceled = True
mockConnection = pg_utils.MockPGServerConnection(None)
with mock.patch('subprocess.Popen', new=mock.Mock()) as mock_popen:
with mock.patch('ossdbtoolsservice.connection.ConnectionInfo.get_connection', new=mock.Mock(return_value=mockConnection)):
# If I try to perform a backup/restore for a canceled task
disaster_recovery_service._perform_backup_restore(self.connection_info, [], {}, self.mock_task)
# Then the process was not created
mock_popen.assert_not_called()
def test_cancel_backup(self):
"""Test that backups can be canceled by calling terminate on the pg_dump process"""
# Set up the task to be canceled when communicate would normally be called
def cancel_function(*_, **__):
self.mock_task.cancel()
return mock.DEFAULT
self.mock_task.status = TaskStatus.IN_PROGRESS
mock_process = mock.Mock()
mock_process.communicate = mock.Mock(return_value=(None, None), side_effect=cancel_function)
mock_process.terminate = mock.Mock()
mock_process.returncode = 0
with mock.patch('subprocess.Popen', new=mock.Mock(return_value=mock_process)):
# If I perform a backup/restore that kicks off the subprocess and then I cancel the task
disaster_recovery_service._perform_backup_restore(self.connection_info, [], {}, self.mock_task)
# Then the backup/restore process was terminated
mock_process.terminate.assert_called_once()
|
py | 7dffee5cb743c1b1e80a8db864f50dc4286aaaf4 | #!/usr/bin/env python
# encoding: utf-8
"""
ldapUserSync.py
This little script will sync ldap admin user with openvas user. A work around to openvas per-user ldap limitation
Created by lhan on 2015-07-17.
"""
import os
import sys
import getopt
import shlex
import subprocess
from sets import Set
from config import config
from os import environ
def get_config(key):
try:
envKey = key.upper()
value = environ[envKey]
except:
value = config.get(key)
return value
help_message = '''
Sync admin user from ldap to openvas
'''
# LDAP Configuration
host = get_config('ldap_host')
bindDN = get_config('ldap_bind_dn')
baseDN = get_config('ldap_base_dn')
ldapFilter = get_config('ldap_admin_filter')
ldapPwd = get_config('ldap_password')
# OpenVAS configuration
ovUser = 'admin'
ovPwd = get_config('ov_password')
ADMIN_ROLE_ID = '7a8cb5b4-b74d-11e2-8187-406186ea4fc5'
UID_ATT = get_config('ldap_username_attr')
ldapUsers = Set([])
ovUsers = Set([])
# Utils
BASH = lambda x: (subprocess.Popen(shlex.split(x), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False).communicate()[0])
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "hv:H:D:b:w:f:u:W:", ["help", "host=", "bind=", "base=", "ldap-pass=", "ldap-filter=", "username=", "password="])
except getopt.error, msg:
raise Usage(msg)
# option processing
for option, value in opts:
if option == "-v":
verbose = True
if option in ("-h", "--help"):
raise Usage(help_message)
# ldap host
if option in ("-H", "--host"):
global host
host = value
# ldap bindDN(-D)
if option in ("-D", "--bind"):
global bindDN
bindDN = value
# ldap baseDN(-b)
if option in ("-b", "--base"):
global baseDN
baseDN = value
# ldap password(-w)
if option in ("-w", "--ldap-pass"):
global ldapPwd
ldapPwd = value
# filter(-f)
if option in ("-f", "--ldap-filter"):
global ldapFilter
ldapFilter = value
# openvas username (-u)
if option in ("-u", "--username"):
global ovUser
ovUser = value
# openvas password(-W)
if option in ("-W", "--password"):
global ovPwd
ovPwd = value
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
syncUsers()
def getLdapUser():
global ldapUsers
if len(ldapUsers) == 0:
ldapUsersCmd = "ldapsearch -H ldaps://%s -D %s -b %s -w %s \'(%s)\' %s"%(host, bindDN, baseDN, ldapPwd, ldapFilter, UID_ATT)
ldapUsersCmdResponse = BASH(ldapUsersCmd)
uidAttrP = '%s: '%(UID_ATT)
for line in ldapUsersCmdResponse.split('\n'):
if line.find(uidAttrP) != -1 :
ldapUsers.add(line.split(uidAttrP)[1])
return ldapUsers
def getOpenVasUsers():
global ovUsers
if len(ovUsers) == 0:
ovUsersCmd = "openvasmd --get-users"
ovUsersCmdResponse = BASH(ovUsersCmd)
for line in ovUsersCmdResponse.split('\n'):
if len(line) > 0:
ovUsers.add(line)
return ovUsers
def createUser(userName):
cmd = '''omp -u %s -w %s -X "<create_user><name>%s</name><role id='%s'/><sources><source>ldap_connect</source></sources></create_user>"'''%(ovUser, ovPwd, userName, ADMIN_ROLE_ID)
resp = BASH(cmd)
if resp.find("OK, resource created") != -1:
print "Sucess to create user %s"%(userName)
return True
else:
print "Fail to create user %s: %s"%(userName, resp)
return False
def syncUsers():
ldapUsers = getLdapUser()
ovUsers = getOpenVasUsers()
usersToCreate = ldapUsers - ovUsers
map(createUser, usersToCreate)
if __name__ == "__main__":
sys.exit(main())
|
py | 7dffeeee5ba288fbf97835e6cd94e468d8ee1e4f | # Copyright (c) 2008-2014 Simplistix Ltd
# See license.txt for license details.
import os
from mock import Mock
from tempfile import mkdtemp
from testfixtures import Replacer, ShouldRaise, TempDirectory, compare, tempdir
from unittest import TestCase
from ..rmtree import rmtree
class TestTempDir(TestCase):
@tempdir()
def test_simple(self,d):
d.write('something', b'stuff')
d.write('.svn', b'stuff')
d.check(
'.svn',
'something',
)
@tempdir()
def test_subdirs(self,d):
subdir = ['some','thing']
d.write(subdir+['something'], b'stuff')
d.write(subdir+['.svn'], b'stuff')
d.check_dir(subdir,
'.svn',
'something',
)
@tempdir()
def test_not_same(self,d):
d.write('something', b'stuff')
with ShouldRaise(AssertionError(
"Sequence not as expected:\n\nsame:\n()\n\nfirst:\n('.svn', 'something')\n\nsecond:\n('something',)"
)):
d.check('.svn', 'something')
@tempdir(ignore=('.svn',))
def test_ignore(self,d):
d.write('something', b'stuff')
d.write('.svn', b'stuff')
d.check('something', )
def test_cleanup_properly(self):
r = Replacer()
try:
m = Mock()
d = mkdtemp()
m.return_value = d
r.replace('testfixtures.tempdirectory.mkdtemp',m)
self.failUnless(os.path.exists(d))
self.assertFalse(m.called)
@tempdir()
def test_method(d):
d.write('something', b'stuff')
d.check('something', )
self.assertFalse(m.called)
compare(os.listdir(d),[])
test_method()
self.assertTrue(m.called)
self.failIf(os.path.exists(d))
finally:
r.restore()
if os.path.exists(d):
# only runs if the test fails!
rmtree(d) # pragma: no cover
@tempdir()
def test_cleanup_test_okay_with_deleted_dir(self,d):
rmtree(d.path)
@tempdir()
def test_decorator_returns_tempdirectory(self,d):
# check for what we get, so we only have to write
# tests in test_tempdirectory.py
self.failUnless(isinstance(d,TempDirectory))
def test_dont_create_or_cleanup_with_path(self):
with Replacer() as r:
m = Mock()
r.replace('testfixtures.tempdirectory.mkdtemp',m)
r.replace('testfixtures.tempdirectory.rmtree',m)
@tempdir(path='foo')
def test_method(d):
compare(d.path,'foo')
test_method()
self.assertFalse(m.called)
|
py | 7dffef7772017a2ee7ccd78b7d6186144db5294b | """
Implementation of math operations on Array objects.
"""
import math
from collections import namedtuple
from enum import IntEnum
from functools import partial
import operator
import llvmlite.ir
import numpy as np
from numba import generated_jit
from numba.core import types, cgutils
from numba.core.extending import overload, overload_method, register_jitable
from numba.np.numpy_support import as_dtype, type_can_asarray
from numba.np.numpy_support import numpy_version
from numba.np.numpy_support import is_nonelike, check_is_integer
from numba.core.imputils import (lower_builtin, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
from numba.core.typing import signature
from numba.np.arrayobj import make_array, load_item, store_item, _empty_nd_impl
from numba.np.linalg import ensure_blas
from numba.core.extending import intrinsic
from numba.core.errors import (RequireLiteralValue, TypingError,
NumbaValueError, NumbaNotImplementedError,
NumbaTypeError)
from numba.core.overload_glue import glue_lowering
from numba.cpython.unsafe.tuple import tuple_setitem
def _check_blas():
# Checks if a BLAS is available so e.g. dot will work
try:
ensure_blas()
except ImportError:
return False
return True
_HAVE_BLAS = _check_blas()
@intrinsic
def _create_tuple_result_shape(tyctx, shape_list, shape_tuple):
"""
This routine converts shape list where the axis dimension has already
been popped to a tuple for indexing of the same size. The original shape
tuple is also required because it contains a length field at compile time
whereas the shape list does not.
"""
# The new tuple's size is one less than the original tuple since axis
# dimension removed.
nd = len(shape_tuple) - 1
# The return type of this intrinsic is an int tuple of length nd.
tupty = types.UniTuple(types.intp, nd)
# The function signature for this intrinsic.
function_sig = tupty(shape_list, shape_tuple)
def codegen(cgctx, builder, signature, args):
lltupty = cgctx.get_value_type(tupty)
# Create an empty int tuple.
tup = cgutils.get_null_value(lltupty)
# Get the shape list from the args and we don't need shape tuple.
[in_shape, _] = args
def array_indexer(a, i):
return a[i]
# loop to fill the tuple
for i in range(nd):
dataidx = cgctx.get_constant(types.intp, i)
# compile and call array_indexer
data = cgctx.compile_internal(builder, array_indexer,
types.intp(shape_list, types.intp),
[in_shape, dataidx])
tup = builder.insert_value(tup, data, i)
return tup
return function_sig, codegen
@intrinsic
def _gen_index_tuple(tyctx, shape_tuple, value, axis):
"""
Generates a tuple that can be used to index a specific slice from an
array for sum with axis. shape_tuple is the size of the dimensions of
the input array. 'value' is the value to put in the indexing tuple
in the axis dimension and 'axis' is that dimension. For this to work,
axis has to be a const.
"""
if not isinstance(axis, types.Literal):
raise RequireLiteralValue('axis argument must be a constant')
# Get the value of the axis constant.
axis_value = axis.literal_value
# The length of the indexing tuple to be output.
nd = len(shape_tuple)
# If the axis value is impossible for the given size array then
# just fake it like it was for axis 0. This will stop compile errors
# when it looks like it could be called from array_sum_axis but really
# can't because that routine checks the axis mismatch and raise an
# exception.
if axis_value >= nd:
axis_value = 0
# Calculate the type of the indexing tuple. All the non-axis
# dimensions have slice2 type and the axis dimension has int type.
before = axis_value
after = nd - before - 1
types_list = []
types_list += [types.slice2_type] * before
types_list += [types.intp]
types_list += [types.slice2_type] * after
# Creates the output type of the function.
tupty = types.Tuple(types_list)
# Defines the signature of the intrinsic.
function_sig = tupty(shape_tuple, value, axis)
def codegen(cgctx, builder, signature, args):
lltupty = cgctx.get_value_type(tupty)
# Create an empty indexing tuple.
tup = cgutils.get_null_value(lltupty)
# We only need value of the axis dimension here.
# The rest are constants defined above.
[_, value_arg, _] = args
def create_full_slice():
return slice(None, None)
# loop to fill the tuple with slice(None,None) before
# the axis dimension.
# compile and call create_full_slice
slice_data = cgctx.compile_internal(builder, create_full_slice,
types.slice2_type(),
[])
for i in range(0, axis_value):
tup = builder.insert_value(tup, slice_data, i)
# Add the axis dimension 'value'.
tup = builder.insert_value(tup, value_arg, axis_value)
# loop to fill the tuple with slice(None,None) after
# the axis dimension.
for i in range(axis_value + 1, nd):
tup = builder.insert_value(tup, slice_data, i)
return tup
return function_sig, codegen
#----------------------------------------------------------------------------
# Basic stats and aggregates
@lower_builtin(np.sum, types.Array)
@lower_builtin("array.sum", types.Array)
def array_sum(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr):
c = zero
for v in np.nditer(arr):
c += v.item()
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@register_jitable
def _array_sum_axis_nop(arr, v):
return arr
def gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero):
def inner(arr, axis):
"""
function that performs sums over one specific axis
The third parameter to gen_index_tuple that generates the indexing
tuples has to be a const so we can't just pass "axis" through since
that isn't const. We can check for specific values and have
different instances that do take consts. Supporting axis summation
only up to the fourth dimension for now.
typing/arraydecl.py:sum_expand defines the return type for sum with
axis. It is one dimension less than the input array.
"""
ndim = arr.ndim
if not is_axis_const:
# Catch where axis is negative or greater than 3.
if axis < 0 or axis > 3:
raise ValueError("Numba does not support sum with axis "
"parameter outside the range 0 to 3.")
# Catch the case where the user misspecifies the axis to be
# more than the number of the array's dimensions.
if axis >= ndim:
raise ValueError("axis is out of bounds for array")
# Convert the shape of the input array to a list.
ashape = list(arr.shape)
# Get the length of the axis dimension.
axis_len = ashape[axis]
# Remove the axis dimension from the list of dimensional lengths.
ashape.pop(axis)
# Convert this shape list back to a tuple using above intrinsic.
ashape_without_axis = _create_tuple_result_shape(ashape, arr.shape)
# Tuple needed here to create output array with correct size.
result = np.full(ashape_without_axis, zero, type(zero))
# Iterate through the axis dimension.
for axis_index in range(axis_len):
if is_axis_const:
# constant specialized version works for any valid axis value
index_tuple_generic = _gen_index_tuple(arr.shape, axis_index,
const_axis_val)
result += arr[index_tuple_generic]
else:
# Generate a tuple used to index the input array.
# The tuple is ":" in all dimensions except the axis
# dimension where it is "axis_index".
if axis == 0:
index_tuple1 = _gen_index_tuple(arr.shape, axis_index, 0)
result += arr[index_tuple1]
elif axis == 1:
index_tuple2 = _gen_index_tuple(arr.shape, axis_index, 1)
result += arr[index_tuple2]
elif axis == 2:
index_tuple3 = _gen_index_tuple(arr.shape, axis_index, 2)
result += arr[index_tuple3]
elif axis == 3:
index_tuple4 = _gen_index_tuple(arr.shape, axis_index, 3)
result += arr[index_tuple4]
return op(result, 0)
return inner
@lower_builtin(np.sum, types.Array, types.intp, types.DTypeSpec)
@lower_builtin(np.sum, types.Array, types.IntegerLiteral, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.intp, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.IntegerLiteral, types.DTypeSpec)
def array_sum_axis_dtype(context, builder, sig, args):
retty = sig.return_type
zero = getattr(retty, 'dtype', retty)(0)
# if the return is scalar in type then "take" the 0th element of the
# 0d array accumulator as the return value
if getattr(retty, 'ndim', None) is None:
op = np.take
else:
op = _array_sum_axis_nop
[ty_array, ty_axis, ty_dtype] = sig.args
is_axis_const = False
const_axis_val = 0
if isinstance(ty_axis, types.Literal):
# this special-cases for constant axis
const_axis_val = ty_axis.literal_value
# fix negative axis
if const_axis_val < 0:
const_axis_val = ty_array.ndim + const_axis_val
if const_axis_val < 0 or const_axis_val > ty_array.ndim:
raise ValueError("'axis' entry is out of bounds")
ty_axis = context.typing_context.resolve_value_type(const_axis_val)
axis_val = context.get_constant(ty_axis, const_axis_val)
# rewrite arguments
args = args[0], axis_val, args[2]
# rewrite sig
sig = sig.replace(args=[ty_array, ty_axis, ty_dtype])
is_axis_const = True
gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero)
compiled = register_jitable(gen_impl)
def array_sum_impl_axis(arr, axis, dtype):
return compiled(arr, axis)
res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.sum, types.Array, types.DTypeSpec)
@lower_builtin("array.sum", types.Array, types.DTypeSpec)
def array_sum_dtype(context, builder, sig, args):
zero = sig.return_type(0)
def array_sum_impl(arr, dtype):
c = zero
for v in np.nditer(arr):
c += v.item()
return c
res = context.compile_internal(builder, array_sum_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.sum, types.Array, types.intp)
@lower_builtin(np.sum, types.Array, types.IntegerLiteral)
@lower_builtin("array.sum", types.Array, types.intp)
@lower_builtin("array.sum", types.Array, types.IntegerLiteral)
def array_sum_axis(context, builder, sig, args):
retty = sig.return_type
zero = getattr(retty, 'dtype', retty)(0)
# if the return is scalar in type then "take" the 0th element of the
# 0d array accumulator as the return value
if getattr(retty, 'ndim', None) is None:
op = np.take
else:
op = _array_sum_axis_nop
[ty_array, ty_axis] = sig.args
is_axis_const = False
const_axis_val = 0
if isinstance(ty_axis, types.Literal):
# this special-cases for constant axis
const_axis_val = ty_axis.literal_value
# fix negative axis
if const_axis_val < 0:
const_axis_val = ty_array.ndim + const_axis_val
if const_axis_val < 0 or const_axis_val > ty_array.ndim:
msg = f"'axis' entry ({const_axis_val}) is out of bounds"
raise NumbaValueError(msg)
ty_axis = context.typing_context.resolve_value_type(const_axis_val)
axis_val = context.get_constant(ty_axis, const_axis_val)
# rewrite arguments
args = args[0], axis_val
# rewrite sig
sig = sig.replace(args=[ty_array, ty_axis])
is_axis_const = True
gen_impl = gen_sum_axis_impl(is_axis_const, const_axis_val, op, zero)
compiled = register_jitable(gen_impl)
def array_sum_impl_axis(arr, axis):
return compiled(arr, axis)
res = context.compile_internal(builder, array_sum_impl_axis, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
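# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# how the axis-aware sum lowerings above are reached from user code. Assumes
# the usual `numba.njit` entry point; the helper name `_demo_sum_axis` is
# purely for illustration.
def _demo_sum_axis():
    from numba import njit
    @njit
    def sum_axis(a, axis):
        # a runtime (non-literal) axis takes the generic path in
        # gen_sum_axis_impl, which only supports axis values 0..3
        return a.sum(axis)
    a = np.arange(24.0).reshape(2, 3, 4)
    # the result drops the summed dimension, per sum_expand typing
    return sum_axis(a, 1)  # shape (2, 4)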
@lower_builtin(np.prod, types.Array)
@lower_builtin("array.prod", types.Array)
def array_prod(context, builder, sig, args):
def array_prod_impl(arr):
c = 1
for v in np.nditer(arr):
c *= v.item()
return c
res = context.compile_internal(builder, array_prod_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.cumsum, types.Array)
@lower_builtin("array.cumsum", types.Array)
def array_cumsum(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
zero = scalar_dtype(0)
def array_cumsum_impl(arr):
out = np.empty(arr.size, dtype)
c = zero
for idx, v in enumerate(arr.flat):
c += v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumsum_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.cumprod, types.Array)
@lower_builtin("array.cumprod", types.Array)
def array_cumprod(context, builder, sig, args):
scalar_dtype = sig.return_type.dtype
dtype = as_dtype(scalar_dtype)
def array_cumprod_impl(arr):
out = np.empty(arr.size, dtype)
c = 1
for idx, v in enumerate(arr.flat):
c *= v
out[idx] = c
return out
res = context.compile_internal(builder, array_cumprod_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.mean, types.Array)
@lower_builtin("array.mean", types.Array)
def array_mean(context, builder, sig, args):
zero = sig.return_type(0)
def array_mean_impl(arr):
# Can't use the naive `arr.sum() / arr.size`, as it would return
# a wrong result on integer sum overflow.
c = zero
for v in np.nditer(arr):
c += v.item()
return c / arr.size
res = context.compile_internal(builder, array_mean_impl, sig, args,
locals=dict(c=sig.return_type))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.var, types.Array)
@lower_builtin("array.var", types.Array)
def array_var(context, builder, sig, args):
def array_var_impl(arr):
# Compute the mean
m = arr.mean()
# Compute the sum of square diffs
ssd = 0
for v in np.nditer(arr):
val = (v.item() - m)
ssd += np.real(val * np.conj(val))
return ssd / arr.size
res = context.compile_internal(builder, array_var_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_builtin(np.std, types.Array)
@lower_builtin("array.std", types.Array)
def array_std(context, builder, sig, args):
def array_std_impl(arry):
return arry.var() ** 0.5
res = context.compile_internal(builder, array_std_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
def zero_dim_msg(fn_name):
msg = ("zero-size array to reduction operation "
"{0} which has no identity".format(fn_name))
return msg
def _is_nat(x):
pass
@overload(_is_nat)
def ol_is_nat(x):
if numpy_version >= (1, 18):
return lambda x: np.isnat(x)
else:
nat = x('NaT')
return lambda x: x == nat
@lower_builtin(np.min, types.Array)
@lower_builtin("array.min", types.Array)
def array_min(context, builder, sig, args):
ty = sig.args[0].dtype
MSG = zero_dim_msg('minimum')
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
# NP < 1.18: NaT is smaller than every other value, but it is
# ignored as far as min() is concerned.
# NP >= 1.18: NaT dominates like NaN
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
if _is_nat(min_value):
return min_value
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return v
else:
continue
if v < min_value:
min_value = v
return min_value
elif isinstance(ty, types.Complex):
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
for view in it:
v = view.item()
if v.real < min_value.real:
min_value = v
elif v.real == min_value.real:
if v.imag < min_value.imag:
min_value = v
return min_value
elif isinstance(ty, types.Float):
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
if np.isnan(min_value):
return min_value
for view in it:
v = view.item()
if np.isnan(v):
return v
if v < min_value:
min_value = v
return min_value
else:
def array_min_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
min_value = next(it).take(0)
for view in it:
v = view.item()
if v < min_value:
min_value = v
return min_value
res = context.compile_internal(builder, array_min_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@lower_builtin(np.max, types.Array)
@lower_builtin("array.max", types.Array)
def array_max(context, builder, sig, args):
ty = sig.args[0].dtype
MSG = zero_dim_msg('maximum')
if isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
        # NP < 1.18: NaT is smaller than every other value, but it is
        # ignored as far as max() is concerned.
# NP >= 1.18: NaT dominates like NaN
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
if _is_nat(max_value):
return max_value
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return v
else:
continue
if v > max_value:
max_value = v
return max_value
elif isinstance(ty, types.Complex):
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
for view in it:
v = view.item()
if v.real > max_value.real:
max_value = v
elif v.real == max_value.real:
if v.imag > max_value.imag:
max_value = v
return max_value
elif isinstance(ty, types.Float):
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
if np.isnan(max_value):
return max_value
for view in it:
v = view.item()
if np.isnan(v):
return v
if v > max_value:
max_value = v
return max_value
else:
def array_max_impl(arry):
if arry.size == 0:
raise ValueError(MSG)
it = np.nditer(arry)
max_value = next(it).take(0)
for view in it:
v = view.item()
if v > max_value:
max_value = v
return max_value
res = context.compile_internal(builder, array_max_impl, sig, args)
return impl_ret_borrowed(context, builder, sig.return_type, res)
@register_jitable
def array_argmin_impl_datetime(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
it = np.nditer(arry)
min_value = next(it).take(0)
min_idx = 0
if _is_nat(min_value):
return min_idx
idx = 1
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return idx
else:
idx += 1
continue
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
@register_jitable
def array_argmin_impl_float(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
if np.isnan(min_value):
return min_idx
idx = 0
for v in arry.flat:
if np.isnan(v):
return idx
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
@register_jitable
def array_argmin_impl_generic(arry):
if arry.size == 0:
raise ValueError("attempt to get argmin of an empty sequence")
for v in arry.flat:
min_value = v
min_idx = 0
break
else:
raise RuntimeError('unreachable')
idx = 0
for v in arry.flat:
if v < min_value:
min_value = v
min_idx = idx
idx += 1
return min_idx
@overload(np.argmin)
@overload_method(types.Array, "argmin")
def array_argmin(arr, axis=None):
if isinstance(arr.dtype, (types.NPDatetime, types.NPTimedelta)):
flatten_impl = array_argmin_impl_datetime
elif isinstance(arr.dtype, types.Float):
flatten_impl = array_argmin_impl_float
else:
flatten_impl = array_argmin_impl_generic
if is_nonelike(axis):
def array_argmin_impl(arr, axis=None):
return flatten_impl(arr)
else:
array_argmin_impl = build_argmax_or_argmin_with_axis_impl(
arr, axis, flatten_impl
)
return array_argmin_impl
@register_jitable
def array_argmax_impl_datetime(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
it = np.nditer(arry)
max_value = next(it).take(0)
max_idx = 0
if _is_nat(max_value):
return max_idx
idx = 1
for view in it:
v = view.item()
if _is_nat(v):
if numpy_version >= (1, 18):
return idx
else:
idx += 1
continue
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
@register_jitable
def array_argmax_impl_float(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
if np.isnan(max_value):
return max_idx
idx = 0
for v in arry.flat:
if np.isnan(v):
return idx
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
@register_jitable
def array_argmax_impl_generic(arry):
if arry.size == 0:
raise ValueError("attempt to get argmax of an empty sequence")
for v in arry.flat:
max_value = v
max_idx = 0
break
idx = 0
for v in arry.flat:
if v > max_value:
max_value = v
max_idx = idx
idx += 1
return max_idx
def build_argmax_or_argmin_with_axis_impl(arr, axis, flatten_impl):
"""
Given a function that implements the logic for handling a flattened
array, return the implementation function.
"""
check_is_integer(axis, "axis")
retty = types.intp
tuple_buffer = tuple(range(arr.ndim))
def impl(arr, axis=None):
if axis < 0:
axis = arr.ndim + axis
if axis < 0 or axis >= arr.ndim:
raise ValueError("axis is out of bounds")
# Short circuit 1-dimensional arrays:
if arr.ndim == 1:
return flatten_impl(arr)
# Make chosen axis the last axis:
tmp = tuple_buffer
for i in range(axis, arr.ndim - 1):
tmp = tuple_setitem(tmp, i, i + 1)
transpose_index = tuple_setitem(tmp, arr.ndim - 1, axis)
transposed_arr = arr.transpose(transpose_index)
# Flatten along that axis; since we've transposed, we can just get
# batches off the overall flattened array.
m = transposed_arr.shape[-1]
raveled = transposed_arr.ravel()
assert raveled.size == arr.size
assert transposed_arr.size % m == 0
out = np.empty(transposed_arr.size // m, retty)
for i in range(out.size):
out[i] = flatten_impl(raveled[i * m:(i + 1) * m])
# Reshape based on axis we didn't flatten over:
return out.reshape(transposed_arr.shape[:-1])
return impl
@overload(np.argmax)
@overload_method(types.Array, "argmax")
def array_argmax(arr, axis=None):
if isinstance(arr.dtype, (types.NPDatetime, types.NPTimedelta)):
flatten_impl = array_argmax_impl_datetime
elif isinstance(arr.dtype, types.Float):
flatten_impl = array_argmax_impl_float
else:
flatten_impl = array_argmax_impl_generic
if is_nonelike(axis):
def array_argmax_impl(arr, axis=None):
return flatten_impl(arr)
else:
array_argmax_impl = build_argmax_or_argmin_with_axis_impl(
arr, axis, flatten_impl
)
return array_argmax_impl
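# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.argmax with an axis argument routes through
# build_argmax_or_argmin_with_axis_impl above (move the chosen axis to the
# end, ravel, then reduce contiguous batches). Assumes the usual `numba.njit`
# entry point; `_demo_argmax_axis` is purely for illustration.
def _demo_argmax_axis():
    from numba import njit
    @njit
    def argmax_along(a, axis):
        return np.argmax(a, axis)
    a = np.array([[1.0, 3.0, 2.0],
                  [9.0, 0.0, 4.0]])
    return argmax_along(a, 1)  # array([1, 0])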
@overload(np.all)
@overload_method(types.Array, "all")
def np_all(a):
def flat_all(a):
for v in np.nditer(a):
if not v.item():
return False
return True
return flat_all
@overload(np.any)
@overload_method(types.Array, "any")
def np_any(a):
def flat_any(a):
for v in np.nditer(a):
if v.item():
return True
return False
return flat_any
@overload(np.average)
def np_average(arr, axis=None, weights=None):
if weights is None or isinstance(weights, types.NoneType):
def np_average_impl(arr, axis=None, weights=None):
arr = np.asarray(arr)
return np.mean(arr)
else:
if axis is None or isinstance(axis, types.NoneType):
def np_average_impl(arr, axis=None, weights=None):
arr = np.asarray(arr)
weights = np.asarray(weights)
if arr.shape != weights.shape:
if axis is None:
raise TypeError(
"Numba does not support average when shapes of "
"a and weights differ.")
if weights.ndim != 1:
raise TypeError(
"1D weights expected when shapes of "
"a and weights differ.")
scl = np.sum(weights)
if scl == 0.0:
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized.")
avg = np.sum(np.multiply(arr, weights)) / scl
return avg
else:
def np_average_impl(arr, axis=None, weights=None):
raise TypeError("Numba does not support average with axis.")
return np_average_impl
def get_isnan(dtype):
"""
A generic isnan() function
"""
if isinstance(dtype, (types.Float, types.Complex)):
return np.isnan
else:
@register_jitable
def _trivial_isnan(x):
return False
return _trivial_isnan
@overload(np.iscomplex)
def np_iscomplex(x):
if type_can_asarray(x):
# NumPy uses asanyarray here!
return lambda x: np.asarray(x).imag != 0
return None
@overload(np.isreal)
def np_isreal(x):
if type_can_asarray(x):
# NumPy uses asanyarray here!
return lambda x: np.asarray(x).imag == 0
return None
@overload(np.iscomplexobj)
def iscomplexobj(x):
# Implementation based on NumPy
# https://github.com/numpy/numpy/blob/d9b1e32cb8ef90d6b4a47853241db2a28146a57d/numpy/lib/type_check.py#L282-L320
dt = determine_dtype(x)
if isinstance(x, types.Optional):
dt = determine_dtype(x.type)
iscmplx = np.issubdtype(dt, np.complexfloating)
if isinstance(x, types.Optional):
def impl(x):
if x is None:
return False
return iscmplx
else:
def impl(x):
return iscmplx
return impl
@overload(np.isrealobj)
def isrealobj(x):
# Return True if x is not a complex type.
# Implementation based on NumPy
# https://github.com/numpy/numpy/blob/ccfbcc1cd9a4035a467f2e982a565ab27de25b6b/numpy/lib/type_check.py#L290-L322
def impl(x):
return not np.iscomplexobj(x)
return impl
@overload(np.isscalar)
def np_isscalar(num):
res = isinstance(num, (types.Number, types.UnicodeType, types.Boolean))
def impl(num):
return res
return impl
def is_np_inf_impl(x, out, fn):
# if/else branch should be unified after PR #5606 is merged
if is_nonelike(out):
def impl(x, out=None):
return np.logical_and(np.isinf(x), fn(np.signbit(x)))
else:
def impl(x, out=None):
return np.logical_and(np.isinf(x), fn(np.signbit(x)), out)
return impl
@overload(np.isneginf)
def isneginf(x, out=None):
fn = register_jitable(lambda x: x)
return is_np_inf_impl(x, out, fn)
@overload(np.isposinf)
def isposinf(x, out=None):
fn = register_jitable(lambda x: ~x)
return is_np_inf_impl(x, out, fn)
@register_jitable
def less_than(a, b):
return a < b
@register_jitable
def greater_than(a, b):
return a > b
@register_jitable
def check_array(a):
if a.size == 0:
raise ValueError('zero-size array to reduction operation not possible')
def nan_min_max_factory(comparison_op, is_complex_dtype):
if is_complex_dtype:
def impl(a):
arr = np.asarray(a)
check_array(arr)
it = np.nditer(arr)
return_val = next(it).take(0)
for view in it:
v = view.item()
if np.isnan(return_val.real) and not np.isnan(v.real):
return_val = v
else:
if comparison_op(v.real, return_val.real):
return_val = v
elif v.real == return_val.real:
if comparison_op(v.imag, return_val.imag):
return_val = v
return return_val
else:
def impl(a):
arr = np.asarray(a)
check_array(arr)
it = np.nditer(arr)
return_val = next(it).take(0)
for view in it:
v = view.item()
if not np.isnan(v):
if not comparison_op(return_val, v):
return_val = v
return return_val
return impl
real_nanmin = register_jitable(
nan_min_max_factory(less_than, is_complex_dtype=False)
)
real_nanmax = register_jitable(
nan_min_max_factory(greater_than, is_complex_dtype=False)
)
complex_nanmin = register_jitable(
nan_min_max_factory(less_than, is_complex_dtype=True)
)
complex_nanmax = register_jitable(
nan_min_max_factory(greater_than, is_complex_dtype=True)
)
@overload(np.nanmin)
def np_nanmin(a):
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
return complex_nanmin
else:
return real_nanmin
@overload(np.nanmax)
def np_nanmax(a):
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
return complex_nanmax
else:
return real_nanmax
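# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# the nan-aware extrema above skip NaNs unless no finite value exists, and
# complex inputs use the lexicographic ordering from nan_min_max_factory.
# Assumes the usual `numba.njit` entry point; `_demo_nan_extrema` is purely
# for illustration.
def _demo_nan_extrema():
    from numba import njit
    @njit
    def nan_extrema(a):
        return np.nanmin(a), np.nanmax(a)
    a = np.array([np.nan, 2.0, -1.0, np.nan, 7.0])
    return nan_extrema(a)  # (-1.0, 7.0)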
@overload(np.nanmean)
def np_nanmean(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmean_impl(a):
c = 0.0
count = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c += v.item()
count += 1
# np.divide() doesn't raise ZeroDivisionError
return np.divide(c, count)
return nanmean_impl
@overload(np.nanvar)
def np_nanvar(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanvar_impl(a):
# Compute the mean
m = np.nanmean(a)
# Compute the sum of square diffs
ssd = 0.0
count = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
val = (v.item() - m)
ssd += np.real(val * np.conj(val))
count += 1
# np.divide() doesn't raise ZeroDivisionError
return np.divide(ssd, count)
return nanvar_impl
@overload(np.nanstd)
def np_nanstd(a):
if not isinstance(a, types.Array):
return
def nanstd_impl(a):
return np.nanvar(a) ** 0.5
return nanstd_impl
@overload(np.nansum)
def np_nansum(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, types.Integer):
retty = types.intp
else:
retty = a.dtype
zero = retty(0)
isnan = get_isnan(a.dtype)
def nansum_impl(a):
c = zero
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c += v
return c
return nansum_impl
@overload(np.nanprod)
def np_nanprod(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, types.Integer):
retty = types.intp
else:
retty = a.dtype
one = retty(1)
isnan = get_isnan(a.dtype)
def nanprod_impl(a):
c = one
for view in np.nditer(a):
v = view.item()
if not isnan(v):
c *= v
return c
return nanprod_impl
@overload(np.nancumprod)
def np_nancumprod(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, (types.Boolean, types.Integer)):
# dtype cannot possibly contain NaN
return lambda a: np.cumprod(a)
else:
retty = a.dtype
is_nan = get_isnan(retty)
one = retty(1)
def nancumprod_impl(a):
out = np.empty(a.size, retty)
c = one
for idx, v in enumerate(a.flat):
if ~is_nan(v):
c *= v
out[idx] = c
return out
return nancumprod_impl
@overload(np.nancumsum)
def np_nancumsum(a):
if not isinstance(a, types.Array):
return
if isinstance(a.dtype, (types.Boolean, types.Integer)):
# dtype cannot possibly contain NaN
return lambda a: np.cumsum(a)
else:
retty = a.dtype
is_nan = get_isnan(retty)
zero = retty(0)
def nancumsum_impl(a):
out = np.empty(a.size, retty)
c = zero
for idx, v in enumerate(a.flat):
if ~is_nan(v):
c += v
out[idx] = c
return out
return nancumsum_impl
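# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# the nan-aware sums above; NaNs contribute nothing, and the cumulative sum
# carries the running total straight through them. Assumes the usual
# `numba.njit` entry point; `_demo_nan_sums` is purely for illustration.
def _demo_nan_sums():
    from numba import njit
    @njit
    def nan_sums(a):
        return np.nansum(a), np.nancumsum(a)
    a = np.array([1.0, np.nan, 2.0])
    return nan_sums(a)  # (3.0, array([1., 1., 3.]))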
@register_jitable
def prepare_ptp_input(a):
arr = _asarray(a)
if len(arr) == 0:
raise ValueError('zero-size array reduction not possible')
else:
return arr
def _compute_current_val_impl_gen(op):
def _compute_current_val_impl(current_val, val):
if isinstance(current_val, types.Complex):
# The sort order for complex numbers is lexicographic. If both the
# real and imaginary parts are non-nan then the order is determined
# by the real parts except when they are equal, in which case the
# order is determined by the imaginary parts.
# https://github.com/numpy/numpy/blob/577a86e/numpy/core/fromnumeric.py#L874-L877 # noqa: E501
def impl(current_val, val):
if op(val.real, current_val.real):
return val
elif (val.real == current_val.real
and op(val.imag, current_val.imag)):
return val
return current_val
else:
def impl(current_val, val):
return val if op(val, current_val) else current_val
return impl
return _compute_current_val_impl
_compute_a_max = generated_jit(_compute_current_val_impl_gen(greater_than))
_compute_a_min = generated_jit(_compute_current_val_impl_gen(less_than))
@generated_jit
def _early_return(val):
UNUSED = 0
if isinstance(val, types.Complex):
def impl(val):
if np.isnan(val.real):
if np.isnan(val.imag):
return True, np.nan + np.nan * 1j
else:
return True, np.nan + 0j
else:
return False, UNUSED
elif isinstance(val, types.Float):
def impl(val):
if np.isnan(val):
return True, np.nan
else:
return False, UNUSED
else:
def impl(val):
return False, UNUSED
return impl
@overload_method(types.Array, 'ptp')
@overload(np.ptp)
def np_ptp(a):
if hasattr(a, 'dtype'):
if isinstance(a.dtype, types.Boolean):
raise TypingError("Boolean dtype is unsupported (as per NumPy)")
# Numpy raises a TypeError
def np_ptp_impl(a):
arr = prepare_ptp_input(a)
a_flat = arr.flat
a_min = a_flat[0]
a_max = a_flat[0]
for i in range(arr.size):
val = a_flat[i]
take_branch, retval = _early_return(val)
if take_branch:
return retval
a_max = _compute_a_max(a_max, val)
a_min = _compute_a_min(a_min, val)
return a_max - a_min
return np_ptp_impl
#----------------------------------------------------------------------------
# Median and partitioning
@register_jitable
def nan_aware_less_than(a, b):
if np.isnan(a):
return False
else:
if np.isnan(b):
return True
else:
return a < b
def _partition_factory(pivotimpl):
def _partition(A, low, high):
mid = (low + high) >> 1
# NOTE: the pattern of swaps below for the pivot choice and the
# partitioning gives good results (i.e. regular O(n log n))
# on sorted, reverse-sorted, and uniform arrays. Subtle changes
# risk breaking this property.
# Use median of three {low, middle, high} as the pivot
if pivotimpl(A[mid], A[low]):
A[low], A[mid] = A[mid], A[low]
if pivotimpl(A[high], A[mid]):
A[high], A[mid] = A[mid], A[high]
if pivotimpl(A[mid], A[low]):
A[low], A[mid] = A[mid], A[low]
pivot = A[mid]
A[high], A[mid] = A[mid], A[high]
i = low
j = high - 1
while True:
while i < high and pivotimpl(A[i], pivot):
i += 1
while j >= low and pivotimpl(pivot, A[j]):
j -= 1
if i >= j:
break
A[i], A[j] = A[j], A[i]
i += 1
j -= 1
# Put the pivot back in its final place (all items before `i`
# are smaller than the pivot, all items at/after `i` are larger)
A[i], A[high] = A[high], A[i]
return i
return _partition
_partition = register_jitable(_partition_factory(less_than))
_partition_w_nan = register_jitable(_partition_factory(nan_aware_less_than))
def _select_factory(partitionimpl):
def _select(arry, k, low, high):
"""
Select the k'th smallest element in array[low:high + 1].
"""
i = partitionimpl(arry, low, high)
while i != k:
if i < k:
low = i + 1
i = partitionimpl(arry, low, high)
else:
high = i - 1
i = partitionimpl(arry, low, high)
return arry[k]
return _select
_select = register_jitable(_select_factory(_partition))
_select_w_nan = register_jitable(_select_factory(_partition_w_nan))
@register_jitable
def _select_two(arry, k, low, high):
"""
Select the k'th and k+1'th smallest elements in array[low:high + 1].
This is significantly faster than doing two independent selections
for k and k+1.
"""
while True:
assert high > low # by construction
i = _partition(arry, low, high)
if i < k:
low = i + 1
elif i > k + 1:
high = i - 1
elif i == k:
_select(arry, k + 1, i + 1, high)
break
else: # i == k + 1
_select(arry, k, low, i - 1)
break
return arry[k], arry[k + 1]
@register_jitable
def _median_inner(temp_arry, n):
"""
The main logic of the median() call. *temp_arry* must be disposable,
as this function will mutate it.
"""
low = 0
high = n - 1
half = n >> 1
if n & 1 == 0:
a, b = _select_two(temp_arry, half - 1, low, high)
return (a + b) / 2
else:
return _select(temp_arry, half, low, high)
@overload(np.median)
def np_median(a):
if not isinstance(a, types.Array):
return
def median_impl(a):
# np.median() works on the flattened array, and we need a temporary
# workspace anyway
temp_arry = a.flatten()
n = temp_arry.shape[0]
return _median_inner(temp_arry, n)
return median_impl
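# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.median as implemented above via quickselect (_select / _select_two) on a
# flattened, disposable copy of the input. Assumes the usual `numba.njit`
# entry point; `_demo_median` is purely for illustration.
def _demo_median():
    from numba import njit
    @njit
    def med(a):
        return np.median(a)
    odd = np.array([7.0, 1.0, 5.0])        # middle order statistic -> 5.0
    even = np.array([7.0, 1.0, 5.0, 3.0])  # mean of the two middle -> 4.0
    return med(odd), med(even)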
@register_jitable
def _collect_percentiles_inner(a, q):
#TODO: This needs rewriting to be closer to NumPy, particularly the nan/inf
# handling which is generally subject to algorithmic changes.
n = len(a)
if n == 1:
# single element array; output same for all percentiles
out = np.full(len(q), a[0], dtype=np.float64)
else:
out = np.empty(len(q), dtype=np.float64)
for i in range(len(q)):
percentile = q[i]
# bypass pivoting where requested percentile is 100
if percentile == 100:
val = np.max(a)
# heuristics to handle infinite values a la NumPy
if ~np.all(np.isfinite(a)):
if ~np.isfinite(val):
val = np.nan
# bypass pivoting where requested percentile is 0
elif percentile == 0:
val = np.min(a)
# convoluted heuristics to handle infinite values a la NumPy
if ~np.all(np.isfinite(a)):
num_pos_inf = np.sum(a == np.inf)
num_neg_inf = np.sum(a == -np.inf)
num_finite = n - (num_neg_inf + num_pos_inf)
if num_finite == 0:
val = np.nan
if num_pos_inf == 1 and n == 2:
val = np.nan
if num_neg_inf > 1:
val = np.nan
if num_finite == 1:
if num_pos_inf > 1:
if num_neg_inf != 1:
val = np.nan
else:
# linear interp between closest ranks
rank = 1 + (n - 1) * np.true_divide(percentile, 100.0)
f = math.floor(rank)
m = rank - f
lower, upper = _select_two(a, k=int(f - 1), low=0, high=(n - 1))
val = lower * (1 - m) + upper * m
out[i] = val
return out
@register_jitable
def _can_collect_percentiles(a, nan_mask, skip_nan):
if skip_nan:
a = a[~nan_mask]
if len(a) == 0:
return False # told to skip nan, but no elements remain
else:
if np.any(nan_mask):
return False # told *not* to skip nan, but nan encountered
if len(a) == 1: # single element array
val = a[0]
return np.isfinite(val) # can collect percentiles if element is finite
else:
return True
@register_jitable
def check_valid(q, q_upper_bound):
valid = True
# avoid expensive reductions where possible
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if q[i] < 0.0 or q[i] > q_upper_bound or np.isnan(q[i]):
valid = False
break
else:
if np.any(np.isnan(q)) or np.any(q < 0.0) or np.any(q > q_upper_bound):
valid = False
return valid
@register_jitable
def percentile_is_valid(q):
if not check_valid(q, q_upper_bound=100.0):
raise ValueError('Percentiles must be in the range [0, 100]')
@register_jitable
def quantile_is_valid(q):
if not check_valid(q, q_upper_bound=1.0):
raise ValueError('Quantiles must be in the range [0, 1]')
@register_jitable
def _collect_percentiles(a, q, check_q, factor, skip_nan):
q = np.asarray(q, dtype=np.float64).flatten()
check_q(q)
q = q * factor
temp_arry = np.asarray(a, dtype=np.float64).flatten()
nan_mask = np.isnan(temp_arry)
if _can_collect_percentiles(temp_arry, nan_mask, skip_nan):
temp_arry = temp_arry[~nan_mask]
out = _collect_percentiles_inner(temp_arry, q)
else:
out = np.full(len(q), np.nan)
return out
def _percentile_quantile_inner(a, q, skip_nan, factor, check_q):
"""
The underlying algorithm to find percentiles and quantiles
is the same, hence we converge onto the same code paths
in this inner function implementation
"""
dt = determine_dtype(a)
if np.issubdtype(dt, np.complexfloating):
raise TypingError('Not supported for complex dtype')
# this could be supported, but would require a
# lexicographic comparison
def np_percentile_q_scalar_impl(a, q):
return _collect_percentiles(a, q, check_q, factor, skip_nan)[0]
def np_percentile_impl(a, q):
return _collect_percentiles(a, q, check_q, factor, skip_nan)
if isinstance(q, (types.Number, types.Boolean)):
return np_percentile_q_scalar_impl
elif isinstance(q, types.Array) and q.ndim == 0:
return np_percentile_q_scalar_impl
else:
return np_percentile_impl
@overload(np.percentile)
def np_percentile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=False, factor=1.0, check_q=percentile_is_valid
)
@overload(np.nanpercentile)
def np_nanpercentile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=True, factor=1.0, check_q=percentile_is_valid
)
@overload(np.quantile)
def np_quantile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=False, factor=100.0, check_q=quantile_is_valid
)
@overload(np.nanquantile)
def np_nanquantile(a, q):
return _percentile_quantile_inner(
a, q, skip_nan=True, factor=100.0, check_q=quantile_is_valid
)
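# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# the percentile/quantile overloads above; quantiles are scaled by a factor of
# 100 so both converge on the same rank-interpolation code in
# _collect_percentiles_inner. Assumes the usual `numba.njit` entry point;
# `_demo_percentile` is purely for illustration.
def _demo_percentile():
    from numba import njit
    @njit
    def pq(a):
        return np.percentile(a, 25.0), np.quantile(a, 0.25)
    a = np.array([0.0, 1.0, 2.0, 3.0])
    # rank = 1 + (n - 1) * 25 / 100 = 1.75, so the result interpolates
    # between the 1st and 2nd smallest values: 0.75
    return pq(a)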
@overload(np.nanmedian)
def np_nanmedian(a):
if not isinstance(a, types.Array):
return
isnan = get_isnan(a.dtype)
def nanmedian_impl(a):
# Create a temporary workspace with only non-NaN values
temp_arry = np.empty(a.size, a.dtype)
n = 0
for view in np.nditer(a):
v = view.item()
if not isnan(v):
temp_arry[n] = v
n += 1
# all NaNs
if n == 0:
return np.nan
return _median_inner(temp_arry, n)
return nanmedian_impl
@register_jitable
def np_partition_impl_inner(a, kth_array):
# allocate and fill empty array rather than copy a and mutate in place
# as the latter approach fails to preserve strides
out = np.empty_like(a)
idx = np.ndindex(a.shape[:-1]) # Numpy default partition axis is -1
for s in idx:
arry = a[s].copy()
low = 0
high = len(arry) - 1
for kth in kth_array:
_select_w_nan(arry, kth, low, high)
low = kth # narrow span of subsequent partition
out[s] = arry
return out
@register_jitable
def valid_kths(a, kth):
"""
Returns a sorted, unique array of kth values which serve
as indexers for partitioning the input array, a.
    If the absolute value of any of the provided values
    is greater than or equal to a.shape[-1] an exception is raised since
we are partitioning along the last axis (per Numpy default
behaviour).
Values less than 0 are transformed to equivalent positive
index values.
"""
# cast boolean to int, where relevant
kth_array = _asarray(kth).astype(np.int64)
if kth_array.ndim != 1:
raise ValueError('kth must be scalar or 1-D')
# numpy raises ValueError: object too deep for desired array
if np.any(np.abs(kth_array) >= a.shape[-1]):
raise ValueError("kth out of bounds")
out = np.empty_like(kth_array)
for index, val in np.ndenumerate(kth_array):
if val < 0:
out[index] = val + a.shape[-1] # equivalent positive index
else:
out[index] = val
return np.unique(out)
@overload(np.partition)
def np_partition(a, kth):
if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
raise TypeError('The first argument must be an array-like')
if isinstance(a, types.Array) and a.ndim == 0:
raise TypeError('The first argument must be at least 1-D (found 0-D)')
kthdt = getattr(kth, 'dtype', kth)
if not isinstance(kthdt, (types.Boolean, types.Integer)):
# bool gets cast to int subsequently
raise TypeError('Partition index must be integer')
def np_partition_impl(a, kth):
a_tmp = _asarray(a)
if a_tmp.size == 0:
return a_tmp.copy()
else:
kth_array = valid_kths(a_tmp, kth)
return np_partition_impl_inner(a_tmp, kth_array)
return np_partition_impl
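# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.partition as implemented above (nan-aware quickselect along the last
# axis). Assumes the usual `numba.njit` entry point; `_demo_partition` is
# purely for illustration.
def _demo_partition():
    from numba import njit
    @njit
    def part(a, kth):
        return np.partition(a, kth)
    a = np.array([9.0, 1.0, 8.0, 2.0, 7.0])
    out = part(a, 2)
    # out[2] is the 3rd smallest value (7.0); values to its left are <= it
    # and values to its right are >= it, matching NumPy's semantics
    return out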
#----------------------------------------------------------------------------
# Building matrices
@register_jitable
def _tri_impl(N, M, k):
shape = max(0, N), max(0, M) # numpy floors each dimension at 0
out = np.empty(shape, dtype=np.float64) # numpy default dtype
for i in range(shape[0]):
m_max = min(max(0, i + k + 1), shape[1])
out[i, :m_max] = 1
out[i, m_max:] = 0
return out
@overload(np.tri)
def np_tri(N, M=None, k=0):
# we require k to be integer, unlike numpy
check_is_integer(k, 'k')
def tri_impl(N, M=None, k=0):
if M is None:
M = N
return _tri_impl(N, M, k)
return tri_impl
@register_jitable
def _make_square(m):
"""
Takes a 1d array and tiles it to form a square matrix
- i.e. a facsimile of np.tile(m, (len(m), 1))
"""
assert m.ndim == 1
len_m = len(m)
out = np.empty((len_m, len_m), dtype=m.dtype)
for i in range(len_m):
out[i] = m
return out
@register_jitable
def np_tril_impl_2d(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint)
return np.where(mask, m, np.zeros_like(m, dtype=m.dtype))
@overload(np.tril)
def my_tril(m, k=0):
# we require k to be integer, unlike numpy
check_is_integer(k, 'k')
def np_tril_impl_1d(m, k=0):
m_2d = _make_square(m)
return np_tril_impl_2d(m_2d, k)
def np_tril_impl_multi(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k).astype(np.uint)
idx = np.ndindex(m.shape[:-2])
z = np.empty_like(m)
zero_opt = np.zeros_like(mask, dtype=m.dtype)
for sel in idx:
z[sel] = np.where(mask, m[sel], zero_opt)
return z
if m.ndim == 1:
return np_tril_impl_1d
elif m.ndim == 2:
return np_tril_impl_2d
else:
return np_tril_impl_multi
@overload(np.tril_indices)
def np_tril_indices(n, k=0, m=None):
# we require integer arguments, unlike numpy
check_is_integer(n, 'n')
check_is_integer(k, 'k')
if not is_nonelike(m):
check_is_integer(m, 'm')
def np_tril_indices_impl(n, k=0, m=None):
return np.nonzero(np.tri(n, m, k=k))
return np_tril_indices_impl
@overload(np.tril_indices_from)
def np_tril_indices_from(arr, k=0):
# we require k to be integer, unlike numpy
check_is_integer(k, 'k')
if arr.ndim != 2:
raise TypingError("input array must be 2-d")
def np_tril_indices_from_impl(arr, k=0):
return np.tril_indices(arr.shape[0], k=k, m=arr.shape[1])
return np_tril_indices_from_impl
@register_jitable
def np_triu_impl_2d(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint)
return np.where(mask, np.zeros_like(m, dtype=m.dtype), m)
@overload(np.triu)
def my_triu(m, k=0):
# we require k to be integer, unlike numpy
check_is_integer(k, 'k')
def np_triu_impl_1d(m, k=0):
m_2d = _make_square(m)
return np_triu_impl_2d(m_2d, k)
def np_triu_impl_multi(m, k=0):
mask = np.tri(m.shape[-2], M=m.shape[-1], k=k - 1).astype(np.uint)
idx = np.ndindex(m.shape[:-2])
z = np.empty_like(m)
zero_opt = np.zeros_like(mask, dtype=m.dtype)
for sel in idx:
z[sel] = np.where(mask, zero_opt, m[sel])
return z
if m.ndim == 1:
return np_triu_impl_1d
elif m.ndim == 2:
return np_triu_impl_2d
else:
return np_triu_impl_multi
@overload(np.triu_indices)
def np_triu_indices(n, k=0, m=None):
# we require integer arguments, unlike numpy
check_is_integer(n, 'n')
check_is_integer(k, 'k')
if not is_nonelike(m):
check_is_integer(m, 'm')
def np_triu_indices_impl(n, k=0, m=None):
return np.nonzero(1 - np.tri(n, m, k=k - 1))
return np_triu_indices_impl
@overload(np.triu_indices_from)
def np_triu_indices_from(arr, k=0):
# we require k to be integer, unlike numpy
check_is_integer(k, 'k')
if arr.ndim != 2:
raise TypingError("input array must be 2-d")
def np_triu_indices_from_impl(arr, k=0):
return np.triu_indices(arr.shape[0], k=k, m=arr.shape[1])
return np_triu_indices_from_impl
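# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# the mask-based triangle helpers above; np.tril/np.triu build a 0/1 mask with
# np.tri and select through np.where, and the *_indices variants reuse the
# same mask via np.nonzero. Assumes the usual `numba.njit` entry point;
# `_demo_tril` is purely for illustration.
def _demo_tril():
    from numba import njit
    @njit
    def lower_triangle(m):
        return np.tril(m)
    m = np.arange(1.0, 10.0).reshape(3, 3)
    return lower_triangle(m)
    # array([[1., 0., 0.],
    #        [4., 5., 0.],
    #        [7., 8., 9.]])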
def _prepare_array(arr):
pass
@overload(_prepare_array)
def _prepare_array_impl(arr):
if arr in (None, types.none):
return lambda arr: np.array(())
else:
return lambda arr: _asarray(arr).ravel()
def _dtype_of_compound(inobj):
obj = inobj
while True:
if isinstance(obj, (types.Number, types.Boolean)):
return as_dtype(obj)
l = getattr(obj, '__len__', None)
if l is not None and l() == 0: # empty tuple or similar
return np.float64
dt = getattr(obj, 'dtype', None)
if dt is None:
raise TypeError("type has no dtype attr")
if isinstance(obj, types.Sequence):
obj = obj.dtype
else:
return as_dtype(dt)
@overload(np.ediff1d)
def np_ediff1d(ary, to_end=None, to_begin=None):
if isinstance(ary, types.Array):
if isinstance(ary.dtype, types.Boolean):
raise NumbaTypeError("Boolean dtype is unsupported (as per NumPy)")
# Numpy tries to do this: return ary[1:] - ary[:-1] which
# results in a TypeError exception being raised
# since np 1.16 there are casting checks for to_end and to_begin to make
# sure they are compatible with the ary
if numpy_version >= (1, 16):
ary_dt = _dtype_of_compound(ary)
to_begin_dt = None
if not(is_nonelike(to_begin)):
to_begin_dt = _dtype_of_compound(to_begin)
to_end_dt = None
if not(is_nonelike(to_end)):
to_end_dt = _dtype_of_compound(to_end)
if to_begin_dt is not None and not np.can_cast(to_begin_dt, ary_dt):
msg = "dtype of to_begin must be compatible with input ary"
raise NumbaTypeError(msg)
if to_end_dt is not None and not np.can_cast(to_end_dt, ary_dt):
msg = "dtype of to_end must be compatible with input ary"
raise NumbaTypeError(msg)
def np_ediff1d_impl(ary, to_end=None, to_begin=None):
# transform each input into an equivalent 1d array
start = _prepare_array(to_begin)
mid = _prepare_array(ary)
end = _prepare_array(to_end)
out_dtype = mid.dtype
# output array dtype determined by ary dtype, per NumPy
# (for the most part); an exception to the rule is a zero length
# array-like, where NumPy falls back to np.float64; this behaviour
# is *not* replicated
if len(mid) > 0:
out = np.empty((len(start) + len(mid) + len(end) - 1),
dtype=out_dtype)
start_idx = len(start)
mid_idx = len(start) + len(mid) - 1
out[:start_idx] = start
out[start_idx:mid_idx] = np.diff(mid)
out[mid_idx:] = end
else:
out = np.empty((len(start) + len(end)), dtype=out_dtype)
start_idx = len(start)
out[:start_idx] = start
out[start_idx:] = end
return out
return np_ediff1d_impl
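# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.ediff1d as implemented above; to_begin/to_end are flattened with
# _prepare_array and placed around the consecutive differences of ary.
# Assumes the usual `numba.njit` entry point; `_demo_ediff1d` is purely for
# illustration.
def _demo_ediff1d():
    from numba import njit
    @njit
    def diffs(a):
        return np.ediff1d(a, to_end=np.array([100.0]),
                          to_begin=np.array([-100.0]))
    a = np.array([1.0, 4.0, 9.0, 16.0])
    return diffs(a)  # array([-100., 3., 5., 7., 100.])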
def _select_element(arr):
pass
@overload(_select_element)
def _select_element_impl(arr):
zerod = getattr(arr, 'ndim', None) == 0
if zerod:
def impl(arr):
x = np.array((1,), dtype=arr.dtype)
x[:] = arr
return x[0]
return impl
else:
def impl(arr):
return arr
return impl
def _get_d(x, dx):
pass
@overload(_get_d)
def get_d_impl(x, dx):
if is_nonelike(x):
def impl(x, dx):
return np.asarray(dx)
else:
def impl(x, dx):
return np.diff(np.asarray(x))
return impl
@overload(np.trapz)
def np_trapz(y, x=None, dx=1.0):
if isinstance(y, (types.Number, types.Boolean)):
raise TypingError('y cannot be a scalar')
elif isinstance(y, types.Array) and y.ndim == 0:
raise TypingError('y cannot be 0D')
# NumPy raises IndexError: list assignment index out of range
# inspired by:
# https://github.com/numpy/numpy/blob/7ee52003/numpy/lib/function_base.py#L4040-L4065 # noqa: E501
def impl(y, x=None, dx=1.0):
yarr = np.asarray(y)
d = _get_d(x, dx)
y_ave = (yarr[..., slice(1, None)] + yarr[..., slice(None, -1)]) / 2.0
ret = np.sum(d * y_ave, -1)
processed = _select_element(ret)
return processed
return impl
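# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.trapz as implemented above; the spacings d come from _get_d and the
# result is sum(d * (y[1:] + y[:-1]) / 2) over the last axis. Assumes the
# usual `numba.njit` entry point; `_demo_trapz` is purely for illustration.
def _demo_trapz():
    from numba import njit
    @njit
    def area(y, x):
        return np.trapz(y, x)
    x = np.array([0.0, 1.0, 3.0])
    y = np.array([0.0, 2.0, 2.0])
    # 1 * (0 + 2) / 2 + 2 * (2 + 2) / 2 = 5.0
    return area(y, x)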
@register_jitable
def _np_vander(x, N, increasing, out):
"""
Generate an N-column Vandermonde matrix from a supplied 1-dimensional
array, x. Store results in an output matrix, out, which is assumed to
be of the required dtype.
Values are accumulated using np.multiply to match the floating point
precision behaviour of numpy.vander.
"""
m, n = out.shape
assert m == len(x)
assert n == N
if increasing:
for i in range(N):
if i == 0:
out[:, i] = 1
else:
out[:, i] = np.multiply(x, out[:, (i - 1)])
else:
for i in range(N - 1, -1, -1):
if i == N - 1:
out[:, i] = 1
else:
out[:, i] = np.multiply(x, out[:, (i + 1)])
@register_jitable
def _check_vander_params(x, N):
if x.ndim > 1:
raise ValueError('x must be a one-dimensional array or sequence.')
if N < 0:
raise ValueError('Negative dimensions are not allowed')
@overload(np.vander)
def np_vander(x, N=None, increasing=False):
if N not in (None, types.none):
if not isinstance(N, types.Integer):
raise TypingError('Second argument N must be None or an integer')
def np_vander_impl(x, N=None, increasing=False):
if N is None:
N = len(x)
_check_vander_params(x, N)
# allocate output matrix using dtype determined in closure
out = np.empty((len(x), int(N)), dtype=dtype)
_np_vander(x, N, increasing, out)
return out
def np_vander_seq_impl(x, N=None, increasing=False):
if N is None:
N = len(x)
x_arr = np.array(x)
_check_vander_params(x_arr, N)
# allocate output matrix using dtype inferred when x_arr was created
out = np.empty((len(x), int(N)), dtype=x_arr.dtype)
_np_vander(x_arr, N, increasing, out)
return out
if isinstance(x, types.Array):
x_dt = as_dtype(x.dtype)
        # replicate numpy behaviour w.r.t. type promotion
dtype = np.promote_types(x_dt, int)
return np_vander_impl
elif isinstance(x, (types.Tuple, types.Sequence)):
return np_vander_seq_impl
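# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.vander as implemented above; columns are accumulated with np.multiply, in
# increasing powers when increasing=True and reversed otherwise. Assumes the
# usual `numba.njit` entry point; `_demo_vander` is purely for illustration.
def _demo_vander():
    from numba import njit
    @njit
    def vander_inc(x):
        return np.vander(x, 3, True)
    x = np.array([1.0, 2.0, 3.0])
    return vander_inc(x)
    # array([[1., 1., 1.],
    #        [1., 2., 4.],
    #        [1., 3., 9.]])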
@overload(np.roll)
def np_roll(a, shift):
if not isinstance(shift, (types.Integer, types.Boolean)):
raise TypingError('shift must be an integer')
def np_roll_impl(a, shift):
arr = np.asarray(a)
out = np.empty(arr.shape, dtype=arr.dtype)
# empty_like might result in different contiguity vs NumPy
arr_flat = arr.flat
for i in range(arr.size):
idx = (i + shift) % arr.size
out.flat[idx] = arr_flat[i]
return out
if isinstance(a, (types.Number, types.Boolean)):
return lambda a, shift: np.asarray(a)
else:
return np_roll_impl
#----------------------------------------------------------------------------
# Mathematical functions
LIKELY_IN_CACHE_SIZE = 8
@register_jitable
def binary_search_with_guess(key, arr, length, guess):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of binary_search_with_guess prior to 1.15:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L447 # noqa: E501
imin = 0
imax = length
# Handle keys outside of the arr range first
if key > arr[length - 1]:
return length
elif key < arr[0]:
return -1
# If len <= 4 use linear search.
# From above we know key >= arr[0] when we start.
if length <= 4:
i = 1
while i < length and key >= arr[i]:
i += 1
return i - 1
if guess > length - 3:
guess = length - 3
if guess < 1:
guess = 1
# check most likely values: guess - 1, guess, guess + 1
if key < arr[guess]:
if key < arr[guess - 1]:
imax = guess - 1
# last attempt to restrict search to items in cache
if guess > LIKELY_IN_CACHE_SIZE and \
key >= arr[guess - LIKELY_IN_CACHE_SIZE]:
imin = guess - LIKELY_IN_CACHE_SIZE
else:
# key >= arr[guess - 1]
return guess - 1
else:
# key >= arr[guess]
if key < arr[guess + 1]:
return guess
else:
# key >= arr[guess + 1]
if key < arr[guess + 2]:
return guess + 1
else:
# key >= arr[guess + 2]
imin = guess + 2
# last attempt to restrict search to items in cache
if (guess < (length - LIKELY_IN_CACHE_SIZE - 1)) and \
(key < arr[guess + LIKELY_IN_CACHE_SIZE]):
imax = guess + LIKELY_IN_CACHE_SIZE
# finally, find index by bisection
while imin < imax:
imid = imin + ((imax - imin) >> 1)
if key >= arr[imid]:
imin = imid + 1
else:
imax = imid
return imin - 1
@register_jitable
def np_interp_impl_complex_fp_inner(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp_complex prior to 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L683 # noqa: E501
dz = np.asarray(x)
dx = np.asarray(xp)
dy = np.asarray(fp)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = np.empty((lenxp - 1), dtype=dtype)
else:
slopes = np.empty(0, dtype=dtype)
if slopes.size:
for i in range(lenxp - 1):
inv_dx = 1 / (dx[i + 1] - dx[i])
real = (dy[i + 1].real - dy[i].real) * inv_dx
imag = (dy[i + 1].imag - dy[i].imag) * inv_dx
slopes[i] = real + 1j * imag
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
real = x_val
imag = 0.0
dres.flat[i] = real + 1j * imag
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
inv_dx = 1 / (dx[j + 1] - dx[j])
real = (dy[j + 1].real - dy[j].real) * inv_dx
imag = (dy[j + 1].imag - dy[j].imag) * inv_dx
slope = real + 1j * imag
real = slope.real * (x_val - dx[j]) + dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
dres.flat[i] = real + 1j * imag
# NOTE: there's a change in master which is not
# in any released version of 1.16.x yet... as
# per the real value implementation, but
# interpolate real and imaginary parts
# independently; this will need to be added in
# due course
return dres
def np_interp_impl_complex_fp_inner_factory(np117_nan_handling):
@register_jitable
def impl(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp_complex post 1.16 with added
# branching to support np1.17 style NaN handling. (see
# `np117_nan_handling` use)
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L628 # noqa: E501
dz = np.asarray(x)
dx = np.asarray(xp)
dy = np.asarray(fp)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = np.empty((lenxp - 1), dtype=dtype)
else:
slopes = np.empty(0, dtype=dtype)
if slopes.size:
for i in range(lenxp - 1):
inv_dx = 1 / (dx[i + 1] - dx[i])
real = (dy[i + 1].real - dy[i].real) * inv_dx
imag = (dy[i + 1].imag - dy[i].imag) * inv_dx
slopes[i] = real + 1j * imag
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
real = x_val
imag = 0.0
dres.flat[i] = real + 1j * imag
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
elif dx[j] == x_val:
# Avoid potential non-finite interpolation
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
inv_dx = 1 / (dx[j + 1] - dx[j])
real = (dy[j + 1].real - dy[j].real) * inv_dx
imag = (dy[j + 1].imag - dy[j].imag) * inv_dx
slope = real + 1j * imag
# The following branches mimic the behavior of
                    # different numpy versions with regard to handling NaNs.
if np117_nan_handling:
# Numpy 1.17 handles NaN correctly
result = np_interp_impl_complex_fp_innermost_117(
x, slope, x_val, dx, dy, i, j,
)
dres.flat[i] = result
else:
                        # Numpy 1.16 does not handle NaN correctly.
real = slope.real * (x_val - dx[j]) + dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
dres.flat[i] = real + 1j * imag
return dres
return impl
@register_jitable
def np_interp_impl_complex_fp_innermost_117(x, slope, x_val, dx, dy, i, j):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a copy of innermost part of arr_interp_complex post 1.17:
# https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L798-L812 # noqa: E501
# If we get nan in one direction, try the other
real = slope.real * (x_val - dx[j]) + dy[j].real
if np.isnan(real):
real = slope.real * (x_val - dx[j + 1]) + dy[j + 1].real
if np.isnan(real) and dy[j].real == dy[j + 1].real:
real = dy[j].real
imag = slope.imag * (x_val - dx[j]) + dy[j].imag
if np.isnan(imag):
imag = slope.imag * (x_val - dx[j + 1]) + dy[j + 1].imag
if np.isnan(imag) and dy[j].imag == dy[j + 1].imag:
imag = dy[j].imag
return real + 1j * imag
@register_jitable
def np_interp_impl_inner(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp prior to 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.15.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/3430d78c01a3b9a19adad75f1acb5ae18286da73/numpy/core/src/multiarray/compiled_base.c#L532 # noqa: E501
dz = np.asarray(x, dtype=np.float64)
dx = np.asarray(xp, dtype=np.float64)
dy = np.asarray(fp, dtype=np.float64)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1])
else:
slopes = np.empty(0, dtype=dtype)
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
dres.flat[i] = x_val
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j])
dres.flat[i] = slope * (x_val - dx[j]) + dy[j]
# NOTE: this is in master but not in any released
# version of 1.16.x yet...
#
# If we get nan in one direction, try the other
# if np.isnan(dres.flat[i]):
# dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1]
#
# if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]:
# dres.flat[i] = dy[j]
return dres
def np_interp_impl_inner_factory(np117_nan_handling):
def impl(x, xp, fp, dtype):
# NOTE: Do not refactor... see note in np_interp function impl below
# this is a facsimile of arr_interp post 1.16:
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/971e2e89d08deeae0139d3011d15646fdac13c92/numpy/core/src/multiarray/compiled_base.c#L473 # noqa: E501
dz = np.asarray(x, dtype=np.float64)
dx = np.asarray(xp, dtype=np.float64)
dy = np.asarray(fp, dtype=np.float64)
if len(dx) == 0:
raise ValueError('array of sample points is empty')
if len(dx) != len(dy):
raise ValueError('fp and xp are not of the same size.')
if dx.size == 1:
return np.full(dz.shape, fill_value=dy[0], dtype=dtype)
dres = np.empty(dz.shape, dtype=dtype)
lenx = dz.size
lenxp = len(dx)
lval = dy[0]
rval = dy[lenxp - 1]
if lenxp == 1:
xp_val = dx[0]
fp_val = dy[0]
for i in range(lenx):
x_val = dz.flat[i]
if x_val < xp_val:
dres.flat[i] = lval
elif x_val > xp_val:
dres.flat[i] = rval
else:
dres.flat[i] = fp_val
else:
j = 0
# only pre-calculate slopes if there are relatively few of them.
if lenxp <= lenx:
slopes = (dy[1:] - dy[:-1]) / (dx[1:] - dx[:-1])
else:
slopes = np.empty(0, dtype=dtype)
for i in range(lenx):
x_val = dz.flat[i]
if np.isnan(x_val):
dres.flat[i] = x_val
continue
j = binary_search_with_guess(x_val, dx, lenxp, j)
if j == -1:
dres.flat[i] = lval
elif j == lenxp:
dres.flat[i] = rval
elif j == lenxp - 1:
dres.flat[i] = dy[j]
elif dx[j] == x_val:
# Avoid potential non-finite interpolation
dres.flat[i] = dy[j]
else:
if slopes.size:
slope = slopes[j]
else:
slope = (dy[j + 1] - dy[j]) / (dx[j + 1] - dx[j])
dres.flat[i] = slope * (x_val - dx[j]) + dy[j]
# NOTE: this is in np1.17
# https://github.com/numpy/numpy/blob/maintenance/1.17.x/numpy/core/src/multiarray/compiled_base.c # noqa: E501
# Permanent reference:
# https://github.com/numpy/numpy/blob/91fbe4dde246559fa5b085ebf4bc268e2b89eea8/numpy/core/src/multiarray/compiled_base.c#L610-L616 # noqa: E501
#
# If we get nan in one direction, try the other
if np117_nan_handling:
if np.isnan(dres.flat[i]):
dres.flat[i] = slope * (x_val - dx[j + 1]) + dy[j + 1] # noqa: E501
if np.isnan(dres.flat[i]) and dy[j] == dy[j + 1]:
dres.flat[i] = dy[j]
return dres
return impl
np_interp_impl_inner_post_np117 = register_jitable(
np_interp_impl_inner_factory(np117_nan_handling=True)
)
np_interp_impl_complex_inner_post_np117 = register_jitable(
np_interp_impl_complex_fp_inner_factory(np117_nan_handling=True)
)
np_interp_impl_inner_pre_np117 = register_jitable(
np_interp_impl_inner_factory(np117_nan_handling=False)
)
np_interp_impl_complex_inner_pre_np117 = register_jitable(
np_interp_impl_complex_fp_inner_factory(np117_nan_handling=False)
)
@overload(np.interp)
def np_interp(x, xp, fp):
    # NOTE: there is considerable duplication present in the functions:
    # np_interp_impl_complex_fp_inner
    # np_interp_impl_complex_fp_inner_factory
    # np_interp_impl_inner
    # np_interp_impl_inner_factory
    #
    # This is because:
    # 1. Replicating basic interp is relatively simple, however matching the
    #    behaviour of NumPy for edge cases is really quite hard, after a
    #    couple of attempts trying to avoid translation of the C source it
    #    was deemed unavoidable.
    # 2. Due to 1. it is much easier to keep track of changes if the Numba
    #    source reflects the NumPy C source, so the duplication is kept.
    # 3. There are significant changes that happened in the NumPy 1.16 and
    #    1.17 release series, hence the `pre_np117`/`post_np117` variants
    #    produced by the factories above; they behave slightly differently!
if hasattr(xp, 'ndim') and xp.ndim > 1:
raise TypingError('xp must be 1D')
if hasattr(fp, 'ndim') and fp.ndim > 1:
raise TypingError('fp must be 1D')
complex_dtype_msg = (
"Cannot cast array data from complex dtype to float64 dtype"
)
xp_dt = determine_dtype(xp)
if np.issubdtype(xp_dt, np.complexfloating):
raise TypingError(complex_dtype_msg)
if numpy_version >= (1, 17):
impl = np_interp_impl_inner_post_np117
impl_complex = np_interp_impl_complex_inner_post_np117
elif numpy_version >= (1, 16):
impl = np_interp_impl_inner_pre_np117
impl_complex = np_interp_impl_complex_inner_pre_np117
else:
impl = np_interp_impl_inner
impl_complex = np_interp_impl_complex_fp_inner
fp_dt = determine_dtype(fp)
dtype = np.result_type(fp_dt, np.float64)
if np.issubdtype(dtype, np.complexfloating):
inner = impl_complex
else:
inner = impl
def np_interp_impl(x, xp, fp):
return inner(x, xp, fp, dtype)
def np_interp_scalar_impl(x, xp, fp):
return inner(x, xp, fp, dtype).flat[0]
if isinstance(x, types.Number):
if isinstance(x, types.Complex):
raise TypingError(complex_dtype_msg)
return np_interp_scalar_impl
return np_interp_impl
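# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# np.interp as implemented above; points left/right of xp clamp to the end
# values of fp, while interior points are linearly interpolated between the
# bracketing samples located by binary_search_with_guess. Assumes the usual
# `numba.njit` entry point; `_demo_interp` is purely for illustration.
def _demo_interp():
    from numba import njit
    @njit
    def interp(x, xp, fp):
        return np.interp(x, xp, fp)
    xp = np.array([0.0, 1.0, 2.0])
    fp = np.array([0.0, 10.0, 20.0])
    x = np.array([-1.0, 0.5, 1.5, 5.0])
    return interp(x, xp, fp)  # array([ 0.,  5., 15., 20.])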
#----------------------------------------------------------------------------
# Statistics
@register_jitable
def row_wise_average(a):
assert a.ndim == 2
m, n = a.shape
out = np.empty((m, 1), dtype=a.dtype)
for i in range(m):
out[i, 0] = np.sum(a[i, :]) / n
return out
@register_jitable
def np_cov_impl_inner(X, bias, ddof):
# determine degrees of freedom
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
# determine the normalization factor
fact = X.shape[1] - ddof
# numpy warns if less than 0 and floors at 0
fact = max(fact, 0.0)
# de-mean
X -= row_wise_average(X)
# calculate result - requires blas
c = np.dot(X, np.conj(X.T))
c *= np.true_divide(1, fact)
return c
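# Editor's note: small worked sketch (not in the upstream source) of the
# normalisation performed by np_cov_impl_inner above: for an (nvars, nobs)
# matrix the factor is fact = nobs - ddof (ddof defaults to 1, or 0 when
# bias=True), floored at 0, and the covariance is the de-meaned outer product
# divided by fact. Plain NumPy is used; the function is never called here.
def _np_cov_normalisation_sketch():
    X = np.array([[1.0, 2.0, 3.0],
                  [2.0, 4.0, 6.0]])
    fact = X.shape[1] - 1                    # default ddof=1 -> 3 - 1 = 2
    Xm = X - X.mean(axis=1).reshape(-1, 1)   # de-mean each row
    return np.dot(Xm, np.conj(Xm.T)) / fact  # equals np.cov(X): [[1, 2], [2, 4]]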
def _prepare_cov_input_inner():
pass
@overload(_prepare_cov_input_inner)
def _prepare_cov_input_impl(m, y, rowvar, dtype):
if y in (None, types.none):
def _prepare_cov_input_inner(m, y, rowvar, dtype):
m_arr = np.atleast_2d(_asarray(m))
if not rowvar:
m_arr = m_arr.T
return m_arr
else:
def _prepare_cov_input_inner(m, y, rowvar, dtype):
m_arr = np.atleast_2d(_asarray(m))
y_arr = np.atleast_2d(_asarray(y))
# transpose if asked to and not a (1, n) vector - this looks
# wrong as you might end up transposing one and not the other,
# but it's what numpy does
if not rowvar:
if m_arr.shape[0] != 1:
m_arr = m_arr.T
if y_arr.shape[0] != 1:
y_arr = y_arr.T
m_rows, m_cols = m_arr.shape
y_rows, y_cols = y_arr.shape
if m_cols != y_cols:
raise ValueError("m and y have incompatible dimensions")
# allocate and fill output array
out = np.empty((m_rows + y_rows, m_cols), dtype=dtype)
out[:m_rows, :] = m_arr
out[-y_rows:, :] = y_arr
return out
return _prepare_cov_input_inner
@register_jitable
def _handle_m_dim_change(m):
if m.ndim == 2 and m.shape[0] == 1:
msg = ("2D array containing a single row is unsupported due to "
"ambiguity in type inference. To use numpy.cov in this case "
"simply pass the row as a 1D array, i.e. m[0].")
raise RuntimeError(msg)
_handle_m_dim_nop = register_jitable(lambda x: x)
def determine_dtype(array_like):
array_like_dt = np.float64
if isinstance(array_like, types.Array):
array_like_dt = as_dtype(array_like.dtype)
elif isinstance(array_like, (types.Number, types.Boolean)):
array_like_dt = as_dtype(array_like)
elif isinstance(array_like, (types.UniTuple, types.Tuple)):
coltypes = set()
for val in array_like:
if hasattr(val, 'count'):
                for v in val:
                    coltypes.add(v)
else:
coltypes.add(val)
if len(coltypes) > 1:
array_like_dt = np.promote_types(*[as_dtype(ty) for ty in coltypes])
elif len(coltypes) == 1:
array_like_dt = as_dtype(coltypes.pop())
return array_like_dt
def check_dimensions(array_like, name):
if isinstance(array_like, types.Array):
if array_like.ndim > 2:
raise TypeError("{0} has more than 2 dimensions".format(name))
elif isinstance(array_like, types.Sequence):
if isinstance(array_like.key[0], types.Sequence):
if isinstance(array_like.key[0].key[0], types.Sequence):
raise TypeError("{0} has more than 2 dimensions".format(name))
@register_jitable
def _handle_ddof(ddof):
if not np.isfinite(ddof):
raise ValueError('Cannot convert non-finite ddof to integer')
if ddof - int(ddof) != 0:
raise ValueError('ddof must be integral value')
_handle_ddof_nop = register_jitable(lambda x: x)
@register_jitable
def _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
_M_DIM_HANDLER):
_M_DIM_HANDLER(m)
_DDOF_HANDLER(ddof)
return _prepare_cov_input_inner(m, y, rowvar, dtype)
def scalar_result_expected(mandatory_input, optional_input):
opt_is_none = optional_input in (None, types.none)
if isinstance(mandatory_input, types.Array) and mandatory_input.ndim == 1:
return opt_is_none
if isinstance(mandatory_input, types.BaseTuple):
if all(isinstance(x, (types.Number, types.Boolean))
for x in mandatory_input.types):
return opt_is_none
else:
if (len(mandatory_input.types) == 1 and
isinstance(mandatory_input.types[0], types.BaseTuple)):
return opt_is_none
if isinstance(mandatory_input, (types.Number, types.Boolean)):
return opt_is_none
if isinstance(mandatory_input, types.Sequence):
if (not isinstance(mandatory_input.key[0], types.Sequence) and
opt_is_none):
return True
return False
@register_jitable
def _clip_corr(x):
return np.where(np.fabs(x) > 1, np.sign(x), x)
@register_jitable
def _clip_complex(x):
real = _clip_corr(x.real)
imag = _clip_corr(x.imag)
return real + 1j * imag
@overload(np.cov)
def np_cov(m, y=None, rowvar=True, bias=False, ddof=None):
# reject problem if m and / or y are more than 2D
check_dimensions(m, 'm')
check_dimensions(y, 'y')
# reject problem if ddof invalid (either upfront if type is
# obviously invalid, or later if value found to be non-integral)
if ddof in (None, types.none):
_DDOF_HANDLER = _handle_ddof_nop
else:
if isinstance(ddof, (types.Integer, types.Boolean)):
_DDOF_HANDLER = _handle_ddof_nop
elif isinstance(ddof, types.Float):
_DDOF_HANDLER = _handle_ddof
else:
raise TypingError('ddof must be a real numerical scalar type')
# special case for 2D array input with 1 row of data - select
# handler function which we'll call later when we have access
# to the shape of the input array
_M_DIM_HANDLER = _handle_m_dim_nop
if isinstance(m, types.Array):
_M_DIM_HANDLER = _handle_m_dim_change
# infer result dtype
m_dt = determine_dtype(m)
y_dt = determine_dtype(y)
dtype = np.result_type(m_dt, y_dt, np.float64)
def np_cov_impl(m, y=None, rowvar=True, bias=False, ddof=None):
X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
_M_DIM_HANDLER).astype(dtype)
if np.any(np.array(X.shape) == 0):
return np.full((X.shape[0], X.shape[0]), fill_value=np.nan,
dtype=dtype)
else:
return np_cov_impl_inner(X, bias, ddof)
def np_cov_impl_single_variable(m, y=None, rowvar=True, bias=False,
ddof=None):
        X = _prepare_cov_input(m, y, rowvar, dtype, ddof, _DDOF_HANDLER,
                               _M_DIM_HANDLER).astype(dtype)
if np.any(np.array(X.shape) == 0):
variance = np.nan
else:
variance = np_cov_impl_inner(X, bias, ddof).flat[0]
return np.array(variance)
if scalar_result_expected(m, y):
return np_cov_impl_single_variable
else:
return np_cov_impl
@overload(np.corrcoef)
def np_corrcoef(x, y=None, rowvar=True):
x_dt = determine_dtype(x)
y_dt = determine_dtype(y)
dtype = np.result_type(x_dt, y_dt, np.float64)
if dtype == np.complex_:
clip_fn = _clip_complex
else:
clip_fn = _clip_corr
def np_corrcoef_impl(x, y=None, rowvar=True):
c = np.cov(x, y, rowvar)
d = np.diag(c)
stddev = np.sqrt(d.real)
for i in range(c.shape[0]):
c[i, :] /= stddev
c[:, i] /= stddev
return clip_fn(c)
def np_corrcoef_impl_single_variable(x, y=None, rowvar=True):
c = np.cov(x, y, rowvar)
return c / c
if scalar_result_expected(x, y):
return np_corrcoef_impl_single_variable
else:
return np_corrcoef_impl
#----------------------------------------------------------------------------
# Element-wise computations
@overload(np.argwhere)
def np_argwhere(a):
# needs to be much more array-like for the array impl to work, Numba bug
# in one of the underlying function calls?
use_scalar = (numpy_version >= (1, 18) and
isinstance(a, (types.Number, types.Boolean)))
if type_can_asarray(a) and not use_scalar:
if numpy_version < (1, 18):
check = register_jitable(lambda x: not np.any(x))
else:
check = register_jitable(lambda x: True)
def impl(a):
arr = np.asarray(a)
if arr.shape == () and check(arr):
return np.zeros((0, 1), dtype=types.intp)
return np.transpose(np.vstack(np.nonzero(arr)))
else:
if numpy_version < (1, 18):
falseish = (0, 1)
trueish = (1, 1)
else:
falseish = (0, 0)
trueish = (1, 0)
def impl(a):
if a is not None and bool(a):
return np.zeros(trueish, dtype=types.intp)
else:
return np.zeros(falseish, dtype=types.intp)
return impl
@overload(np.flatnonzero)
def np_flatnonzero(a):
if type_can_asarray(a):
def impl(a):
arr = np.asarray(a)
return np.nonzero(np.ravel(arr))[0]
else:
def impl(a):
if a is not None and bool(a):
data = [0]
else:
data = [x for x in range(0)]
return np.array(data, dtype=types.intp)
return impl
@register_jitable
def _fill_diagonal_params(a, wrap):
if a.ndim == 2:
m = a.shape[0]
n = a.shape[1]
step = 1 + n
if wrap:
end = n * m
else:
end = n * min(m, n)
else:
shape = np.array(a.shape)
if not np.all(np.diff(shape) == 0):
raise ValueError("All dimensions of input must be of equal length")
step = 1 + (np.cumprod(shape[:-1])).sum()
end = shape.prod()
return end, step
@register_jitable
def _fill_diagonal_scalar(a, val, wrap):
end, step = _fill_diagonal_params(a, wrap)
for i in range(0, end, step):
a.flat[i] = val
@register_jitable
def _fill_diagonal(a, val, wrap):
end, step = _fill_diagonal_params(a, wrap)
ctr = 0
v_len = len(val)
for i in range(0, end, step):
a.flat[i] = val[ctr]
ctr += 1
ctr = ctr % v_len
@register_jitable
def _check_val_int(a, val):
iinfo = np.iinfo(a.dtype)
v_min = iinfo.min
v_max = iinfo.max
# check finite values are within bounds
if np.any(~np.isfinite(val)) or np.any(val < v_min) or np.any(val > v_max):
raise ValueError('Unable to safely conform val to a.dtype')
@register_jitable
def _check_val_float(a, val):
finfo = np.finfo(a.dtype)
v_min = finfo.min
v_max = finfo.max
# check finite values are within bounds
finite_vals = val[np.isfinite(val)]
if np.any(finite_vals < v_min) or np.any(finite_vals > v_max):
raise ValueError('Unable to safely conform val to a.dtype')
# no check performed, needed for pathway where no check is required
_check_nop = register_jitable(lambda x, y: x)
def _asarray(x):
pass
@overload(_asarray)
def _asarray_impl(x):
if isinstance(x, types.Array):
return lambda x: x
elif isinstance(x, (types.Sequence, types.Tuple)):
return lambda x: np.array(x)
elif isinstance(x, (types.Number, types.Boolean)):
ty = as_dtype(x)
return lambda x: np.array([x], dtype=ty)
@overload(np.fill_diagonal)
def np_fill_diagonal(a, val, wrap=False):
if a.ndim > 1:
# the following can be simplified after #3088; until then, employ
# a basic mechanism for catching cases where val is of a type/value
# which cannot safely be cast to a.dtype
if isinstance(a.dtype, types.Integer):
checker = _check_val_int
elif isinstance(a.dtype, types.Float):
checker = _check_val_float
else:
checker = _check_nop
def scalar_impl(a, val, wrap=False):
tmpval = _asarray(val).flatten()
checker(a, tmpval)
_fill_diagonal_scalar(a, val, wrap)
def non_scalar_impl(a, val, wrap=False):
tmpval = _asarray(val).flatten()
checker(a, tmpval)
_fill_diagonal(a, tmpval, wrap)
if isinstance(val, (types.Float, types.Integer, types.Boolean)):
return scalar_impl
elif isinstance(val, (types.Tuple, types.Sequence, types.Array)):
return non_scalar_impl
else:
msg = "The first argument must be at least 2-D (found %s-D)" % a.ndim
raise TypingError(msg)
def _np_round_intrinsic(tp):
# np.round() always rounds half to even
return "llvm.rint.f%d" % (tp.bitwidth,)
def _np_round_float(context, builder, tp, val):
llty = context.get_value_type(tp)
module = builder.module
fnty = llvmlite.ir.FunctionType(llty, [llty])
fn = cgutils.get_or_insert_function(module, fnty, _np_round_intrinsic(tp))
return builder.call(fn, (val,))
@glue_lowering(np.around, types.Float)
@glue_lowering(np.round, types.Float)
def scalar_round_unary_float(context, builder, sig, args):
res = _np_round_float(context, builder, sig.args[0], args[0])
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.around, types.Integer)
@glue_lowering(np.round, types.Integer)
def scalar_round_unary_integer(context, builder, sig, args):
res = args[0]
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.around, types.Complex)
@glue_lowering(np.round, types.Complex)
def scalar_round_unary_complex(context, builder, sig, args):
fltty = sig.args[0].underlying_float
z = context.make_complex(builder, sig.args[0], args[0])
z.real = _np_round_float(context, builder, fltty, z.real)
z.imag = _np_round_float(context, builder, fltty, z.imag)
res = z._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.around, types.Float, types.Integer)
@glue_lowering(np.round, types.Float, types.Integer)
@glue_lowering(np.around, types.Integer, types.Integer)
@glue_lowering(np.round, types.Integer, types.Integer)
def scalar_round_binary_float(context, builder, sig, args):
def round_ndigits(x, ndigits):
if math.isinf(x) or math.isnan(x):
return x
# NOTE: this is CPython's algorithm, but perhaps this is overkill
# when emulating Numpy's behaviour.
if ndigits >= 0:
if ndigits > 22:
# pow1 and pow2 are each safe from overflow, but
# pow1*pow2 ~= pow(10.0, ndigits) might overflow.
pow1 = 10.0 ** (ndigits - 22)
pow2 = 1e22
else:
pow1 = 10.0 ** ndigits
pow2 = 1.0
y = (x * pow1) * pow2
if math.isinf(y):
return x
return (np.round(y) / pow2) / pow1
else:
pow1 = 10.0 ** (-ndigits)
y = x / pow1
return np.round(y) * pow1
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
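# Editor's note (not in the upstream source): a worked example of the
# round_ndigits scheme above. For ndigits <= 22 it reduces to
# np.round(x * 10.0**ndigits) / 10.0**ndigits; the pow1/pow2 split only
# matters for very large ndigits, where 10.0**ndigits on its own could
# overflow. For instance, np.round(2.675, 2): y = 2.675 * 100.0 evaluates to
# 267.49999999999997 in binary floating point, rint(y) = 267.0, and
# 267.0 / 100.0 = 2.67, matching NumPy's result.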
@glue_lowering(np.around, types.Complex, types.Integer)
@glue_lowering(np.round, types.Complex, types.Integer)
def scalar_round_binary_complex(context, builder, sig, args):
def round_ndigits(z, ndigits):
return complex(np.round(z.real, ndigits),
np.round(z.imag, ndigits))
res = context.compile_internal(builder, round_ndigits, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.around, types.Array, types.Integer, types.Array)
@glue_lowering(np.round, types.Array, types.Integer, types.Array)
def array_round(context, builder, sig, args):
def array_round_impl(arr, decimals, out):
if arr.shape != out.shape:
raise ValueError("invalid output shape")
for index, val in np.ndenumerate(arr):
out[index] = np.round(val, decimals)
return out
res = context.compile_internal(builder, array_round_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@glue_lowering(np.sinc, types.Array)
def array_sinc(context, builder, sig, args):
def array_sinc_impl(arr):
out = np.zeros_like(arr)
for index, val in np.ndenumerate(arr):
out[index] = np.sinc(val)
return out
res = context.compile_internal(builder, array_sinc_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@glue_lowering(np.sinc, types.Number)
def scalar_sinc(context, builder, sig, args):
scalar_dtype = sig.return_type
def scalar_sinc_impl(val):
if val == 0.e0: # to match np impl
val = 1e-20
val *= np.pi # np sinc is the normalised variant
return np.sin(val) / val
res = context.compile_internal(builder, scalar_sinc_impl, sig, args,
locals=dict(c=scalar_dtype))
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.angle, types.Number)
@glue_lowering(np.angle, types.Number, types.Boolean)
def scalar_angle_kwarg(context, builder, sig, args):
deg_mult = sig.return_type(180 / np.pi)
def scalar_angle_impl(val, deg):
if deg:
return np.arctan2(val.imag, val.real) * deg_mult
else:
return np.arctan2(val.imag, val.real)
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, scalar_angle_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@glue_lowering(np.angle, types.Array)
@glue_lowering(np.angle, types.Array, types.Boolean)
def array_angle_kwarg(context, builder, sig, args):
ret_dtype = sig.return_type.dtype
def array_angle_impl(arr, deg):
out = np.zeros_like(arr, dtype=ret_dtype)
for index, val in np.ndenumerate(arr):
out[index] = np.angle(val, deg)
return out
if len(args) == 1:
args = args + (cgutils.false_bit,)
sig = signature(sig.return_type, *(sig.args + (types.boolean,)))
res = context.compile_internal(builder, array_angle_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@lower_builtin(np.nonzero, types.Array)
@lower_builtin("array.nonzero", types.Array)
@glue_lowering(np.where, types.Array)
def array_nonzero(context, builder, sig, args):
aryty = sig.args[0]
# Return type is a N-tuple of 1D C-contiguous arrays
retty = sig.return_type
outaryty = retty.dtype
nouts = retty.count
ary = make_array(aryty)(context, builder, args[0])
shape = cgutils.unpack_tuple(builder, ary.shape)
strides = cgutils.unpack_tuple(builder, ary.strides)
data = ary.data
layout = aryty.layout
# First count the number of non-zero elements
zero = context.get_constant(types.intp, 0)
one = context.get_constant(types.intp, 1)
count = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
builder.store(builder.add(builder.load(count), one), count)
# Then allocate output arrays of the right size
out_shape = (builder.load(count),)
outs = [_empty_nd_impl(context, builder, outaryty, out_shape)._getvalue()
for i in range(nouts)]
outarys = [make_array(outaryty)(context, builder, out) for out in outs]
out_datas = [out.data for out in outarys]
# And fill them up
index = cgutils.alloca_once_value(builder, zero)
with cgutils.loop_nest(builder, shape, zero.type) as indices:
ptr = cgutils.get_item_pointer2(context, builder, data, shape, strides,
layout, indices)
val = load_item(context, builder, aryty, ptr)
nz = context.is_true(builder, aryty.dtype, val)
with builder.if_then(nz):
# Store element indices in output arrays
if not indices:
# For a 0-d array, store 0 in the unique output array
indices = (zero,)
cur = builder.load(index)
for i in range(nouts):
ptr = cgutils.get_item_pointer2(context, builder, out_datas[i],
out_shape, (),
'C', [cur])
store_item(context, builder, outaryty, indices[i], ptr)
builder.store(builder.add(cur, one), index)
tup = context.make_tuple(builder, sig.return_type, outs)
return impl_ret_new_ref(context, builder, sig.return_type, tup)
def array_where(context, builder, sig, args):
"""
np.where(array, array, array)
"""
layouts = set(a.layout for a in sig.args)
npty = np.promote_types(as_dtype(sig.args[1].dtype),
as_dtype(sig.args[2].dtype))
if layouts == set('C') or layouts == set('F'):
# Faster implementation for C-contiguous arrays
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x, dtype=npty)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty(cond.shape, dtype=npty)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@register_jitable
def _where_x_y_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x if c else y
return res
@register_jitable
def _where_x_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x if c else y[idx]
return res
@register_jitable
def _where_y_scalar(cond, x, y, res):
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y
return res
def _where_inner(context, builder, sig, args, impl):
cond, x, y = sig.args
x_dt = determine_dtype(x)
y_dt = determine_dtype(y)
npty = np.promote_types(x_dt, y_dt)
if cond.layout == 'F':
def where_impl(cond, x, y):
res = np.asfortranarray(np.empty(cond.shape, dtype=npty))
return impl(cond, x, y, res)
else:
def where_impl(cond, x, y):
res = np.empty(cond.shape, dtype=npty)
return impl(cond, x, y, res)
res = context.compile_internal(builder, where_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
array_scalar_scalar_where = partial(_where_inner, impl=_where_x_y_scalar)
array_array_scalar_where = partial(_where_inner, impl=_where_y_scalar)
array_scalar_array_where = partial(_where_inner, impl=_where_x_scalar)
@glue_lowering(np.where, types.Any, types.Any, types.Any)
def any_where(context, builder, sig, args):
cond, x, y = sig.args
if isinstance(cond, types.Array):
if isinstance(x, types.Array):
if isinstance(y, types.Array):
impl = array_where
elif isinstance(y, (types.Number, types.Boolean)):
impl = array_array_scalar_where
elif isinstance(x, (types.Number, types.Boolean)):
if isinstance(y, types.Array):
impl = array_scalar_array_where
elif isinstance(y, (types.Number, types.Boolean)):
impl = array_scalar_scalar_where
return impl(context, builder, sig, args)
def scalar_where_impl(cond, x, y):
"""
np.where(scalar, scalar, scalar): return a 0-dim array
"""
scal = x if cond else y
# This is the equivalent of np.full_like(scal, scal),
# for compatibility with Numpy < 1.8
arr = np.empty_like(scal)
arr[()] = scal
return arr
res = context.compile_internal(builder, scalar_where_impl, sig, args)
return impl_ret_new_ref(context, builder, sig.return_type, res)
@overload(np.real)
def np_real(a):
def np_real_impl(a):
return a.real
return np_real_impl
@overload(np.imag)
def np_imag(a):
def np_imag_impl(a):
return a.imag
return np_imag_impl
#----------------------------------------------------------------------------
# Misc functions
@overload(operator.contains)
def np_contains(arr, key):
if not isinstance(arr, types.Array):
return
def np_contains_impl(arr, key):
for x in np.nditer(arr):
if x == key:
return True
return False
return np_contains_impl
@overload(np.count_nonzero)
def np_count_nonzero(arr, axis=None):
if not type_can_asarray(arr):
raise TypingError("The argument to np.count_nonzero must be array-like")
if is_nonelike(axis):
def impl(arr, axis=None):
arr2 = np.ravel(arr)
return np.sum(arr2 != 0)
else:
def impl(arr, axis=None):
arr2 = arr.astype(np.bool_)
return np.sum(arr2, axis=axis)
return impl
np_delete_handler_isslice = register_jitable(lambda x : x)
np_delete_handler_isarray = register_jitable(lambda x : np.asarray(x))
@overload(np.delete)
def np_delete(arr, obj):
# Implementation based on numpy
# https://github.com/numpy/numpy/blob/af66e487a57bfd4850f4306e3b85d1dac3c70412/numpy/lib/function_base.py#L4065-L4267 # noqa: E501
if not isinstance(arr, (types.Array, types.Sequence)):
raise TypingError("arr must be either an Array or a Sequence")
if isinstance(obj, (types.Array, types.Sequence, types.SliceType)):
if isinstance(obj, (types.SliceType)):
handler = np_delete_handler_isslice
else:
if not isinstance(obj.dtype, types.Integer):
raise TypingError('obj should be of Integer dtype')
handler = np_delete_handler_isarray
def np_delete_impl(arr, obj):
arr = np.ravel(np.asarray(arr))
N = arr.size
keep = np.ones(N, dtype=np.bool_)
obj = handler(obj)
keep[obj] = False
return arr[keep]
return np_delete_impl
else: # scalar value
if not isinstance(obj, types.Integer):
raise TypingError('obj should be of Integer dtype')
def np_delete_scalar_impl(arr, obj):
arr = np.ravel(np.asarray(arr))
N = arr.size
pos = obj
if (pos < -N or pos >= N):
raise IndexError('obj must be less than the len(arr)')
# NumPy raises IndexError: index 'i' is out of
# bounds for axis 'x' with size 'n'
if (pos < 0):
pos += N
return np.concatenate((arr[:pos], arr[pos + 1:]))
return np_delete_scalar_impl
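# Editor's note: illustrative sketch (not in the upstream source). As in the
# implementations above, the input is always flattened (NumPy's axis=None
# behaviour) and obj may be a scalar index, a slice, or an integer array.
# Defined but never called; assumes `numba` is importable at call time.
def _np_delete_usage_sketch():
    from numba import njit

    @njit
    def delete_demo(arr):
        return np.delete(arr, 2), np.delete(arr, np.array([0, 1]))

    return delete_demo(np.array([10, 20, 30, 40]))
    # -> (array([10, 20, 40]), array([30, 40]))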
@overload(np.diff)
def np_diff_impl(a, n=1):
if not isinstance(a, types.Array) or a.ndim == 0:
return
def diff_impl(a, n=1):
if n == 0:
return a.copy()
if n < 0:
raise ValueError("diff(): order must be non-negative")
size = a.shape[-1]
out_shape = a.shape[:-1] + (max(size - n, 0),)
out = np.empty(out_shape, a.dtype)
if out.size == 0:
return out
# np.diff() works on each last dimension subarray independently.
# To make things easier, normalize input and output into 2d arrays
a2 = a.reshape((-1, size))
out2 = out.reshape((-1, out.shape[-1]))
# A scratchpad for subarrays
work = np.empty(size, a.dtype)
for major in range(a2.shape[0]):
# First iteration: diff a2 into work
for i in range(size - 1):
work[i] = a2[major, i + 1] - a2[major, i]
# Other iterations: diff work into itself
for niter in range(1, n):
for i in range(size - niter - 1):
work[i] = work[i + 1] - work[i]
# Copy final diff into out2
out2[major] = work[:size - n]
return out
return diff_impl
@overload(np.array_equal)
def np_array_equal(a, b):
if not (type_can_asarray(a) and type_can_asarray(b)):
        raise TypingError('Both arguments to "array_equal" must be array-like')
accepted = (types.Boolean, types.Number)
if isinstance(a, accepted) and isinstance(b, accepted):
# special case
def impl(a, b):
return a == b
else:
def impl(a, b):
a = np.asarray(a)
b = np.asarray(b)
if a.shape == b.shape:
return np.all(a == b)
return False
return impl
@overload(np.intersect1d)
def jit_np_intersect1d(ar1, ar2):
# Not implemented to support assume_unique or return_indices
# https://github.com/numpy/numpy/blob/v1.19.0/numpy/lib
# /arraysetops.py#L347-L441
    if not (type_can_asarray(ar1) and type_can_asarray(ar2)):
raise TypingError('intersect1d: first two args must be array-like')
def np_intersects1d_impl(ar1, ar2):
ar1 = np.asarray(ar1)
ar2 = np.asarray(ar2)
ar1 = np.unique(ar1)
ar2 = np.unique(ar2)
aux = np.concatenate((ar1, ar2))
aux.sort()
mask = aux[1:] == aux[:-1]
int1d = aux[:-1][mask]
return int1d
return np_intersects1d_impl
def validate_1d_array_like(func_name, seq):
if isinstance(seq, types.Array):
if seq.ndim != 1:
raise TypeError("{0}(): input should have dimension 1"
.format(func_name))
elif not isinstance(seq, types.Sequence):
raise TypeError("{0}(): input should be an array or sequence"
.format(func_name))
@overload(np.bincount)
def np_bincount(a, weights=None, minlength=0):
validate_1d_array_like("bincount", a)
if not isinstance(a.dtype, types.Integer):
return
check_is_integer(minlength, 'minlength')
if weights not in (None, types.none):
validate_1d_array_like("bincount", weights)
# weights is promoted to double in C impl
# https://github.com/numpy/numpy/blob/maintenance/1.16.x/numpy/core/src/multiarray/compiled_base.c#L93-L95 # noqa: E501
out_dtype = np.float64
@register_jitable
def validate_inputs(a, weights, minlength):
if len(a) != len(weights):
raise ValueError("bincount(): weights and list don't have "
"the same length")
@register_jitable
def count_item(out, idx, val, weights):
out[val] += weights[idx]
else:
out_dtype = types.intp
@register_jitable
def validate_inputs(a, weights, minlength):
pass
@register_jitable
def count_item(out, idx, val, weights):
out[val] += 1
def bincount_impl(a, weights=None, minlength=0):
validate_inputs(a, weights, minlength)
if minlength < 0:
raise ValueError("'minlength' must not be negative")
n = len(a)
a_max = a[0] if n > 0 else -1
for i in range(1, n):
if a[i] < 0:
raise ValueError("bincount(): first argument must be "
"non-negative")
a_max = max(a_max, a[i])
out_length = max(a_max + 1, minlength)
out = np.zeros(out_length, out_dtype)
for i in range(n):
count_item(out, i, a[i], weights)
return out
return bincount_impl
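# Editor's note: illustrative sketch (not in the upstream source). As noted
# above, the result dtype is float64 when weights are given and intp
# otherwise. Defined but never called; assumes `numba` is importable.
def _np_bincount_usage_sketch():
    from numba import njit

    @njit
    def bincount_demo(a, w):
        return np.bincount(a), np.bincount(a, weights=w)

    a = np.array([0, 1, 1, 3])
    w = np.array([0.5, 1.0, 1.0, 2.0])
    return bincount_demo(a, w)
    # -> (array([1, 2, 0, 1]), array([0.5, 2. , 0. , 2. ]))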
def _searchsorted(func):
def searchsorted_inner(a, v):
n = len(a)
if np.isnan(v):
# Find the first nan (i.e. the last from the end of a,
# since there shouldn't be many of them in practice)
for i in range(n, 0, -1):
if not np.isnan(a[i - 1]):
return i
return 0
lo = 0
hi = n
while hi > lo:
mid = (lo + hi) >> 1
if func(a[mid], (v)):
# mid is too low => go up
lo = mid + 1
else:
# mid is too high, or is a NaN => go down
hi = mid
return lo
return searchsorted_inner
_lt = less_than
_le = register_jitable(lambda x, y: x <= y)
_searchsorted_left = register_jitable(_searchsorted(_lt))
_searchsorted_right = register_jitable(_searchsorted(_le))
@overload(np.searchsorted)
def searchsorted(a, v, side='left'):
side_val = getattr(side, 'literal_value', side)
if side_val == 'left':
loop_impl = _searchsorted_left
elif side_val == 'right':
loop_impl = _searchsorted_right
else:
raise NumbaValueError(f"Invalid value given for 'side': {side_val}")
if isinstance(v, types.Array):
# N-d array and output
def searchsorted_impl(a, v, side='left'):
out = np.empty(v.shape, np.intp)
for view, outview in np.nditer((v, out)):
index = loop_impl(a, view.item())
outview.itemset(index)
return out
elif isinstance(v, types.Sequence):
# 1-d sequence and output
def searchsorted_impl(a, v, side='left'):
out = np.empty(len(v), np.intp)
for i in range(len(v)):
out[i] = loop_impl(a, v[i])
return out
else:
# Scalar value and output
# Note: NaNs come last in Numpy-sorted arrays
def searchsorted_impl(a, v, side='left'):
return loop_impl(a, v)
return searchsorted_impl
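# Editor's note: illustrative sketch (not in the upstream source) of the
# bisection helpers wired up above. NaNs are treated as sorting to the end of
# the array, matching NumPy. Defined but never called; assumes `numba` is
# importable at call time.
def _np_searchsorted_usage_sketch():
    from numba import njit

    @njit
    def searchsorted_demo(a, v):
        # side defaults to 'left'; 'right' is selected via the literal
        # string check in the overload above.
        return np.searchsorted(a, v)

    a = np.array([1.0, 2.0, 2.0, 4.0])
    return searchsorted_demo(a, 2.0)  # -> 1 (leftmost insertion point)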
@overload(np.digitize)
def np_digitize(x, bins, right=False):
@register_jitable
def are_bins_increasing(bins):
n = len(bins)
is_increasing = True
is_decreasing = True
if n > 1:
prev = bins[0]
for i in range(1, n):
cur = bins[i]
is_increasing = is_increasing and not prev > cur
is_decreasing = is_decreasing and not prev < cur
if not is_increasing and not is_decreasing:
raise ValueError("bins must be monotonically increasing "
"or decreasing")
prev = cur
return is_increasing
# NOTE: the algorithm is slightly different from searchsorted's,
# as the edge cases (bin boundaries, NaN) give different results.
@register_jitable
def digitize_scalar(x, bins, right):
# bins are monotonically-increasing
n = len(bins)
lo = 0
hi = n
if right:
if np.isnan(x):
# Find the first nan (i.e. the last from the end of bins,
# since there shouldn't be many of them in practice)
for i in range(n, 0, -1):
if not np.isnan(bins[i - 1]):
return i
return 0
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] < x:
# mid is too low => narrow to upper bins
lo = mid + 1
else:
# mid is too high, or is a NaN => narrow to lower bins
hi = mid
else:
if np.isnan(x):
# NaNs end up in the last bin
return n
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] <= x:
# mid is too low => narrow to upper bins
lo = mid + 1
else:
# mid is too high, or is a NaN => narrow to lower bins
hi = mid
return lo
@register_jitable
def digitize_scalar_decreasing(x, bins, right):
# bins are monotonically-decreasing
n = len(bins)
lo = 0
hi = n
if right:
if np.isnan(x):
# Find the last nan
for i in range(0, n):
if not np.isnan(bins[i]):
return i
return n
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] < x:
# mid is too high => narrow to lower bins
hi = mid
else:
# mid is too low, or is a NaN => narrow to upper bins
lo = mid + 1
else:
if np.isnan(x):
# NaNs end up in the first bin
return 0
while hi > lo:
mid = (lo + hi) >> 1
if bins[mid] <= x:
# mid is too high => narrow to lower bins
hi = mid
else:
# mid is too low, or is a NaN => narrow to upper bins
lo = mid + 1
return lo
if isinstance(x, types.Array):
# N-d array and output
def digitize_impl(x, bins, right=False):
is_increasing = are_bins_increasing(bins)
out = np.empty(x.shape, np.intp)
for view, outview in np.nditer((x, out)):
if is_increasing:
index = digitize_scalar(view.item(), bins, right)
else:
index = digitize_scalar_decreasing(view.item(), bins, right)
outview.itemset(index)
return out
return digitize_impl
elif isinstance(x, types.Sequence):
# 1-d sequence and output
def digitize_impl(x, bins, right=False):
is_increasing = are_bins_increasing(bins)
out = np.empty(len(x), np.intp)
for i in range(len(x)):
if is_increasing:
out[i] = digitize_scalar(x[i], bins, right)
else:
out[i] = digitize_scalar_decreasing(x[i], bins, right)
return out
return digitize_impl
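# Editor's note: illustrative sketch (not in the upstream source). For
# monotonically increasing bins, np.digitize returns the index of the bin each
# value falls into; with right=False, NaNs are placed after the last bin
# (index len(bins)), as digitize_scalar above shows. Defined but never called;
# assumes `numba` is importable at call time.
def _np_digitize_usage_sketch():
    from numba import njit

    @njit
    def digitize_demo(x, bins):
        return np.digitize(x, bins)

    x = np.array([0.2, 6.4, 3.0, 1.6])
    bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    return digitize_demo(x, bins)  # -> array([1, 4, 3, 2]), as in NumPy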
_range = range
@overload(np.histogram)
def np_histogram(a, bins=10, range=None):
if isinstance(bins, (int, types.Integer)):
# With a uniform distribution of bins, use a fast algorithm
# independent of the number of bins
if range in (None, types.none):
inf = float('inf')
def histogram_impl(a, bins=10, range=None):
bin_min = inf
bin_max = -inf
for view in np.nditer(a):
v = view.item()
if bin_min > v:
bin_min = v
if bin_max < v:
bin_max = v
return np.histogram(a, bins, (bin_min, bin_max))
else:
def histogram_impl(a, bins=10, range=None):
if bins <= 0:
raise ValueError("histogram(): `bins` should be a "
"positive integer")
bin_min, bin_max = range
if not bin_min <= bin_max:
raise ValueError("histogram(): max must be larger than "
"min in range parameter")
hist = np.zeros(bins, np.intp)
if bin_max > bin_min:
bin_ratio = bins / (bin_max - bin_min)
for view in np.nditer(a):
v = view.item()
b = math.floor((v - bin_min) * bin_ratio)
if 0 <= b < bins:
hist[int(b)] += 1
elif v == bin_max:
hist[bins - 1] += 1
bins_array = np.linspace(bin_min, bin_max, bins + 1)
return hist, bins_array
else:
# With a custom bins array, use a bisection search
def histogram_impl(a, bins=10, range=None):
nbins = len(bins) - 1
for i in _range(nbins):
# Note this also catches NaNs
if not bins[i] <= bins[i + 1]:
raise ValueError("histogram(): bins must increase "
"monotonically")
bin_min = bins[0]
bin_max = bins[nbins]
hist = np.zeros(nbins, np.intp)
if nbins > 0:
for view in np.nditer(a):
v = view.item()
if not bin_min <= v <= bin_max:
# Value is out of bounds, ignore (also catches NaNs)
continue
# Bisect in bins[:-1]
lo = 0
hi = nbins - 1
while lo < hi:
# Note the `+ 1` is necessary to avoid an infinite
# loop where mid = lo => lo = mid
mid = (lo + hi + 1) >> 1
if v < bins[mid]:
hi = mid - 1
else:
lo = mid
hist[lo] += 1
return hist, bins
return histogram_impl
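# Editor's note: illustrative sketch (not in the upstream source) of the
# uniform-bin fast path above: each value maps to
# floor((v - bin_min) * bins / (bin_max - bin_min)), with v == bin_max folded
# into the last bin. Defined but never called; assumes `numba` is importable.
def _np_histogram_usage_sketch():
    from numba import njit

    @njit
    def histogram_demo(a):
        return np.histogram(a, 4, (0.0, 4.0))

    a = np.array([0.5, 1.5, 1.5, 2.5, 4.0])
    return histogram_demo(a)
    # -> (array([1, 2, 1, 1]), array([0., 1., 2., 3., 4.]))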
# Create np.finfo, np.iinfo and np.MachAr
# machar
_mach_ar_supported = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg',
'iexp', 'minexp', 'xmin', 'maxexp', 'xmax', 'irnd',
'ngrd', 'epsilon', 'tiny', 'huge', 'precision',
'resolution',)
MachAr = namedtuple('MachAr', _mach_ar_supported)
# Do not support MachAr field
# finfo
_finfo_supported = ('eps', 'epsneg', 'iexp', 'machep', 'max', 'maxexp', 'min',
'minexp', 'negep', 'nexp', 'nmant', 'precision',
'resolution', 'tiny', 'bits',)
finfo = namedtuple('finfo', _finfo_supported)
# iinfo
_iinfo_supported = ('min', 'max', 'bits',)
iinfo = namedtuple('iinfo', _iinfo_supported)
@overload(np.MachAr)
def MachAr_impl():
f = np.MachAr()
_mach_ar_data = tuple([getattr(f, x) for x in _mach_ar_supported])
def impl():
return MachAr(*_mach_ar_data)
return impl
def generate_xinfo(np_func, container, attr):
@overload(np_func)
def xinfo_impl(arg):
nbty = getattr(arg, 'dtype', arg)
np_dtype = as_dtype(nbty)
try:
f = np_func(np_dtype)
except ValueError: # This exception instance comes from NumPy
# The np function might not support the dtype
return None
data = tuple([getattr(f, x) for x in attr])
def impl(arg):
return container(*data)
return impl
generate_xinfo(np.finfo, finfo, _finfo_supported)
generate_xinfo(np.iinfo, iinfo, _iinfo_supported)
def _get_inner_prod(dta, dtb):
# gets an inner product implementation, if both types are float then
# BLAS is used else a local function
@register_jitable
def _innerprod(a, b):
acc = 0
for i in range(len(a)):
acc = acc + a[i] * b[i]
return acc
# no BLAS... use local function regardless
if not _HAVE_BLAS:
return _innerprod
flty = types.real_domain | types.complex_domain
floats = dta in flty and dtb in flty
if not floats:
return _innerprod
else:
a_dt = as_dtype(dta)
b_dt = as_dtype(dtb)
dt = np.promote_types(a_dt, b_dt)
@register_jitable
def _dot_wrap(a, b):
return np.dot(a.astype(dt), b.astype(dt))
return _dot_wrap
def _assert_1d(a, func_name):
if isinstance(a, types.Array):
if not a.ndim <= 1:
raise TypingError("%s() only supported on 1D arrays " % func_name)
def _np_correlate_core(ap1, ap2, mode, direction):
pass
class _corr_conv_Mode(IntEnum):
"""
Enumerated modes for correlate/convolve as per:
https://github.com/numpy/numpy/blob/ac6b1a902b99e340cf7eeeeb7392c91e38db9dd8/numpy/core/numeric.py#L862-L870 # noqa: E501
"""
VALID = 0
SAME = 1
FULL = 2
@overload(_np_correlate_core)
def _np_correlate_core_impl(ap1, ap2, mode, direction):
a_dt = as_dtype(ap1.dtype)
b_dt = as_dtype(ap2.dtype)
dt = np.promote_types(a_dt, b_dt)
innerprod = _get_inner_prod(ap1.dtype, ap2.dtype)
Mode = _corr_conv_Mode
def impl(ap1, ap2, mode, direction):
# Implementation loosely based on `_pyarray_correlate` from
# https://github.com/numpy/numpy/blob/3bce2be74f228684ca2895ad02b63953f37e2a9d/numpy/core/src/multiarray/multiarraymodule.c#L1191 # noqa: E501
# For "Mode":
# Convolve uses 'full' by default, this is denoted by the number 2
# Correlate uses 'valid' by default, this is denoted by the number 0
# For "direction", +1 to write the return values out in order 0->N
# -1 to write them out N->0.
if not (mode == Mode.VALID or mode == Mode.FULL):
raise ValueError("Invalid mode")
n1 = len(ap1)
n2 = len(ap2)
length = n1
n = n2
if mode == Mode.VALID: # mode == valid == 0, correlate default
length = length - n + 1
n_left = 0
n_right = 0
elif mode == Mode.FULL: # mode == full == 2, convolve default
n_right = n - 1
n_left = n - 1
length = length + n - 1
else:
raise ValueError("Invalid mode")
ret = np.zeros(length, dt)
n = n - n_left
if direction == 1:
idx = 0
inc = 1
elif direction == -1:
idx = length - 1
inc = -1
else:
raise ValueError("Invalid direction")
for i in range(n_left):
ret[idx] = innerprod(ap1[:idx + 1], ap2[-(idx + 1):])
idx = idx + inc
for i in range(n1 - n2 + 1):
ret[idx] = innerprod(ap1[i : i + n2], ap2)
idx = idx + inc
for i in range(n_right, 0, -1):
ret[idx] = innerprod(ap1[-i:], ap2[:i])
idx = idx + inc
return ret
return impl
@overload(np.correlate)
def _np_correlate(a, v):
_assert_1d(a, 'np.correlate')
_assert_1d(v, 'np.correlate')
@register_jitable
def op_conj(x):
return np.conj(x)
@register_jitable
def op_nop(x):
return x
Mode = _corr_conv_Mode
if a.dtype in types.complex_domain:
if v.dtype in types.complex_domain:
a_op = op_nop
b_op = op_conj
else:
a_op = op_nop
b_op = op_nop
else:
if v.dtype in types.complex_domain:
a_op = op_nop
b_op = op_conj
else:
a_op = op_conj
b_op = op_nop
_NP_PRED = numpy_version > (1, 17)
def impl(a, v):
la = len(a)
lv = len(v)
if _NP_PRED is True:
if la == 0:
raise ValueError("'a' cannot be empty")
if lv == 0:
raise ValueError("'v' cannot be empty")
if la < lv:
return _np_correlate_core(b_op(v), a_op(a), Mode.VALID, -1)
else:
return _np_correlate_core(a_op(a), b_op(v), Mode.VALID, 1)
return impl
@overload(np.convolve)
def np_convolve(a, v):
_assert_1d(a, 'np.convolve')
_assert_1d(v, 'np.convolve')
Mode = _corr_conv_Mode
def impl(a, v):
la = len(a)
lv = len(v)
if la == 0:
raise ValueError("'a' cannot be empty")
if lv == 0:
raise ValueError("'v' cannot be empty")
if la < lv:
return _np_correlate_core(v, a[::-1], Mode.FULL, 1)
else:
return _np_correlate_core(a, v[::-1], Mode.FULL, 1)
return impl
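# Editor's note: illustrative sketch (not in the upstream source). As the
# comments in _np_correlate_core_impl explain, only the NumPy defaults are
# exposed: 'valid' mode for np.correlate and 'full' mode for np.convolve.
# Defined but never called; assumes `numba` is importable at call time.
def _np_correlate_convolve_usage_sketch():
    from numba import njit

    @njit
    def demo(a, v):
        return np.correlate(a, v), np.convolve(a, v)

    a = np.array([1.0, 2.0, 3.0])
    v = np.array([0.0, 1.0, 0.5])
    return demo(a, v)
    # np.correlate -> array([3.5]); np.convolve -> array([0., 1., 2.5, 4., 1.5])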
@overload(np.asarray)
def np_asarray(a, dtype=None):
# developer note... keep this function (type_can_asarray) in sync with the
# accepted types implementations below!
if not type_can_asarray(a):
return None
impl = None
if isinstance(a, types.Array):
if is_nonelike(dtype) or a.dtype == dtype.dtype:
def impl(a, dtype=None):
return a
else:
def impl(a, dtype=None):
return a.astype(dtype)
elif isinstance(a, (types.Sequence, types.Tuple)):
# Nested lists cannot be unpacked, therefore only single lists are
# permitted and these conform to Sequence and can be unpacked along on
# the same path as Tuple.
if is_nonelike(dtype):
def impl(a, dtype=None):
return np.array(a)
else:
def impl(a, dtype=None):
return np.array(a, dtype)
elif isinstance(a, (types.Number, types.Boolean)):
dt_conv = a if is_nonelike(dtype) else dtype
ty = as_dtype(dt_conv)
def impl(a, dtype=None):
return np.array(a, ty)
elif isinstance(a, types.containers.ListType):
if not isinstance(a.dtype, (types.Number, types.Boolean)):
raise TypingError(
"asarray support for List is limited "
"to Boolean and Number types")
target_dtype = a.dtype if is_nonelike(dtype) else dtype
def impl(a, dtype=None):
l = len(a)
ret = np.empty(l, dtype=target_dtype)
for i, v in enumerate(a):
ret[i] = v
return ret
elif isinstance(a, types.StringLiteral):
arr = np.asarray(a.literal_value)
def impl(a, dtype=None):
return arr.copy()
return impl
@overload(np.asfarray)
def np_asfarray(a, dtype=np.float64):
# convert numba dtype types into NumPy dtype
if isinstance(dtype, types.Type):
dtype = as_dtype(dtype)
if not np.issubdtype(dtype, np.inexact):
dx = types.float64
else:
dx = dtype
def impl(a, dtype=np.float64):
return np.asarray(a, dx)
return impl
@overload(np.extract)
def np_extract(condition, arr):
def np_extract_impl(condition, arr):
cond = np.asarray(condition).flatten()
a = np.asarray(arr)
if a.size == 0:
raise ValueError('Cannot extract from an empty array')
# the following looks odd but replicates NumPy...
# https://github.com/numpy/numpy/issues/12859
if np.any(cond[a.size:]) and cond.size > a.size:
msg = 'condition shape inconsistent with arr shape'
raise ValueError(msg)
# NumPy raises IndexError: index 'm' is out of
# bounds for size 'n'
max_len = min(a.size, cond.size)
out = [a.flat[idx] for idx in range(max_len) if cond[idx]]
return np.array(out)
return np_extract_impl
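# Editor's note: illustrative sketch (not in the upstream source). np.extract
# returns the elements of the flattened array where the (flattened) condition
# is true, with the slightly odd length check above replicating NumPy.
# Defined but never called; assumes `numba` is importable at call time.
def _np_extract_usage_sketch():
    from numba import njit

    @njit
    def extract_demo(cond, arr):
        return np.extract(cond, arr)

    arr = np.arange(6)
    return extract_demo(arr % 2 == 0, arr)  # -> array([0, 2, 4])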
@overload(np.select)
def np_select(condlist, choicelist, default=0):
def np_select_arr_impl(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
raise ValueError('list of cases must be same length as list '
'of conditions')
out = default * np.ones(choicelist[0].shape, choicelist[0].dtype)
# should use reversed+zip, but reversed is not available
for i in range(len(condlist) - 1, -1, -1):
cond = condlist[i]
choice = choicelist[i]
out = np.where(cond, choice, out)
return out
# first we check the types of the input parameters
if not isinstance(condlist, (types.List, types.UniTuple)):
raise NumbaTypeError('condlist must be a List or a Tuple')
if not isinstance(choicelist, (types.List, types.UniTuple)):
raise NumbaTypeError('choicelist must be a List or a Tuple')
if not isinstance(default, (int, types.Number, types.Boolean)):
raise NumbaTypeError('default must be a scalar (number or boolean)')
# the types of the parameters have been checked, now we test the types
# of the content of the parameters
# implementation note: if in the future numba's np.where accepts tuples
# as elements of condlist, then the check below should be extended to
# accept tuples
if not isinstance(condlist[0], types.Array):
raise NumbaTypeError('items of condlist must be arrays')
if not isinstance(choicelist[0], types.Array):
raise NumbaTypeError('items of choicelist must be arrays')
# the types of the parameters and their contents have been checked,
# now we test the dtypes of the content of parameters
if isinstance(condlist[0], types.Array):
if not isinstance(condlist[0].dtype, types.Boolean):
raise NumbaTypeError('condlist arrays must contain booleans')
if isinstance(condlist[0], types.UniTuple):
if not (isinstance(condlist[0], types.UniTuple)
and isinstance(condlist[0][0], types.Boolean)):
raise NumbaTypeError('condlist tuples must only contain booleans')
# the input types are correct, now we perform checks on the dimensions
if (isinstance(condlist[0], types.Array) and
condlist[0].ndim != choicelist[0].ndim):
raise NumbaTypeError('condlist and choicelist elements must have the '
'same number of dimensions')
if isinstance(condlist[0], types.Array) and condlist[0].ndim < 1:
raise NumbaTypeError('condlist arrays must be of at least dimension 1')
return np_select_arr_impl
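# Editor's note: illustrative sketch (not in the upstream source). Earlier
# entries in condlist take precedence, which is why the loop in
# np_select_arr_impl walks the lists in reverse. Defined but never called;
# assumes `numba` is importable at call time.
def _np_select_usage_sketch():
    from numba import njit

    @njit
    def select_demo(x):
        condlist = (x < 3, x > 5)
        choicelist = (x, x ** 2)
        return np.select(condlist, choicelist, 0)

    return select_demo(np.arange(10))
    # -> array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])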
@overload(np.asarray_chkfinite)
def np_asarray_chkfinite(a, dtype=None):
msg = "The argument to np.asarray_chkfinite must be array-like"
if not isinstance(a, (types.Array, types.Sequence, types.Tuple)):
raise TypingError(msg)
if is_nonelike(dtype):
dt = a.dtype
else:
try:
dt = as_dtype(dtype)
except NumbaNotImplementedError:
raise TypingError('dtype must be a valid Numpy dtype')
def impl(a, dtype=None):
a = np.asarray(a, dtype=dt)
for i in np.nditer(a):
if not np.isfinite(i):
raise ValueError("array must not contain infs or NaNs")
return a
return impl
#----------------------------------------------------------------------------
# Windowing functions
# - translated from the numpy implementations found in:
# https://github.com/numpy/numpy/blob/v1.16.1/numpy/lib/function_base.py#L2543-L3233 # noqa: E501
# at commit: f1c4c758e1c24881560dd8ab1e64ae750
# - and also, for NumPy >= 1.20, translated from implementations in
# https://github.com/numpy/numpy/blob/156cd054e007b05d4ac4829e10a369d19dd2b0b1/numpy/lib/function_base.py#L2655-L3065 # noqa: E501
@register_jitable
def np_bartlett_impl(M):
if numpy_version >= (1, 20):
n = np.arange(1. - M, M, 2)
return np.where(np.less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1))
else:
n = np.arange(M)
return np.where(np.less_equal(n, (M - 1) / 2.0), 2.0 * n / (M - 1),
2.0 - 2.0 * n / (M - 1))
@register_jitable
def np_blackman_impl(M):
if numpy_version >= (1, 20):
n = np.arange(1. - M, M, 2)
return (0.42 + 0.5 * np.cos(np.pi * n / (M - 1)) +
0.08 * np.cos(2.0 * np.pi * n / (M - 1)))
else:
n = np.arange(M)
return (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
@register_jitable
def np_hamming_impl(M):
if numpy_version >= (1, 20):
n = np.arange(1 - M, M, 2)
return 0.54 + 0.46 * np.cos(np.pi * n / (M - 1))
else:
n = np.arange(M)
return 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
@register_jitable
def np_hanning_impl(M):
if numpy_version >= (1, 20):
n = np.arange(1 - M, M, 2)
return 0.5 + 0.5 * np.cos(np.pi * n / (M - 1))
else:
n = np.arange(M)
return 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
def window_generator(func):
def window_overload(M):
if not isinstance(M, types.Integer):
raise TypingError('M must be an integer')
def window_impl(M):
if M < 1:
return np.array((), dtype=np.float_)
if M == 1:
return np.ones(1, dtype=np.float_)
return func(M)
return window_impl
return window_overload
overload(np.bartlett)(window_generator(np_bartlett_impl))
overload(np.blackman)(window_generator(np_blackman_impl))
overload(np.hamming)(window_generator(np_hamming_impl))
overload(np.hanning)(window_generator(np_hanning_impl))
_i0A = np.array([
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1,
])
_i0B = np.array([
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1,
])
@register_jitable
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x * b1 - b2 + vals[i]
return 0.5 * (b0 - b2)
@register_jitable
def _i0(x):
if x < 0:
x = -x
if x <= 8.0:
y = (0.5 * x) - 2.0
return np.exp(x) * _chbevl(y, _i0A)
return np.exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / np.sqrt(x)
@register_jitable
def _i0n(n, alpha, beta):
y = np.empty_like(n, dtype=np.float_)
t = _i0(np.float_(beta))
for i in range(len(y)):
y[i] = _i0(beta * np.sqrt(1 - ((n[i] - alpha) / alpha)**2.0)) / t
return y
@overload(np.kaiser)
def np_kaiser(M, beta):
if not isinstance(M, types.Integer):
raise TypingError('M must be an integer')
if not isinstance(beta, (types.Integer, types.Float)):
raise TypingError('beta must be an integer or float')
def np_kaiser_impl(M, beta):
if M < 1:
return np.array((), dtype=np.float_)
if M == 1:
return np.ones(1, dtype=np.float_)
n = np.arange(0, M)
alpha = (M - 1) / 2.0
return _i0n(n, alpha, beta)
return np_kaiser_impl
@register_jitable
def _cross_operation(a, b, out):
def _cross_preprocessing(x):
x0 = x[..., 0]
x1 = x[..., 1]
if x.shape[-1] == 3:
x2 = x[..., 2]
else:
x2 = np.multiply(x.dtype.type(0), x0)
return x0, x1, x2
a0, a1, a2 = _cross_preprocessing(a)
b0, b1, b2 = _cross_preprocessing(b)
cp0 = np.multiply(a1, b2) - np.multiply(a2, b1)
cp1 = np.multiply(a2, b0) - np.multiply(a0, b2)
cp2 = np.multiply(a0, b1) - np.multiply(a1, b0)
out[..., 0] = cp0
out[..., 1] = cp1
out[..., 2] = cp2
@generated_jit
def _cross_impl(a, b):
dtype = np.promote_types(as_dtype(a.dtype), as_dtype(b.dtype))
if a.ndim == 1 and b.ndim == 1:
def impl(a, b):
cp = np.empty((3,), dtype)
_cross_operation(a, b, cp)
return cp
else:
def impl(a, b):
shape = np.add(a[..., 0], b[..., 0]).shape
cp = np.empty(shape + (3,), dtype)
_cross_operation(a, b, cp)
return cp
return impl
@overload(np.cross)
def np_cross(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError((
"Incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)"
))
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError((
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"a call to `cross2d(a, b)` from `numba.np.extensions`."
))
return impl
@register_jitable
def _cross2d_operation(a, b):
def _cross_preprocessing(x):
x0 = x[..., 0]
x1 = x[..., 1]
return x0, x1
a0, a1 = _cross_preprocessing(a)
b0, b1 = _cross_preprocessing(b)
cp = np.multiply(a0, b1) - np.multiply(a1, b0)
# If ndim of a and b is 1, cp is a scalar.
# In this case np.cross returns a 0-D array, containing the scalar.
# np.asarray is used to reconcile this case, without introducing
# overhead in the case where cp is an actual N-D array.
# (recall that np.asarray does not copy existing arrays)
return np.asarray(cp)
@generated_jit
def cross2d(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] != 2 or b_.shape[-1] != 2:
raise ValueError((
"Incompatible dimensions for 2D cross product\n"
"(dimension must be 2 for both inputs)"
))
return _cross2d_operation(a_, b_)
return impl
|
py | 7dffefa8c90942810d5d20faa6765b144848d792 | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Defines the public API of the package."""
import pathlib
import logging
from typing import Dict, Any
from mbed_project.mbed_program import MbedProgram, parse_url
logger = logging.getLogger(__name__)
def clone_project(url: str, dst_path: Any = None, recursive: bool = False) -> None:
"""Clones an Mbed project from a remote repository.
Args:
url: URL of the repository to clone.
dst_path: Destination path for the repository.
recursive: Recursively clone all project dependencies.
"""
git_data = parse_url(url)
url = git_data["url"]
if not dst_path:
dst_path = pathlib.Path(git_data["dst_path"])
program = MbedProgram.from_url(url, dst_path, check_mbed_os=False)
if recursive:
program.resolve_libraries()
def initialise_project(path: pathlib.Path, create_only: bool) -> None:
"""Create a new Mbed project, optionally fetching and adding mbed-os.
Args:
path: Path to the project folder. Created if it doesn't exist.
        create_only: Flag which suppresses fetching mbed-os. If the value is `False`, fetch mbed-os from the remote.
"""
program = MbedProgram.from_new(path)
if not create_only:
program.resolve_libraries()
def checkout_project_revision(path: pathlib.Path, force: bool = False) -> None:
"""Checkout a specific revision of the current Mbed project.
This function also resolves and syncs all library dependencies to the revision specified in the library reference
files.
Args:
path: Path to the Mbed project.
force: Force overwrite uncommitted changes. If False, the checkout will fail if there are uncommitted local
changes.
"""
program = MbedProgram.from_existing(path, check_mbed_os=False)
program.checkout_libraries(force=force)
if program.has_unresolved_libraries():
logger.info("Unresolved libraries detected, downloading library source code.")
program.resolve_libraries()
def get_known_libs(path: pathlib.Path) -> Dict[str, Any]:
"""List all resolved library dependencies.
    This function does not resolve dependencies; it only lists the dependencies that have already been resolved.
Args:
path: Path to the Mbed project.
Returns:
dictionary containing a list of known dependencies and a boolean stating whether unresolved dependencies were
detected.
"""
program = MbedProgram.from_existing(path, check_mbed_os=False)
return {"known_libs": program.list_known_library_dependencies(), "unresolved": program.has_unresolved_libraries()}
|
py | 7dfff0d9fc8227451fd373e815aaffb6748ccd2b | """ pre-flight check, will fill default configurations when needed """
def get_network_G_config(network_G, scale, crop_size):
kind_G = None
if isinstance(network_G, str):
kind_G = network_G.lower()
network_G = {}
elif isinstance(network_G, dict):
if 'which_model_G' in network_G:
which_model = 'which_model_G'
elif 'type' in network_G:
which_model = 'type'
kind_G = network_G[which_model].lower()
full_network_G = {}
full_network_G['strict'] = network_G.pop('strict', False) # True | False: whether to load the model in strict mode or not
# SR networks
if kind_G in ('rrdb_net', 'esrgan', 'evsrgan', 'esrgan-lite'):
# ESRGAN (or EVSRGAN):
full_network_G['type'] = "rrdb_net" # RRDB_net (original ESRGAN arch)
full_network_G['norm_type'] = network_G.pop('norm_type', None) # "instance" normalization, "batch" normalization or no norm
full_network_G['mode'] = network_G.pop('mode', "CNA") # CNA: conv->norm->act, NAC: norm->act->conv
if kind_G == 'esrgan-lite':
full_network_G['nf'] = network_G.pop('nf', 32) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 12) # number of RRDB blocks
else:
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 23) # number of RRDB blocks
full_network_G['nr'] = network_G.pop('nr', 3) # number of residual layers in each RRDB block
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['gc'] = network_G.pop('gc', 32) #
if kind_G == 'evsrgan':
full_network_G['convtype'] = network_G.pop('convtype', "Conv3D") # Conv3D for video
else:
full_network_G['convtype'] = network_G.pop('convtype', "Conv2D") # Conv2D | PartialConv2D | DeformConv2D | Conv3D
full_network_G['act_type'] = network_G.pop('net_act', None) or network_G.pop('act_type', "leakyrelu") # swish | leakyrelu
full_network_G['gaussian_noise'] = network_G.pop('gaussian', True) # add gaussian noise in the net latent # True | False
full_network_G['plus'] = network_G.pop('plus', False) # use the ESRGAN+ modifications # true | false
full_network_G['finalact'] = network_G.pop('finalact', None) # Activation function, ie use "tanh" to make outputs fit in [-1, 1] range. Default = None. Coordinate with znorm.
full_network_G['upscale'] = network_G.pop('scale', scale)
full_network_G['upsample_mode'] = network_G.pop('upsample_mode', "upconv") # the type of upsample to use
elif kind_G in ('mrrdb_net', 'mesrgan'):
# ESRGAN modified arch:
full_network_G['type'] = "mrrdb_net" # MRRDB_net (modified/"new" arch) | sr_resnet
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 23) # number of RRDB blocks
full_network_G['gc'] = network_G.pop('gc', 32) #
elif 'ppon' in kind_G:
# PPON:
full_network_G['type'] = "ppon" # RRDB_net (original ESRGAN arch)
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 24) # number of RRDB blocks
full_network_G['upscale'] = network_G.pop('scale', scale)
full_network_G['act_type'] = network_G.pop('net_act', None) or network_G.pop('act_type', "leakyrelu") # swish | leakyrelu
elif 'asr_cnn' in kind_G:
full_network_G['type'] = "asr_cnn" # ASRCNN
full_network_G['upscale_factor'] = network_G.pop('scale', scale)
full_network_G['spectral_norm'] = network_G.pop('spectral_norm', True)
full_network_G['self_attention'] = network_G.pop('self_attention', True)
full_network_G['max_pool'] = network_G.pop('max_pool', True)
full_network_G['poolsize'] = network_G.pop('poolsize', 4)
full_network_G['finalact'] = network_G.pop('finalact', 'tanh')
elif 'asr_resnet' in kind_G:
full_network_G['type'] = "asr_resnet" # ASRResNet
full_network_G['scale_factor'] = network_G.pop('scale', scale)
full_network_G['spectral_norm'] = network_G.pop('spectral_norm', True)
full_network_G['self_attention'] = network_G.pop('self_attention', True)
full_network_G['max_pool'] = network_G.pop('max_pool', True)
full_network_G['poolsize'] = network_G.pop('poolsize', 4)
elif kind_G in ('sr_resnet', 'srresnet', 'srgan'):
# SRGAN:
full_network_G['type'] = "sr_resnet" # SRResNet
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 16) # number of RRDB blocks
full_network_G['upscale'] = network_G.pop('scale', scale)
full_network_G['norm_type'] = network_G.pop('norm_type', None) # "instance" normalization, "batch" normalization or no norm
full_network_G['act_type'] = network_G.pop('net_act', None) or network_G.pop('act_type', "relu") # swish | relu | leakyrelu
full_network_G['mode'] = network_G.pop('mode', "CNA") # CNA: conv->norm->act, NAC: norm->act->conv
full_network_G['upsample_mode'] = network_G.pop('upsample_mode', "pixelshuffle") # the type of upsample to use
full_network_G['convtype'] = network_G.pop('convtype', "Conv2D") # Conv2D | PartialConv2D | DeformConv2D | Conv3D
full_network_G['finalact'] = network_G.pop('finalact', None) # Activation function, ie use "tanh" to make outputs fit in [-1, 1] range. Default = None. Coordinate with znorm.
full_network_G['res_scale'] = network_G.pop('res_scale', 1)
#TODO: msrresnet
elif kind_G in ('sft_arch', 'sft_net'):
full_network_G['type'] = "sft_arch" # SFT-GAN
elif kind_G in ('pan_net', 'pan'):
# PAN:
full_network_G['type'] = "pan_net" # PAN_net
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 40) # number of filters in each conv layer
full_network_G['unf'] = network_G.pop('unf', 24) # number of filters during upscale
full_network_G['nb'] = network_G.pop('nb', 16) # number of blocks
full_network_G['scale'] = network_G.pop('scale', scale)
full_network_G['self_attention'] = network_G.pop('self_attention', False)
full_network_G['double_scpa'] = network_G.pop('double_scpa', False)
full_network_G['ups_inter_mode'] = network_G.pop('ups_inter_mode', "nearest")
elif kind_G in ('abpn_net', 'abpn'):
full_network_G['type'] = "abpn_net" # ABPN_net
full_network_G['input_dim'] = network_G.pop('in_nc', None) or network_G.pop('input_dim', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['dim'] = network_G.pop('dim', 32)
# SRFlow
elif kind_G in ('srflow_net', 'srflow'):
# SRFLOW:
full_network_G['type'] = "srflow_net" # SRFlow_net
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the first conv layer
full_network_G['nb'] = network_G.pop('nb', 23) # number of RRDB blocks
full_network_G['gc'] = network_G.pop('gc', 32) #
full_network_G['scale'] = network_G.pop('scale', scale)
full_network_G['upscale'] = full_network_G['scale']
flow_config = network_G['flow'] if ('flow' in network_G) else {}
        full_network_G['K'] = flow_config.get('K', 16) if flow_config else 16  # read only here; 'K' is popped into full_network_G['flow'] below
        # Note: the network also needs opt and step; the options below are not used as network parameters, but by GLOW
full_network_G['train_RRDB'] = network_G.pop('train_RRDB', False) # if RRDB network will be trained
full_network_G['train_RRDB_delay'] = network_G.pop('train_RRDB_delay', 0.5) # at what % of training will RRDB start training
full_network_G['flow'] = {}
full_network_G['flow']['K'] = flow_config.pop('K', 16)
full_network_G['flow']['L'] = flow_config.pop('L', 3)
full_network_G['flow']['noInitialInj'] = flow_config.pop('noInitialInj', True)
full_network_G['flow']['coupling'] = flow_config.pop('coupling', "CondAffineSeparatedAndCond")
full_network_G['flow']['additionalFlowNoAffine'] = flow_config.pop('additionalFlowNoAffine', 2)
full_network_G['flow']['fea_up0'] = flow_config.pop('fea_up0', True)
if 'split' in flow_config:
full_network_G['flow']['split'] = {
"enable": flow_config['split'].pop('enable', True)}
else:
full_network_G['flow']['split'] = {
"enable": True}
if 'augmentation' in flow_config:
full_network_G['flow']['augmentation'] = {
"noiseQuant": flow_config['augmentation'].pop('noiseQuant', True)}
else:
full_network_G['flow']['augmentation'] = {
"noiseQuant": True}
if 'stackRRDB' in flow_config:
full_network_G['flow']['stackRRDB'] = {
"blocks": flow_config['stackRRDB'].pop('blocks', [ 1, 8, 15, 22 ]),
"concat": flow_config['stackRRDB'].pop('concat', True)}
else:
full_network_G['flow']['stackRRDB'] = {
"blocks": [ 1, 8, 15, 22 ],
"concat": True}
# image to image translation
elif 'wbcunet' in kind_G:
# WBC
full_network_G['type'] = "wbcunet_net"
full_network_G['nf'] = network_G.pop('nf', 32)
if 'tf' in kind_G:
full_network_G['mode'] = 'tf'
else:
full_network_G['mode'] = network_G.pop('mode', 'pt')
elif 'unet' in kind_G:
# UNET:
full_network_G['type'] = "unet_net"
full_network_G['input_nc'] = network_G.pop('in_nc', 3) # # of input image channels: 3 for RGB and 1 for grayscale
full_network_G['output_nc'] = network_G.pop('out_nc', 3) # # of output image channels: 3 for RGB and 1 for grayscale
if kind_G == 'unet_128':
full_network_G['num_downs'] = network_G.pop('num_downs', 7) # for 'unet_128' (for 128x128 input images)
elif kind_G == 'unet_256':
full_network_G['num_downs'] = network_G.pop('num_downs', 8) # for 'unet_256' (for 256x256 input images)
else:
full_network_G['num_downs'] = network_G.pop('num_downs', 8) #7 for 'unet_128' (for 128x128 input images) | 8 for 'unet_256' (for 256x256 input images)
# check valid crop size for UNET
if full_network_G['num_downs'] == 7:
assert crop_size == 128, f'Invalid crop size {crop_size} for UNET config, must be 128'
elif full_network_G['num_downs'] == 8:
assert crop_size == 256, f'Invalid crop size {crop_size} for UNET config, must be 256'
elif full_network_G['num_downs'] == 9:
assert crop_size == 512, f'Invalid crop size {crop_size} for UNET config, must be 512'
full_network_G['ngf'] = network_G.pop('ngf', 64) # # of gen filters in the last conv layer
full_network_G['norm_type'] = network_G.pop('norm_type', "batch") # "instance" normalization or "batch" normalization
full_network_G['use_dropout'] = network_G.pop('use_dropout', False) # whether to use dropout or not
#TODO: add:
# full_network_G['dropout_prob'] = network_G.pop('dropout_prob', 0.5) # the default dropout probability
full_network_G['upsample_mode'] = network_G.pop('upsample_mode', "deconv") # deconv | upconv # the type of upsample to use, deconvolution or upsample+convolution
elif 'resnet' in kind_G and kind_G != 'sr_resnet':
# RESNET:
full_network_G['type'] = "resnet_net"
full_network_G['input_nc'] = network_G.pop('in_nc', 3) # # of input image channels: 3 for RGB and 1 for grayscale
full_network_G['output_nc'] = network_G.pop('out_nc', 3) # # of output image channels: 3 for RGB and 1 for grayscale
if kind_G == 'resnet_6blocks':
            full_network_G['n_blocks'] = network_G.pop('n_blocks', 6) # 6 for resnet_6blocks (with 6 Resnet blocks)
elif kind_G == 'resnet_9blocks':
full_network_G['n_blocks'] = network_G.pop('n_blocks', 9) # 9 for resnet_9blocks (with 9 Resnet blocks)
else:
full_network_G['n_blocks'] = network_G.pop('n_blocks', 9) # 6 for resnet_6blocks (with 6 Resnet blocks) and 9 for resnet_9blocks (with 9 Resnet blocks)
full_network_G['ngf'] = network_G.pop('ngf', 64) # num. of gen filters in the last conv layer
full_network_G['norm_type'] = network_G.pop('norm_type', "instance") # "instance" normalization or "batch" normalization
full_network_G['use_dropout'] = network_G.pop('use_dropout', False) # whether to use dropout or not
#TODO: add:
# full_network_G['dropout_prob'] = network_G.pop('dropout_prob', 0.5) # the default dropout probability
full_network_G['upsample_mode'] = network_G.pop('upsample_mode', "deconv") # deconv | upconv # the type of upsample to use, deconvolution or upsample+convolution
full_network_G['padding_type'] = network_G.pop('padding_type', "reflect")
# video networks
elif kind_G in ('sofvsr_net', 'sofvsr'):
        full_network_G['type'] = "sofvsr_net" # SOFVSR_net
full_network_G['n_frames'] = network_G.pop('n_frames', 3) # number of frames the network will use to estimate the central frame (n-1)/2. Must coincide with "num_frames" in the dataset.
full_network_G['channels'] = network_G.pop('channels', 320) # feature extraction layer with 320 kernels of size 3 × 3
full_network_G['scale'] = network_G.pop('scale', scale)
        full_network_G['img_ch'] = network_G.pop('in_nc', None) or network_G.pop('img_ch', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
# for SR network:
full_network_G['SR_net'] = network_G.pop('SR_net', "rrdb") # sofvsr | rrdb | pan
full_network_G['sr_nf'] = network_G.pop('sr_nf', 64) # for rrdb or pan # number of filters in the first conv layer
full_network_G['sr_nb'] = network_G.pop('sr_nb', 23) # for rrdb or pan # number of RRDB blocks
full_network_G['sr_gc'] = network_G.pop('sr_gc', 32) # for rrdb
full_network_G['sr_unf'] = network_G.pop('sr_unf', 24) # for pan # number of filters during upscale
full_network_G['sr_gaussian_noise'] = network_G.pop('sr_gaussian_noise', True) # for rrdb # add gaussian noise in the net latent # True | False
full_network_G['sr_plus'] = network_G.pop('sr_plus', False) # for rrdb # use the ESRGAN+ modifications # true | false
full_network_G['sr_sa'] = network_G.pop('sr_sa', True) # for pan # self_attention
full_network_G['sr_upinter_mode'] = network_G.pop('sr_upinter_mode', "nearest") # for pan
# unused options for RRDB:
# full_network_G['sr_norm_type'] = network_G.pop('sr_norm_type', None) # "instance" normalization, "batch" normalization or no norm
# full_network_G['sr_mode'] = network_G.pop('sr_mode', "CNA") # CNA: conv->norm->act, NAC: norm->act->conv
# full_network_G['sr_nr'] = network_G.pop('sr_nr', 3) # number of residual layers in each RRDB block
# full_network_G['sr_out_nc'] = network_G.pop('sr_out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
# full_network_G['sr_group'] = network_G.pop('sr_group', 1) #
# full_network_G['sr_convtype'] = network_G.pop('sr_convtype', "Conv2D") # Conv2D | PartialConv2D | DeformConv2D | Conv3D
# full_network_G['sr_act_type'] = network_G.pop('sr_net_act', None) or network_G.pop('sr_act_type', "leakyrelu") # swish | leakyrelu
# full_network_G['sr_finalact'] = network_G.pop('sr_finalact', None) # Activation function, ie use "tanh" to make outputs fit in [-1, 1] range. Default = None. Coordinate with znorm.
# full_network_G['sr_upsample_mode'] = network_G.pop('sr_upsample_mode', "upconv") # the type of upsample to use
elif kind_G in ('sr3d_net', 'sr3d'):
# SR3D:
full_network_G['type'] = "sr3d_net" # SR3DNet
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the conv layers
full_network_G['nb'] = network_G.pop('nb', 23) # number of Conv3D blocks
full_network_G['scale'] = network_G.pop('scale', scale)
full_network_G['n_frames'] = network_G.pop('n_frames', 5) # number of frames the network will use to estimate the central frame (n-1)/2. Must coincide with "num_frames" in the dataset.
elif kind_G in ('edvr_net', 'edvr'):
# EDVR:
full_network_G['type'] = "edvr_net" # EDVR
full_network_G['num_in_ch'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['num_out_ch'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['num_feat'] = network_G.pop('nf', 64) # number of features (M=64, L=128)
full_network_G['num_frame'] = network_G.pop('n_frames', 5) # number of frames the network will use to estimate the central frame (n-1)/2. Must coincide with "num_frames" in the dataset.
full_network_G['upscale'] = network_G.pop('scale', scale)
full_network_G['deformable_groups'] = network_G.pop('deformable_groups', 8) # number of deformable offset groups in the deformable layers
full_network_G['num_extract_block'] = network_G.pop('n_extract_block', 5) # number of extract blocks
full_network_G['num_reconstruct_block'] = network_G.pop('n_reconstruct_block', 10) # number of reconstruction blocks (M=10, L=40)
full_network_G['center_frame_idx'] = network_G.pop('center_frame_idx', None) # fix center frame, if None will use num_frame // 2
full_network_G['with_predeblur'] = network_G.pop('predeblur', False) # use pre-deblur
full_network_G['with_tsa'] = network_G.pop('tsa', True) # use Temporal Spatial Attention
full_network_G['upsample_mode'] = network_G.pop('upsample_mode', "pixelshuffle") # pixelshuffle | upconv
full_network_G['add_rrdb'] = network_G.pop('add_rrdb', False) # adds RRDB blocks before upsample step to improve SR
full_network_G['nb'] = network_G.pop('nb', 23) # number of blocks, only applies to add_rrdb's RRDB blocks
elif kind_G in ('rife_net', 'rife'):
full_network_G['type'] = "rife_net" # RIFE
elif kind_G == 'dvd_net':
full_network_G['type'] = "dvd_net" # DVD
full_network_G['in_nc'] = network_G.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_G['out_nc'] = network_G.pop('out_nc', 3) # num. of output image channels: 3 for RGB and 1 for grayscale
full_network_G['nf'] = network_G.pop('nf', 64) # number of filters in the conv layers
else:
raise NotImplementedError(f'Generator model [{kind_G:s}] not recognized')
#TODO: check if any options in network_G went unprocessed
if bool(network_G):
print(network_G)
return full_network_G
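# Illustrative sketch (hypothetical call, traced from the branches above): a bare
# string shorthand is expanded into the full default generator config, e.g.
#   get_network_G_config("esrgan", scale=4, crop_size=128)
#   -> {'strict': False, 'type': 'rrdb_net', 'nf': 64, 'nb': 23, 'nr': 3,
#       'in_nc': 3, 'out_nc': 3, 'gc': 32, 'convtype': 'Conv2D',
#       'act_type': 'leakyrelu', 'upscale': 4, 'upsample_mode': 'upconv', ...}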
def get_network_D_config(network_D, scale, crop_size, model_G):
# Note: in PPON they used 100 features in the linear classifier for
# VGG-like discriminator instead of 128. Not important, but can review.
if model_G == 'ppon':
model_G = 'PPON'
else:
model_G = 'ESRGAN'
kind_D = None
if isinstance(network_D, str):
kind_D = network_D.lower()
network_D = {}
elif isinstance(network_D, dict):
if 'which_model_D' in network_D:
which_model = 'which_model_D'
elif 'type' in network_D:
which_model = 'type'
kind_D = network_D[which_model].lower()
full_network_D = {}
full_network_D['strict'] = network_D.pop('strict', True) # True | False: whether to load the model in strict mode or not
if kind_D == 'dis_acd':
# sft-gan, Auxiliary Classifier Discriminator
full_network_D['type'] = network_D.pop('type', "dis_acd")
elif kind_D == 'discriminator_vgg_128_sn':
# TODO: will be replaced by regular discriminator_vgg with optional spectral norm
full_network_D['type'] = network_D.pop('type', "discriminator_vgg_128_SN")
elif kind_D in ('adiscriminator', 'adiscriminator_s'):
# TODO: replace with discriminator_vgg_fea
full_network_D['type'] = network_D.pop('type', "adiscriminator")
full_network_D['spectral_norm'] = network_D.pop('spectral_norm', True)
full_network_D['self_attention'] = network_D.pop('self_attention', True)
full_network_D['max_pool'] = network_D.pop('max_pool', False)
full_network_D['poolsize'] = network_D.pop('poolsize', 4)
elif 'discriminator_vgg_' in kind_D or kind_D in ['discriminator_192', 'discriminator_256', 'discriminator_vgg']:
# 'discriminator_vgg_96', 'discriminator_vgg_128', 'discriminator_vgg_192' or 'discriminator_192', 'discriminator_vgg_256' or 'discriminator_256'
full_network_D['type'] = network_D.pop('type', kind_D) # VGG-like discriminator ("discriminator_vgg")
full_network_D['in_nc'] = network_D.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_D['base_nf'] = network_D.pop('nf', 64) # num. of features in conv layers
full_network_D['norm_type'] = network_D.pop('norm_type', "batch") # "instance" normalization, "batch" normalization or no norm
full_network_D['mode'] = network_D.pop('mode', "CNA") # CNA: conv->norm->act, NAC: norm->act->conv
full_network_D['act_type'] = network_D.pop('net_act', None) or network_D.pop('act_type', "leakyrelu") # swish | leakyrelu
full_network_D['convtype'] = network_D.pop('convtype', "Conv2D")
full_network_D['arch'] = network_D.pop('G_arch', model_G)
if "_fea" in kind_D:
# feature extraction/maching: 'discriminator_vgg_128_fea', 'discriminator_vgg_fea'
# TODO: these options are not currently enabled in the networks
full_network_D['spectral_norm'] = network_D.pop('spectral_norm', False)
full_network_D['self_attention'] = network_D.pop('self_attention', False)
full_network_D['max_pool'] = network_D.pop('max_pool', False)
full_network_D['poolsize'] = network_D.pop('poolsize', 4)
if kind_D == 'discriminator_vgg' or kind_D == 'discriminator_vgg_fea':
full_network_D['size'] = network_D.pop('D_size', crop_size)
elif kind_D in ['patchgan', 'nlayerdiscriminator', 'multiscale', 'pixelgan', 'pixeldiscriminator']:
if kind_D in ('patchgan', 'nlayerdiscriminator'):
full_network_D['type'] = 'patchgan'
elif kind_D == 'multiscale':
full_network_D['type'] = 'multiscale'
elif kind_D in ('pixelgan', 'pixeldiscriminator'):
full_network_D['type'] = 'pixelgan'
full_network_D['input_nc'] = network_D.pop('in_nc', 3) # num. of input image channels: 3 for RGB and 1 for grayscale
full_network_D['ndf'] = network_D.pop('nf', 64) # num. of features in conv layers
if kind_D in ['patchgan', 'nlayerdiscriminator', 'multiscale']:
full_network_D['n_layers'] = network_D.pop('n_layers', None) or network_D.pop('nlayer', 3)
        if kind_D in ('patchgan', 'nlayerdiscriminator'):
full_network_D['patch'] = network_D.pop('patch_output', True) # discriminator will return full result as image patch
full_network_D['use_spectral_norm'] = network_D.pop('spectral_norm', None) or network_D.pop('use_spectral_norm', False)
if kind_D == 'multiscale':
full_network_D['num_D'] = network_D.pop('num_D', 3) # number of discriminators (scales)
elif 'unet' in kind_D:
full_network_D['type'] = 'unet'
full_network_D['input_nc'] = network_D.pop('in_nc', 3)
full_network_D['nf'] = network_D.pop('nf', 64)
full_network_D['skip_connection'] = network_D.pop('skip_connection', True)
else:
raise NotImplementedError(f'Discriminator model [{kind_D:s}] not recognized')
#TODO: add check for vgg_# to validate the crop size matches the discriminator patch size
# with: vgg_size = kind[18:] and int(vgg_size)
#TODO: check if any options in network_D went unprocessed
if bool(network_D):
print(network_D)
return full_network_D
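# Illustrative sketch (hypothetical call): a string shorthand expands to the
# discriminator defaults, e.g.
#   get_network_D_config("patchgan", scale=4, crop_size=128, model_G="esrgan")
#   -> {'strict': True, 'type': 'patchgan', 'input_nc': 3, 'ndf': 64,
#       'n_layers': 3, 'patch': True, 'use_spectral_norm': False}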
def get_network_defaults(opt, is_train):
scale = opt.get('scale', 1)
if is_train:
crop_size = int(opt['datasets']['train']['crop_size'])
else:
crop_size = opt.get('img_size')
#TODO: could check dataset type to match model, not needed
#TODO: can check model type and validate networks (sr, video, i2i, etc)
# network_G:
network_G = opt.pop('network_G', None)
network_G = get_network_G_config(network_G, scale, crop_size)
model_G = network_G['type']
opt['network_G'] = network_G
# network_D:
# fetch crop_size (if HR_size used, crop_size should have been injected already)
# Note: VGG Discriminator image patch size should be either a power of 2 number or 3 multiplied by a power of 2.
if opt.get('network_D', None):
network_D = opt.pop('network_D', None)
network_D = get_network_D_config(network_D, scale, crop_size, model_G)
opt['network_D'] = network_D
# opt.update(network_D)
return opt
def main():
from options import NoneDict, dict_to_nonedict
opt = {}
opt['network_G'] = 'ESRGAN'
opt['network_D'] = 'patchgan'
opt['datasets'] = {}
opt['datasets']['train'] = {}
opt['datasets']['train']['crop_size'] = 128
opt = dict_to_nonedict(opt)
    opt = get_network_defaults(opt, is_train=True)
print(opt)
if __name__ == '__main__':
main()
|
py | 7dfff1372a0999ac01252f39a5cc0e2e668d5d90 | import os
import requests
from twilio.rest import Client
from datetime import datetime
from dateutil import tz
from config import Config
account_sid = Config.account_sid
auth_token = Config.auth_token
cells = Config.cells
twilio_number = Config.twilio_number
def send_sms(cell, msg):
client = Client(account_sid, auth_token)
message = client.messages.create(body=msg, from_=twilio_number, to=cell)
print(message.status)
def get_launches():
RLL_url = "https://fdo.rocketlaunch.live/json/launches/next/5"
response = requests.get(url=RLL_url)
response.raise_for_status()
return response.json()["result"]
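# Note: each launch dict in the returned list is expected to carry at least the
# fields used below: "win_open", "launch_description" and "pad"/"location"/"id"
# (id 88 is treated as the Virginia pad by the filter in __main__).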
def utc_to_est(utc_string):
"""returns a 12h formatted local time from a UTC data time string"""
from_zone = tz.gettz("UTC")
to_zone = tz.gettz("America/New_York")
utc = datetime.strptime(utc_string, "%Y-%m-%dT%H:%MZ")
utc = utc.replace(tzinfo=from_zone)
est = utc.astimezone(to_zone)
est_12h = est.strftime("%I:%M %p")
return est_12h
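# Example (hypothetical input): a January date falls in EST (UTC-5), so
#   utc_to_est("2023-01-15T18:30Z") -> "01:30 PM"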
if __name__ == "__main__":
launch_data = get_launches()
    va_launches = [
        launch for launch in launch_data
        if launch["pad"]["location"]["id"] == 88
    ]
    msgs = []
    for launch in va_launches:
        est = utc_to_est(launch["win_open"])
        msgs.append(f"{launch['launch_description']} {est} (EST)")
for msg in msgs:
for cell in cells:
send_sms(cell, msg)
|
py | 7dfff35095d96f17610b54b259a00002a0ed4f41 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: visual.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import pose_pb2 as pose__pb2
from . import geometry_pb2 as geometry__pb2
from . import material_pb2 as material__pb2
from . import plugin_pb2 as plugin__pb2
from . import vector3d_pb2 as vector3d__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='visual.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x0cvisual.proto\x12\x0bgazebo.msgs\x1a\npose.proto\x1a\x0egeometry.proto\x1a\x0ematerial.proto\x1a\x0cplugin.proto\x1a\x0evector3d.proto\"\xcd\x04\n\x06Visual\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\n\n\x02id\x18\x02 \x01(\r\x12\x13\n\x0bparent_name\x18\x03 \x02(\t\x12\x11\n\tparent_id\x18\x04 \x01(\r\x12\x14\n\x0c\x63\x61st_shadows\x18\x05 \x01(\x08\x12\x14\n\x0ctransparency\x18\x06 \x01(\x01\x12\x13\n\x0blaser_retro\x18\x07 \x01(\x01\x12\x1f\n\x04pose\x18\x08 \x01(\x0b\x32\x11.gazebo.msgs.Pose\x12\'\n\x08geometry\x18\t \x01(\x0b\x32\x15.gazebo.msgs.Geometry\x12\'\n\x08material\x18\n \x01(\x0b\x32\x15.gazebo.msgs.Material\x12\x0f\n\x07visible\x18\x0b \x01(\x08\x12\x11\n\tdelete_me\x18\x0c \x01(\x08\x12\x11\n\tis_static\x18\r \x01(\x08\x12#\n\x06plugin\x18\x0e \x03(\x0b\x32\x13.gazebo.msgs.Plugin\x12$\n\x05scale\x18\x0f \x01(\x0b\x32\x15.gazebo.msgs.Vector3d\x12&\n\x04meta\x18\x10 \x01(\x0b\x32\x18.gazebo.msgs.Visual.Meta\x12&\n\x04type\x18\x11 \x01(\x0e\x32\x18.gazebo.msgs.Visual.Type\x1a\x15\n\x04Meta\x12\r\n\x05layer\x18\x01 \x01(\x05\"d\n\x04Type\x12\n\n\x06\x45NTITY\x10\x00\x12\t\n\x05MODEL\x10\x01\x12\x08\n\x04LINK\x10\x02\x12\n\n\x06VISUAL\x10\x03\x12\r\n\tCOLLISION\x10\x04\x12\n\n\x06SENSOR\x10\x05\x12\x07\n\x03GUI\x10\x06\x12\x0b\n\x07PHYSICS\x10\x07')
,
dependencies=[pose__pb2.DESCRIPTOR,geometry__pb2.DESCRIPTOR,material__pb2.DESCRIPTOR,plugin__pb2.DESCRIPTOR,vector3d__pb2.DESCRIPTOR,])
_VISUAL_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='gazebo.msgs.Visual.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ENTITY', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MODEL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LINK', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VISUAL', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COLLISION', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SENSOR', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GUI', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PHYSICS', index=7, number=7,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=593,
serialized_end=693,
)
_sym_db.RegisterEnumDescriptor(_VISUAL_TYPE)
_VISUAL_META = _descriptor.Descriptor(
name='Meta',
full_name='gazebo.msgs.Visual.Meta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='layer', full_name='gazebo.msgs.Visual.Meta.layer', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=570,
serialized_end=591,
)
_VISUAL = _descriptor.Descriptor(
name='Visual',
full_name='gazebo.msgs.Visual',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gazebo.msgs.Visual.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='gazebo.msgs.Visual.id', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_name', full_name='gazebo.msgs.Visual.parent_name', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_id', full_name='gazebo.msgs.Visual.parent_id', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cast_shadows', full_name='gazebo.msgs.Visual.cast_shadows', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transparency', full_name='gazebo.msgs.Visual.transparency', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='laser_retro', full_name='gazebo.msgs.Visual.laser_retro', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pose', full_name='gazebo.msgs.Visual.pose', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geometry', full_name='gazebo.msgs.Visual.geometry', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='material', full_name='gazebo.msgs.Visual.material', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visible', full_name='gazebo.msgs.Visual.visible', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='delete_me', full_name='gazebo.msgs.Visual.delete_me', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_static', full_name='gazebo.msgs.Visual.is_static', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='plugin', full_name='gazebo.msgs.Visual.plugin', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='gazebo.msgs.Visual.scale', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meta', full_name='gazebo.msgs.Visual.meta', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='gazebo.msgs.Visual.type', index=16,
number=17, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_VISUAL_META, ],
enum_types=[
_VISUAL_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=693,
)
_VISUAL_META.containing_type = _VISUAL
_VISUAL.fields_by_name['pose'].message_type = pose__pb2._POSE
_VISUAL.fields_by_name['geometry'].message_type = geometry__pb2._GEOMETRY
_VISUAL.fields_by_name['material'].message_type = material__pb2._MATERIAL
_VISUAL.fields_by_name['plugin'].message_type = plugin__pb2._PLUGIN
_VISUAL.fields_by_name['scale'].message_type = vector3d__pb2._VECTOR3D
_VISUAL.fields_by_name['meta'].message_type = _VISUAL_META
_VISUAL.fields_by_name['type'].enum_type = _VISUAL_TYPE
_VISUAL_TYPE.containing_type = _VISUAL
DESCRIPTOR.message_types_by_name['Visual'] = _VISUAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Visual = _reflection.GeneratedProtocolMessageType('Visual', (_message.Message,), dict(
Meta = _reflection.GeneratedProtocolMessageType('Meta', (_message.Message,), dict(
DESCRIPTOR = _VISUAL_META,
__module__ = 'visual_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.Visual.Meta)
))
,
DESCRIPTOR = _VISUAL,
__module__ = 'visual_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.Visual)
))
_sym_db.RegisterMessage(Visual)
_sym_db.RegisterMessage(Visual.Meta)
# @@protoc_insertion_point(module_scope)
|
py | 7dfff585b4ed0d95db7ef2a9ec444960fa2f352f | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
import tensorflow as tf
from six.moves import xrange
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
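# Shape sketch (hypothetical sizes): logits [batch, 3, vocab] and labels [batch, 5]
# come back as logits [batch, 5, vocab] (zero-padded along the length axis) and
# labels [batch, 5] unchanged.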
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 tensor with shape
[batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", [logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params.vocab_size),
"approx_bleu_score": _convert_to_eval_metric(bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(rouge_l_fscore)(logits, labels),
}
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(
outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - \
tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch-size, length_labels]
Returns:
bleu: int, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
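# Worked example (hypothetical tokens):
#   _get_ngrams_with_counter(["a", "b", "a"], max_order=2)
#   -> Counter({('a',): 2, ('b',): 1, ('a', 'b'): 1, ('b', 'a'): 1})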
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(
translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
    if possible_matches_by_order[i] > 0:
      if matches_by_order[i] > 0:
        precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
      else:
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
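# Worked example (hypothetical corpora): an identical reference and translation
# give n-gram precisions of 1 wherever n-grams exist and no brevity penalty, so
#   compute_bleu([["the", "cat", "sat"]], [["the", "cat", "sat"]]) -> 1.0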
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
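# Worked example (hypothetical tokens):
#   _get_ngrams(2, ["to", "be", "or", "not", "to", "be"])
#   -> {('to', 'be'), ('be', 'or'), ('or', 'not'), ('not', 'to')}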
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) /
(precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
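# Worked example (hypothetical sentences): identical token sequences give
# precision = recall = 1, so rouge_n returns ~1.0 (just under, because of the
# 1e-8 smoothing term in the F1 denominator).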
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
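# Worked example (hypothetical sequences): _len_lcs([1, 2, 3, 4], [2, 4, 3]) -> 2
# (a longest common subsequence is [2, 3] or [2, 4]).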
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
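# Worked example (hypothetical counts): llcs=3, m=4, n=5 gives R_lcs=0.75,
# P_lcs=0.6, beta=0.8 and F_lcs ~= 0.65.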
|