content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
from datetime import datetime
import re
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
import pandas._testing as tm
def test_parse_time_string():
(parsed, reso) = parse_time_string("4Q1984")
(parsed_lower, reso_lower) = parse_time_string("4q1984")
assert reso == reso_lower
assert parsed == parsed_lower
def test_parse_time_string_invalid_type():
# Raise on invalid input, don't just return it
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((4, 5))
@pytest.mark.parametrize(
"dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")]
)
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
(parsed_dash, reso_dash) = parse_time_string(dashed)
(parsed, reso) = parse_time_string(normal)
assert parsed_dash == parsed
assert reso_dash == reso
@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])
def test_parse_time_quarter_with_dash_error(dashed):
msg = f"Unknown datetime string format, unable to parse: {dashed}"
with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@pytest.mark.parametrize(
"date_string,expected",
[
("123.1234", False),
("-50000", False),
("999", False),
("m", False),
("T", False),
("Mon Sep 16, 2013", True),
("2012-01-01", True),
("01/01/2012", True),
("01012012", True),
("0101", True),
("1-1", True),
],
)
def test_does_not_convert_mixed_integer(date_string, expected):
assert parsing._does_string_look_like_datetime(date_string) is expected
@pytest.mark.parametrize(
"date_str,kwargs,msg",
[
(
"2013Q5",
dict(),
(
"Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5"
),
),
# see gh-5418
(
"2013Q1",
dict(freq="INVLD-L-DEC-SAT"),
(
"Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"
),
),
],
)
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
with pytest.raises(parsing.DateParseError, match=msg):
parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize(
"date_str,freq,expected",
[
("2013Q2", None, datetime(2013, 4, 1)),
("2013Q2", "A-APR", datetime(2012, 8, 1)),
("2013-Q2", "A-DEC", datetime(2013, 4, 1)),
],
)
def test_parsers_quarterly_with_freq(date_str, freq, expected):
result, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@pytest.mark.parametrize(
"date_str", ["2Q 2005", "2Q-200A", "2Q-200", "22Q2005", "2Q200.", "6Q-20"]
)
def test_parsers_quarter_invalid(date_str):
if date_str == "6Q-20":
msg = (
"Incorrect quarterly string is given, quarter "
f"must be between 1 and 4: {date_str}"
)
else:
msg = f"Unknown datetime string format, unable to parse: {date_str}"
with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
@pytest.mark.parametrize(
"date_str,expected",
[("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],
)
def test_parsers_month_freq(date_str, expected):
result, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize(
"string,fmt",
[
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"),
],
)
def test_guess_datetime_format_with_parseable_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
ambiguous_string = "01/01/2011"
result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst)
assert result == expected
@td.skip_if_has_locale
@pytest.mark.parametrize(
"string,fmt",
[
("30/Dec/2011", "%d/%b/%Y"),
("30/December/2011", "%d/%B/%Y"),
("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"),
],
)
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize(
"invalid_dt",
[
"2013",
"01/2013",
"12:00:00",
"1/1/1/1",
"this_is_not_a_datetime",
"51a",
9,
datetime(2011, 1, 1),
],
)
def test_guess_datetime_format_invalid_inputs(invalid_dt):
# A datetime string must include a year, month and a day for it to be
# guessable, in addition to being a string that looks like a datetime.
assert parsing._guess_datetime_format(invalid_dt) is None
@pytest.mark.parametrize(
"string,fmt",
[
("2011-1-1", "%Y-%m-%d"),
("1/1/2011", "%m/%d/%Y"),
("30-1-2011", "%d-%m-%Y"),
("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S"),
],
)
def test_guess_datetime_format_no_padding(string, fmt):
# see gh-11142
result = parsing._guess_datetime_format(string)
assert result == fmt
def test_try_parse_dates():
arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
def test_parse_time_string_check_instance_type_raise_exception():
# issue 20684
msg = "Argument 'arg' has incorrect type (expected str, got tuple)"
with pytest.raises(TypeError, match=re.escape(msg)):
parse_time_string((1, 2, 3))
result = parse_time_string("2019")
expected = (datetime(2019, 1, 1), "year")
assert result == expected
| 29.825328 | 89 | 0.599122 | ["MIT"] | OliviaNabbosa89/Disaster_Responses | venv/Lib/site-packages/pandas/tests/tslibs/test_parsing.py | 6,830 | Python |
"""
This file is also being used by the GalaxyCloudRunner (gcr) Docker image.
"""
from getpass import getuser
from multiprocessing import cpu_count
from socket import gethostname
from string import Template
SLURM_CONFIG_TEMPLATE = '''
# slurm.conf file generated by configurator.html.
# Put this file on all nodes of your cluster.
# See the slurm.conf man page for more information.
#
ControlMachine=$hostname
#ControlAddr=
#BackupController=
#BackupAddr=
#
AuthType=auth/munge
CacheGroups=0
#CheckpointType=checkpoint/none
CryptoType=crypto/munge
MpiDefault=none
#PluginDir=
#PlugStackConfig=
#PrivateData=jobs
ProctrackType=proctrack/pgid
#Prolog=
#PrologSlurmctld=
#PropagatePrioProcess=0
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
ReturnToService=1
#SallocDefaultCommand=
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmctldPort=6817
SlurmdPidFile=/var/run/slurmd.pid
SlurmdPort=6818
SlurmdSpoolDir=/tmp/slurmd
SlurmUser=$user
#SlurmdUser=root
#SrunEpilog=
#SrunProlog=
StateSaveLocation=/tmp
SwitchType=switch/none
#TaskEpilog=
TaskPlugin=task/none
#TaskPluginParam=
#TaskProlog=
InactiveLimit=0
KillWait=30
MinJobAge=300
#OverTimeLimit=0
SlurmctldTimeout=120
SlurmdTimeout=300
#UnkillableStepTimeout=60
#VSizeFactor=0
Waittime=0
FastSchedule=1
SchedulerType=sched/backfill
SchedulerPort=7321
SelectType=select/linear
#SelectTypeParameters=
AccountingStorageType=accounting_storage/none
#AccountingStorageUser=
AccountingStoreJobComment=YES
ClusterName=cluster
#DebugFlags=
#JobCompHost=
#JobCompLoc=
#JobCompPass=
#JobCompPort=
JobCompType=jobcomp/none
#JobCompUser=
JobAcctGatherFrequency=30
JobAcctGatherType=jobacct_gather/none
SlurmctldDebug=3
#SlurmctldLogFile=
SlurmdDebug=3
#SlurmdLogFile=
NodeName=$hostname CPUs=$cpus State=UNKNOWN
PartitionName=debug Nodes=$hostname Default=YES MaxTime=INFINITE State=UP
'''
def main():
template_params = {"hostname": gethostname(),
"user": getuser(),
"cpus": cpu_count()}
config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params)
open("/etc/slurm-llnl/slurm.conf", "w").write(config_contents)
if __name__ == "__main__":
main()
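# Illustrative result of the template substitution above (the hostname, user
# and CPU count shown here are hypothetical example values, not taken from any
# real host):
#
#   ControlMachine=node01
#   SlurmUser=galaxy
#   NodeName=node01 CPUs=4 State=UNKNOWN
#   PartitionName=debug Nodes=node01 Default=YES MaxTime=INFINITE State=UP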
| 22.453608 | 81 | 0.800735 | ["Apache-2.0"] | Slugger70/pulsar | pulsar/scripts/_configure_slurm.py | 2,178 | Python |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.io import fits
from astropy.table import Table
from gammapy.maps import MapAxes, MapAxis
from gammapy.utils.array import array_stats_str
from gammapy.utils.gauss import MultiGauss2D
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .table import PSF3D, EnergyDependentTablePSF
__all__ = ["EnergyDependentMultiGaussPSF"]
log = logging.getLogger(__name__)
class EnergyDependentMultiGaussPSF:
"""Triple Gauss analytical PSF depending on energy and theta.
To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.
Parameters
----------
energy_axis_true : `MapAxis`
True energy axis
offset_axis : `MapAxis`
Offset axis.
sigmas : list of 'numpy.ndarray'
Triple Gauss sigma parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the sigma
value for every given energy and theta.
norms : list of 'numpy.ndarray'
Triple Gauss norm parameters, where every entry is
a two dimensional 'numpy.ndarray' containing the norm
value for every given energy and theta. Norm corresponds
to the value of the Gaussian at theta = 0.
meta : dict
Meta data
Examples
--------
Plot R68 of the PSF vs. theta and energy:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from gammapy.irf import EnergyDependentMultiGaussPSF
filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
psf.plot_containment(0.68)
plt.show()
"""
tag = "psf_3gauss"
def __init__(
self,
energy_axis_true,
offset_axis,
sigmas,
norms,
meta,
):
energy_axis_true.assert_name("energy_true")
offset_axis.assert_name("offset")
self._energy_axis_true = energy_axis_true
self._offset_axis = offset_axis
sigmas[0][sigmas[0] == 0] = 1
sigmas[1][sigmas[1] == 0] = 1
sigmas[2][sigmas[2] == 0] = 1
self.sigmas = sigmas
self.norms = norms
self.meta = meta or {}
self._interp_norms = self._setup_interpolators(self.norms)
self._interp_sigmas = self._setup_interpolators(self.sigmas)
@property
def energy_thresh_lo(self):
"""Low energy threshold"""
return self.meta["LO_THRES"] * u.TeV
@property
def energy_thresh_hi(self):
"""High energy threshold"""
return self.meta["HI_THRES"] * u.TeV
@property
def energy_axis_true(self):
return self._energy_axis_true
@property
def offset_axis(self):
return self._offset_axis
def _setup_interpolators(self, values_list):
interps = []
for values in values_list:
interp = ScaledRegularGridInterpolator(
points=(self.offset_axis.center, self.energy_axis_true.center),
values=values,
)
interps.append(interp)
return interps
@classmethod
def read(cls, filename, hdu="PSF_2D_GAUSS"):
"""Create `EnergyDependentMultiGaussPSF` from FITS file.
Parameters
----------
filename : str
File name
"""
with fits.open(str(make_path(filename)), memmap=False) as hdulist:
return cls.from_table_hdu(hdulist[hdu])
@classmethod
def from_table_hdu(cls, hdu):
"""Create `EnergyDependentMultiGaussPSF` from HDU list.
Parameters
----------
hdu : `~astropy.io.fits.BinTableHDU`
HDU
"""
table = Table.read(hdu)
energy_axis_true = MapAxis.from_table(
table, column_prefix="ENERG", format="gadf-dl3"
)
offset_axis = MapAxis.from_table(
table, column_prefix="THETA", format="gadf-dl3"
)
# Get sigmas
shape = (offset_axis.nbin, energy_axis_true.nbin)
sigmas = []
for key in ["SIGMA_1", "SIGMA_2", "SIGMA_3"]:
sigma = hdu.data[key].reshape(shape).copy()
sigmas.append(sigma)
# Get amplitudes
norms = []
for key in ["SCALE", "AMPL_2", "AMPL_3"]:
norm = hdu.data[key].reshape(shape).copy()
norms.append(norm)
return cls(
energy_axis_true=energy_axis_true,
offset_axis=offset_axis,
sigmas=sigmas,
norms=norms,
meta=dict(hdu.header)
)
def to_hdulist(self):
"""
Convert psf table data to FITS hdu list.
Returns
-------
hdu_list : `~astropy.io.fits.HDUList`
PSF in HDU list format.
"""
# Set up data
names = [
"SCALE",
"SIGMA_1",
"AMPL_2",
"SIGMA_2",
"AMPL_3",
"SIGMA_3",
]
units = ["", "deg", "", "deg", "", "deg"]
data = [
self.norms[0],
self.sigmas[0],
self.norms[1],
self.sigmas[1],
self.norms[2],
self.sigmas[2],
]
axes = MapAxes([self.energy_axis_true, self.offset_axis])
table = axes.to_table(format="gadf-dl3")
for name_, data_, unit_ in zip(names, data, units):
table[name_] = [data_]
table[name_].unit = unit_
# Create hdu and hdu list
hdu = fits.BinTableHDU(table)
hdu.header.update(self.meta)
return fits.HDUList([fits.PrimaryHDU(), hdu])
def write(self, filename, *args, **kwargs):
"""Write PSF to FITS file.
Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.
"""
self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)
def psf_at_energy_and_theta(self, energy, theta):
"""
Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.
No interpolation is used.
Parameters
----------
        energy : `~astropy.units.Quantity`
Energy at which a PSF is requested.
theta : `~astropy.coordinates.Angle`
Offset angle at which a PSF is requested.
Returns
-------
psf : `~gammapy.utils.gauss.MultiGauss2D`
Multigauss PSF object.
"""
energy = u.Quantity(energy)
theta = u.Quantity(theta)
sigmas, norms = [], []
pars = {"A_1": 1}
for interp_sigma in self._interp_sigmas:
sigma = interp_sigma((theta, energy))
sigmas.append(sigma)
for name, interp_norm in zip(["scale", "A_2", "A_3"], self._interp_norms):
pars[name] = interp_norm((theta, energy))
for idx, sigma in enumerate(sigmas):
a = pars[f"A_{idx + 1}"]
norm = pars["scale"] * 2 * a * sigma ** 2
norms.append(norm)
m = MultiGauss2D(sigmas, norms)
m.normalize()
return m
def containment_radius(self, energy, theta, fraction=0.68):
"""Compute containment for all energy and theta values"""
# This is a false positive from pylint
# See https://github.com/PyCQA/pylint/issues/2435
energies = u.Quantity(
energy
).flatten() # pylint:disable=assignment-from-no-return
thetas = Angle(theta).flatten()
radius = np.empty((theta.size, energy.size))
for idx, energy in enumerate(energies):
for jdx, theta in enumerate(thetas):
try:
psf = self.psf_at_energy_and_theta(energy, theta)
radius[jdx, idx] = psf.containment_radius(fraction)
except ValueError:
log.debug(
f"Computing containment failed for energy = {energy:.2f}"
f" and theta={theta:.2f}"
)
log.debug(f"Sigmas: {psf.sigmas} Norms: {psf.norms}")
radius[jdx, idx] = np.nan
return Angle(radius, "deg")
def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):
"""
Plot containment image with energy and theta axes.
Parameters
----------
fraction : float
Containment fraction between 0 and 1.
add_cbar : bool
Add a colorbar
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
offset = self.offset_axis.center
# Set up and compute data
containment = self.containment_radius(energy, offset, fraction)
# plotting defaults
kwargs.setdefault("cmap", "GnBu")
kwargs.setdefault("vmin", np.nanmin(containment.value))
kwargs.setdefault("vmax", np.nanmax(containment.value))
# Plotting
x = energy.value
y = offset.value
caxes = ax.pcolormesh(x, y, containment.value, **kwargs)
        # Axes labels and ticks, colorbar
ax.semilogx()
ax.set_ylabel(f"Offset ({offset.unit})")
ax.set_xlabel(f"Energy ({energy.unit})")
ax.set_xlim(x.min(), x.max())
ax.set_ylim(y.min(), y.max())
try:
self._plot_safe_energy_range(ax)
except KeyError:
pass
if add_cbar:
label = f"Containment radius R{100 * fraction:.0f} ({containment.unit})"
ax.figure.colorbar(caxes, ax=ax, label=label)
return ax
def _plot_safe_energy_range(self, ax):
"""add safe energy range lines to the plot"""
esafe = self.energy_thresh_lo
omin = self.offset_axis.center.min()
omax = self.offset_axis.center.max()
ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)
label = f"Safe energy threshold: {esafe:3.2f}"
ax.text(x=1.1 * esafe.value, y=0.3, s=label, va="top")
def plot_containment_vs_energy(
self, fractions=[0.68, 0.95], thetas=Angle([0, 1], "deg"), ax=None, **kwargs
):
"""Plot containment fraction as a function of energy.
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
energy = self.energy_axis_true.center
for theta in thetas:
for fraction in fractions:
radius = self.containment_radius(energy, theta, fraction).squeeze()
kwargs.setdefault("label", f"{theta.deg} deg, {100 * fraction:.1f}%")
ax.plot(energy.value, radius.value, **kwargs)
ax.semilogx()
ax.legend(loc="best")
ax.set_xlabel("Energy (TeV)")
ax.set_ylabel("Containment radius (deg)")
def peek(self, figsize=(15, 5)):
"""Quick-look summary plots."""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
self.plot_containment(fraction=0.68, ax=axes[0])
self.plot_containment(fraction=0.95, ax=axes[1])
self.plot_containment_vs_energy(ax=axes[2])
# TODO: implement this plot
# psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')
# psf.plot_components(ax=axes[2])
plt.tight_layout()
def info(
self,
fractions=[0.68, 0.95],
energies=u.Quantity([1.0, 10.0], "TeV"),
thetas=u.Quantity([0.0], "deg"),
):
"""
Print PSF summary info.
The containment radius for given fraction, energies and thetas is
computed and printed on the command line.
Parameters
----------
fractions : list
Containment fraction to compute containment radius for.
        energies : `~astropy.units.Quantity`
Energies to compute containment radius for.
thetas : `~astropy.units.u.Quantity`
Thetas to compute containment radius for.
Returns
-------
ss : string
Formatted string containing the summary info.
"""
ss = "\nSummary PSF info\n"
ss += "----------------\n"
ss += array_stats_str(self.offset_axis.center.to("deg"), "Theta")
ss += array_stats_str(self.energy_axis_true.edges[1:], "Energy hi")
ss += array_stats_str(self.energy_axis_true.edges[:-1], "Energy lo")
ss += f"Safe energy threshold lo: {self.energy_thresh_lo:6.3f}\n"
ss += f"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\n"
for fraction in fractions:
containment = self.containment_radius(energies, thetas, fraction)
for i, energy in enumerate(energies):
for j, theta in enumerate(thetas):
radius = containment[j, i]
ss += (
"{:2.0f}% containment radius at theta = {} and "
"E = {:4.1f}: {:5.8f}\n"
"".format(100 * fraction, theta, energy, radius)
)
return ss
def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):
"""Convert triple Gaussian PSF ot table PSF.
Parameters
----------
theta : `~astropy.coordinates.Angle`
Offset in the field of view. Default theta = 0 deg
rad : `~astropy.coordinates.Angle`
Offset from PSF center used for evaluating the PSF on a grid.
Default offset = [0, 0.005, ..., 1.495, 1.5] deg.
        exposure : `~astropy.units.Quantity`
Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.
Default exposure = 1.
Returns
-------
        table_psf : `~gammapy.irf.EnergyDependentTablePSF`
Instance of `EnergyDependentTablePSF`.
"""
# Convert energies to log center
energies = self.energy_axis_true.center
# Defaults and input handling
if theta is None:
theta = Angle(0, "deg")
else:
theta = Angle(theta)
if rad is None:
rad = Angle(np.arange(0, 1.5, 0.005), "deg")
rad_axis = MapAxis.from_nodes(rad, name="rad")
psf_value = u.Quantity(np.zeros((energies.size, rad.size)), "deg^-2")
for idx, energy in enumerate(energies):
psf_gauss = self.psf_at_energy_and_theta(energy, theta)
psf_value[idx] = u.Quantity(psf_gauss(rad), "deg^-2")
return EnergyDependentTablePSF(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
exposure=exposure,
data=psf_value,
)
def to_psf3d(self, rad=None):
"""Create a PSF3D from an analytical PSF.
Parameters
----------
        rad : `~astropy.units.Quantity` or `~astropy.coordinates.Angle`
the array of position errors (rad) on which the PSF3D will be defined
Returns
-------
psf3d : `~gammapy.irf.PSF3D`
            the PSF3D. It will be defined on the same energy and offset values as the input psf.
"""
offsets = self.offset_axis.center
energy = self.energy_axis_true.center
if rad is None:
rad = np.linspace(0, 0.66, 67) * u.deg
rad_axis = MapAxis.from_edges(rad, name="rad")
shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)
psf_value = np.zeros(shape) * u.Unit("sr-1")
for idx, offset in enumerate(offsets):
table_psf = self.to_energy_dependent_table_psf(offset)
psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)
return PSF3D(
energy_axis_true=self.energy_axis_true,
rad_axis=rad_axis,
offset_axis=self.offset_axis,
data=psf_value,
meta=self.meta.copy()
)
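# Minimal usage sketch, assuming the example IRF file from the class docstring
# is available through $GAMMAPY_DATA:
#
#   filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'
#   psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')
#   table_psf = psf.to_energy_dependent_table_psf(theta='0.5 deg')
#   psf_3d = psf.to_psf3d()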
| 32.542169 | 106 | 0.578304 | ["BSD-3-Clause"] | mdebony/gammapy | gammapy/irf/psf/gauss.py | 16,206 | Python |
from typing import List
import torch
from torch import Tensor
from hw_asr.base.base_metric import BaseMetric
from hw_asr.base.base_text_encoder import BaseTextEncoder
from hw_asr.metric.utils import calc_cer
class ArgmaxCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_vec, target_text, log_prob_length in zip(predictions, text, kwargs['log_probs_length']):
if hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec[:log_prob_length.item()])
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
class BeamSearchCERMetric(BaseMetric):
def __init__(self, text_encoder: BaseTextEncoder, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text_encoder = text_encoder
def __call__(self, log_probs: Tensor, text: List[str], *args, **kwargs):
cers = []
if hasattr(self.text_encoder, "ctc_beam_search"):
predictions = log_probs.cpu()
else:
predictions = torch.argmax(log_probs.cpu(), dim=-1)
for log_prob_length, log_prob_vec, target_text in zip(kwargs['log_probs_length'], predictions, text):
if hasattr(self.text_encoder, "ctc_beam_search"):
pred_text = self.text_encoder.ctc_beam_search(log_prob_vec[:log_prob_length.item(), :].unsqueeze(0))
elif hasattr(self.text_encoder, "ctc_decode"):
pred_text = self.text_encoder.ctc_decode(log_prob_vec)
else:
pred_text = self.text_encoder.decode(log_prob_vec)
cers.append(calc_cer(target_text, pred_text))
return sum(cers) / len(cers)
| 41.44 | 116 | 0.668436 | ["MIT"] | ArseniyBolotin/asr_project | hw_asr/metric/cer_metric.py | 2,072 | Python |
# We need to make our string alternating, i.e. s[i] != s[i+1]. When we reverse a substring
# s[l..r], we change at most two adjacent pairs: (s[l-1], s[l]) and (s[r], s[r+1]). Moreover,
# one pair should be a consecutive "00" and the other a "11". So a lower bound on the answer
# is the maximum of the number of "00" pairs and the number of "11" pairs, and we can always
# reach this lower bound by pairing each "00" with a "11" or with the left/right border of s.
for _ in range(int(input())):
n = int(input())
s = input()
z, o = 0, 0 # will store total number of pairs
zeros, ones = 0, 0 # will store no of pairs in one streak
for el in s:
if el == '1':
ones += 1
# streak of zeros are broken by one so no of pairs of zeros are added to z
z += max(zeros-1, 0)
zeros = 0
if el == '0':
zeros += 1
# streak of ones are broken by one so no of pairs of ones are added to o
o += max(ones-1, 0)
ones = 0
# we count pairs only when it the streak is broken. So to count the final unbroken streak
o += max(ones-1, 0)
z += max(zeros-1, 0)
print(max(o, z))
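# Worked example of the counting above (illustrative trace, not part of the
# original solution): for s = "11101000" the runs of ones are "111" and "1",
# giving o = 2 pairs of "11"; the runs of zeros are "0" and "000", giving
# z = 2 pairs of "00"; so the answer printed is max(o, z) = 2 reversals.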
| 36.40625 | 93 | 0.593133 | ["MIT"] | Abhinav-22/CompetitiveCode | Codeforces_problems/Reverse Binary Strings/solution.py | 1,173 | Python |
import numpy as np
import cv2
import matplotlib.pylab as plt
from keras.preprocessing.image import load_img
from keras.models import model_from_json
from models import (
create_cam_model, preprocess_image,
get_cam_img
)
# Define CAM conv layer name
CAM_CONV_LAYER = 'cam_conv_layer'
def read_model(model_path, weigths_path):
"""Load your pretrained model
"""
model = model_from_json(open(model_path).read())
model.load_weights(weigths_path)
return model
def train_cam_model(X_train, Y_train, X_test, Y_test,
batch_size, nb_epoch):
"""Train CAM model based on your pretrained model
# Arguments
model: your pretrained model, CAM model is trained based on this model.
"""
# Use your allready trained model
pretrained_model_path = ''
pretrained_weights_path = ''
# Your pretrained model name
pretrained_model_name = 'VGG16'
# Label class num
num_classes = 10
# CAM input spacial size
gap_spacial_size = 14
# The layer before CAM(GAP) layers.
# CAM paper suggests to use the last convnet(VGG) or mergenet(Inception, or other architectures)
# Change this name based on your model.
if pretrained_model_name == 'VGG16':
in_layer_name = 'block5_conv3'
elif pretrained_model_name == 'InceptionV3':
in_layer_name = 'batchnormalization_921'
elif pretrained_model_name == 'ResNet50':
in_layer_name = 'merge_13'
else:
in_layer_name = ''
# Load your allready trained model, transfer it to CAM model
pretrained_model = read_model(pretrained_model_path,
pretrained_weights_path)
# Create CAM model based on trained model
model = create_cam_model(pretrained_model,
gap_spacial_size,
num_classes,
in_layer_name,
CAM_CONV_LAYER)
# Train your CAM model
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True, verbose=1,
validation_data=(X_test, Y_test))
# Save model
model.save_weights('')
return model
def cam_model():
"""
Return your trained CAM model
"""
return
def plot_cam_map(img_path, img_size, batch_size, label_plot):
"""Plot class activation map.
"""
# CAM input spacial size
gap_spacial_size = 14
# Use your trained CAM model
model = cam_model()
# Load and format data
im_ori = np.asarray(load_img(img_path, target_size=(img_size, img_size)))
test_data = preprocess_image(img_path, img_size, expand_dims=True)
# Get class map image
im_cam = get_cam_img(model,
test_data,
label_plot,
CAM_CONV_LAYER,
ratio=img_size / gap_spacial_size)
# Resize if the shape of class map is not equal to original image
if im_cam.shape != im_ori[:, :, 0].shape:
        im_cam = cv2.resize(im_cam, (img_size, img_size), interpolation=cv2.INTER_LINEAR)
# Show the predictions. You can analyze the class map with the predictions.
prediction_labels = model.predict(test_data.astype('float32'), batch_size=batch_size, verbose=1)
print('Info: Predictions:\n{}'.format(prediction_labels))
# Plot original image and the class map
plt.imshow(im_ori)
plt.imshow(im_cam,
cmap='jet',
alpha=0.5,
interpolation='bilinear')
plt.show()
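if __name__ == '__main__':
    # Illustrative sketch only: the image path and class index below are
    # hypothetical placeholders, and cam_model() as well as the pretrained
    # model/weight paths above must be filled in before this will run.
    plot_cam_map(img_path='samples/cat.jpg',
                 img_size=224,
                 batch_size=1,
                 label_plot=0)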
| 25.784 | 97 | 0.730996 | ["MIT"] | ijinmao/CAM-Localization | demo.py | 3,223 | Python |
#!/usr/bin/python
"""Plot LFEs of given order parameter."""
import argparse
import sys
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
from matplotlibstyles import styles
from matplotlibstyles import plotutils
def main():
args = parse_args()
f = setup_figure()
gs = gridspec.GridSpec(1, 1, f)
ax = f.add_subplot(gs[0, 0])
    if args.post_lfes is None:
args.post_lfes = ['' for i in range(len(args.systems))]
plot_figure(f, ax, vars(args))
setup_axis(ax, args.tag)
#set_labels(ax)
save_figure(f, args.plot_filebase)
def setup_figure():
styles.set_default_style()
figsize = (plotutils.cm_to_inches(10), plotutils.cm_to_inches(7))
return plt.figure(figsize=figsize, dpi=300, constrained_layout=True)
def plot_figure(f, ax, args):
systems = args['systems']
varis = args['varis']
input_dir = args['input_dir']
tag = args['tag']
post_lfes = args['post_lfes']
stacking_enes = args['stacking_enes']
if stacking_enes is not None:
stacking_enes = [abs(e) for e in stacking_enes]
cmap = plotutils.create_truncated_colormap(
0.2, 0.8, name='plasma')
#mappable = plotutils.create_linear_mappable(
# cmap, abs(stacking_enes[0]), abs(stacking_enes[-1]))
#colors = [mappable.to_rgba(abs(e)) for e in stacking_enes]
increment = stacking_enes[1] - stacking_enes[0]
cmap, norm, colors = plotutils.create_segmented_colormap(cmap, stacking_enes, increment)
else:
cmap = cm.get_cmap('tab10')
colors = [cmap(i) for i in range(len(systems))]
for i in range(len(systems)):
system = systems[i]
vari = varis[i]
post_lfe = post_lfes[i]
if post_lfe != '':
post_lfe = '-' + post_lfe
inp_filebase = f'{input_dir}/{system}-{vari}_lfes{post_lfe}-{tag}'
lfes = pd.read_csv(f'{inp_filebase}.aves', sep=' ', index_col=0)
lfe_stds = pd.read_csv(f'{inp_filebase}.stds', sep=' ', index_col=0)
temp = lfes.columns[0]
lfes = lfes[temp]
lfes = lfes - lfes[0]
lfe_stds = lfe_stds[temp]
label = f'{system}-{vari}'
ax.errorbar(lfes.index, lfes, yerr=lfe_stds, marker='o', label=label,
color=colors[i])
if stacking_enes is not None:
label = r'$-U_\text{stack} / \SI{1000}{\kb\kelvin}$'
tick_labels = [f'${e/1000:.1f}$' for e in stacking_enes]
plotutils.plot_segmented_colorbar(
f, ax, cmap, norm, label, tick_labels, 'horizontal')
def setup_axis(ax, ylabel=None, xlabel=None, ylim_top=None, xlim_right=None):
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_ylim(top=ylim_top)
ax.set_xlim(right=xlim_right)
def set_labels(ax):
plt.legend()
def save_figure(f, plot_filebase):
#f.savefig(plot_filebase + '.pgf', transparent=True)
f.savefig(plot_filebase + '.pdf', transparent=True)
f.savefig(plot_filebase + '.png', transparent=True)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'input_dir',
type=str,
help='Input directory')
parser.add_argument(
'plot_filebase',
type=str,
help='Plots directory')
parser.add_argument(
'tag',
type=str,
help='OP tag')
parser.add_argument(
'--systems',
nargs='+',
type=str,
help='Systems')
parser.add_argument(
'--varis',
nargs='+',
type=str,
help='Simulation variants')
parser.add_argument(
'--post_lfes',
nargs='+',
type=str,
help='Filename additions after lfes, if any')
parser.add_argument(
'--stacking_enes',
nargs='+',
type=float,
help='Stacking energies (for colormap)')
return parser.parse_args()
if __name__ == '__main__':
main()
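# Example invocation (directory names and system/variant labels are
# hypothetical placeholders):
#
#   python plot_lfes.py analysis_dir plots/lfes-numstaples numstaples \
#       --systems halfturn fullturn --varis default default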
| 28.414966 | 96 | 0.625808 | ["MIT"] | acumb/LatticeDNAOrigami | scripts/plotting/plot_lfes.py | 4,177 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Fabian Affolter <fabian()affolter-engineering.ch>'
__copyright__ = 'Copyright 2014 Fabian Affolter'
__license__ = """Eclipse Public License - v 1.0 (http://www.eclipse.org/legal/epl-v10.html)"""
HAVE_DBUS=True
try:
import dbus
except ImportError:
HAVE_DBUS=False
def plugin(srv, item):
"""Send a message through dbus to the user's desktop."""
srv.logging.debug("*** MODULE=%s: service=%s, target=%s", __file__, item.service, item.target)
if not HAVE_DBUS:
srv.logging.error("Cannot send DBUS message; `dbus' module not installed")
return False
text = item.message
summary = item.addrs[0]
app_name = item.get('title', srv.SCRIPTNAME)
replaces_id = 0
service = 'org.freedesktop.Notifications'
path = '/' + service.replace('.', '/')
interface = service
app_icon = '/usr/share/icons/gnome/32x32/places/network-server.png'
expire_timeout = 1000
actions = []
hints = []
try:
srv.logging.debug("Sending message to %s..." % (item.target))
session_bus = dbus.SessionBus()
obj = session_bus.get_object(service, path)
interface = dbus.Interface(obj, interface)
interface.Notify(app_name, replaces_id, app_icon, summary, text,
actions, hints, expire_timeout)
srv.logging.debug("Successfully sent message")
    except Exception as e:
srv.logging.error("Error sending message to %s: %s" % (item.target, str(e)))
return False
return True
| 32.625 | 98 | 0.649425 | ["EPL-1.0"] | daq-tools/mqttwarn | services/dbus.py | 1,566 | Python |
# encoding: utf-8
import datetime
import logging
from ckan.common import config
from six import text_type
from sqlalchemy import Table, select, join, func, and_
import ckan.plugins as p
import ckan.model as model
log = logging.getLogger(__name__)
cache_enabled = p.toolkit.asbool(
config.get('ckanext.stats.cache_enabled', False)
)
if cache_enabled:
log.warn(
'ckanext.stats does not support caching in current implementations'
)
DATE_FORMAT = '%Y-%m-%d'
def table(name):
return Table(name, model.meta.metadata, autoload=True)
def datetime2date(datetime_):
return datetime.date(datetime_.year, datetime_.month, datetime_.day)
class Stats(object):
@classmethod
def largest_groups(cls, limit=10):
member = table('member')
package = table('package')
j = join(member, package, member.c.table_id == package.c.id)
s = select(
[member.c.group_id,
func.count(member.c.table_id)]
).select_from(j).group_by(member.c.group_id).where(
and_(
member.c.group_id != None, member.c.table_name == 'package',
package.c.private == False, package.c.state == 'active'
)
).order_by(func.count(member.c.table_id).desc()).limit(limit)
res_ids = model.Session.execute(s).fetchall()
res_groups = [
(model.Session.query(model.Group).get(text_type(group_id)), val)
for group_id, val in res_ids
]
return res_groups
@classmethod
def top_tags(cls, limit=10, returned_tag_info='object'): # by package
assert returned_tag_info in ('name', 'id', 'object')
tag = table('tag')
package_tag = table('package_tag')
package = table('package')
if returned_tag_info == 'name':
from_obj = [package_tag.join(tag)]
tag_column = tag.c.name
else:
from_obj = None
tag_column = package_tag.c.tag_id
j = join(
package_tag, package, package_tag.c.package_id == package.c.id
)
s = select([tag_column,
func.count(package_tag.c.package_id)],
from_obj=from_obj).select_from(j).where(
and_(
package_tag.c.state == 'active',
package.c.private == False,
package.c.state == 'active'
)
)
s = s.group_by(tag_column).order_by(
func.count(package_tag.c.package_id).desc()
).limit(limit)
res_col = model.Session.execute(s).fetchall()
if returned_tag_info in ('id', 'name'):
return res_col
elif returned_tag_info == 'object':
res_tags = [
(model.Session.query(model.Tag).get(text_type(tag_id)), val)
for tag_id, val in res_col
]
return res_tags
@classmethod
def top_package_creators(cls, limit=10):
userid_count = model.Session.query(
model.Package.creator_user_id,
func.count(model.Package.creator_user_id)
).filter(model.Package.state == 'active'
).filter(model.Package.private == False).group_by(
model.Package.creator_user_id
).order_by(func.count(model.Package.creator_user_id).desc()
).limit(limit).all()
user_count = [
(model.Session.query(model.User).get(text_type(user_id)), count)
for user_id, count in userid_count
if user_id
]
return user_count
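# Illustrative usage sketch (requires an initialized CKAN model/session; the
# returned objects and counts shown are hypothetical):
#
#   from ckanext.stats.stats import Stats
#   Stats.largest_groups(limit=5)        # -> [(<Group ...>, 42), ...]
#   Stats.top_tags(limit=3)              # -> [(<Tag ...>, 17), ...]
#   Stats.top_package_creators(limit=3)  # -> [(<User ...>, 12), ...]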
| 32.839286 | 76 | 0.576672 | ["BSD-3-Clause"] | 4yjo/ckan | ckanext/stats/stats.py | 3,678 | Python |
import time
import numpy as np
import os.path as osp
import datetime
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import nni
from dassl.data import DataManager
from dassl.optim import build_optimizer, build_lr_scheduler
from dassl.utils import (
MetricMeter, AverageMeter, tolist_if_not, count_num_param, load_checkpoint,
save_checkpoint, resume_from_checkpoint, load_pretrained_weights
)
from dassl.modeling import build_head, build_backbone
from dassl.evaluation import build_evaluator
class SimpleNet(nn.Module):
"""A simple neural network composed of a CNN backbone
and optionally a head such as mlp for classification.
"""
def __init__(self, cfg, model_cfg, num_classes, **kwargs):
super().__init__()
self.backbone = build_backbone(
model_cfg.BACKBONE.NAME,
verbose=cfg.VERBOSE,
pretrained=model_cfg.BACKBONE.PRETRAINED,
**kwargs
)
fdim = self.backbone.out_features
print("------------------------fdim:", fdim)
self.head = None
if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
self.head = build_head(
model_cfg.HEAD.NAME,
verbose=cfg.VERBOSE,
in_features=fdim,
hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
activation=model_cfg.HEAD.ACTIVATION,
bn=model_cfg.HEAD.BN,
dropout=model_cfg.HEAD.DROPOUT,
**kwargs
)
fdim = self.head.out_features
self.classifier = None
if num_classes > 0:
self.classifier = nn.Linear(fdim, num_classes)
self._fdim = fdim
@property
def fdim(self):
return self._fdim
def forward(self, x, return_feature=False):
f = self.backbone(x)
if self.head is not None:
f = self.head(f)
if self.classifier is None:
return f
y = self.classifier(f)
if return_feature:
return y, f
return y
class TrainerBase:
"""Base class for iterative trainer."""
def __init__(self):
self._models = OrderedDict()
self._optims = OrderedDict()
self._scheds = OrderedDict()
self._writer = None
def register_model(self, name='model', model=None, optim=None, sched=None):
if self.__dict__.get('_models') is None:
raise AttributeError(
'Cannot assign model before super().__init__() call'
)
if self.__dict__.get('_optims') is None:
raise AttributeError(
'Cannot assign optim before super().__init__() call'
)
if self.__dict__.get('_scheds') is None:
raise AttributeError(
'Cannot assign sched before super().__init__() call'
)
assert name not in self._models, 'Found duplicate model names'
self._models[name] = model
self._optims[name] = optim
self._scheds[name] = sched
def get_model_names(self, names=None):
names_real = list(self._models.keys())
if names is not None:
names = tolist_if_not(names)
for name in names:
assert name in names_real
return names
else:
return names_real
def save_model(self, epoch, directory, is_best=False, model_name=''):
names = self.get_model_names()
for name in names:
model_dict = self._models[name].state_dict()
optim_dict = None
if self._optims[name] is not None:
optim_dict = self._optims[name].state_dict()
sched_dict = None
if self._scheds[name] is not None:
sched_dict = self._scheds[name].state_dict()
save_checkpoint(
{
'state_dict': model_dict,
'epoch': epoch + 1,
'optimizer': optim_dict,
'scheduler': sched_dict
},
osp.join(directory, name),
is_best=is_best,
model_name=model_name
)
def resume_model_if_exist(self, directory):
names = self.get_model_names()
file_missing = False
for name in names:
path = osp.join(directory, name)
if not osp.exists(path):
file_missing = True
break
if file_missing:
print('No checkpoint found, train from scratch')
return 0
print(
'Found checkpoint in "{}". Will resume training'.format(directory)
)
for name in names:
path = osp.join(directory, name)
start_epoch = resume_from_checkpoint(
path, self._models[name], self._optims[name],
self._scheds[name]
)
return start_epoch
def load_model(self, directory, epoch=None):
if not directory:
print(
'Note that load_model() is skipped as no pretrained model is given'
)
return
names = self.get_model_names()
# By default, the best model is loaded
model_file = 'model-best.pth.tar'
if epoch is not None:
model_file = 'model.pth.tar-' + str(epoch)
for name in names:
model_path = osp.join(directory, name, model_file)
if not osp.exists(model_path):
raise FileNotFoundError(
'Model not found at "{}"'.format(model_path)
)
checkpoint = load_checkpoint(model_path)
state_dict = checkpoint['state_dict']
epoch = checkpoint['epoch']
print(
'Loading weights to {} '
'from "{}" (epoch = {})'.format(name, model_path, epoch)
)
self._models[name].load_state_dict(state_dict)
def set_model_mode(self, mode='train', names=None):
names = self.get_model_names(names)
for name in names:
if mode == 'train':
self._models[name].train()
else:
self._models[name].eval()
def update_lr(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._scheds[name] is not None:
self._scheds[name].step()
def detect_anomaly(self, loss):
if not torch.isfinite(loss).all():
raise FloatingPointError('Loss is infinite or NaN!')
def init_writer(self, log_dir):
if self.__dict__.get('_writer') is None or self._writer is None:
print(
'Initializing summary writer for tensorboard '
'with log_dir={}'.format(log_dir)
)
self._writer = SummaryWriter(log_dir=log_dir)
def close_writer(self):
if self._writer is not None:
self._writer.close()
def write_scalar(self, tag, scalar_value, global_step=None):
if self._writer is None:
# Do nothing if writer is not initialized
# Note that writer is only used when training is needed
pass
else:
self._writer.add_scalar(tag, scalar_value, global_step)
def train(self, start_epoch, max_epoch):
"""Generic training loops."""
self.start_epoch = start_epoch
self.max_epoch = max_epoch
self.before_train()
for self.epoch in range(self.start_epoch, self.max_epoch):
self.before_epoch()
self.run_epoch()
self.after_epoch()
self.after_train()
def before_train(self):
pass
def after_train(self):
pass
def before_epoch(self):
pass
def after_epoch(self):
pass
def run_epoch(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def parse_batch_train(self, batch):
raise NotImplementedError
def parse_batch_test(self, batch):
raise NotImplementedError
def forward_backward(self, batch):
raise NotImplementedError
def model_inference(self, input):
raise NotImplementedError
def model_zero_grad(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
self._optims[name].zero_grad()
def model_backward(self, loss):
self.detect_anomaly(loss)
if not self.use_amp:
loss.backward()
else:
self.scaler.scale(loss).backward()
def model_update(self, names=None):
names = self.get_model_names(names)
for name in names:
if self._optims[name] is not None:
if not self.use_amp:
self._optims[name].step()
else:
self.scaler.step(self._optims[name])
def model_backward_and_update(self, loss, names=None):
self.model_zero_grad(names)
self.model_backward(loss)
self.model_update(names)
if self.use_amp:
self.scaler.update()
class SimpleTrainer(TrainerBase):
"""A simple trainer class implementing generic functions."""
def __init__(self, cfg):
super().__init__()
self.check_cfg(cfg)
if torch.cuda.is_available() and cfg.USE_CUDA:
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# use amp to accelerate training
self.use_amp = cfg.TRAIN.USE_AMP
if self.use_amp:
self.scaler = torch.cuda.amp.GradScaler()
# Save as attributes some frequently used variables
self.start_epoch = self.epoch = 0
self.max_epoch = cfg.OPTIM.MAX_EPOCH
self.output_dir = cfg.OUTPUT_DIR
self.cfg = cfg
self.build_data_loader()
self.build_model()
self.evaluator = build_evaluator(cfg, lab2cname=self.dm.lab2cname)
# zhaoxin modify
self.best_val_acc = -np.inf
self.best_test_acc = -np.inf
self.best_val_test_acc = 0
self.best_val_epoch = 0
self.best_test_epoch = 0
def check_cfg(self, cfg):
"""Check whether some variables are set correctly for
the trainer (optional).
For example, a trainer might require a particular sampler
for training such as 'RandomDomainSampler', so it is good
to do the checking:
assert cfg.DATALOADER.SAMPLER_TRAIN == 'RandomDomainSampler'
"""
pass
def build_data_loader(self):
"""Create essential data-related attributes.
What must be done in the re-implementation
of this method:
1) initialize data manager
2) assign as attributes the data loaders
3) assign as attribute the number of classes
"""
self.dm = DataManager(self.cfg)
self.train_loader_x = self.dm.train_loader_x
self.train_loader_u = self.dm.train_loader_u
self.val_loader = self.dm.val_loader
self.test_loader = self.dm.test_loader
self.num_classes = self.dm.num_classes
def build_model(self):
"""Build and register model.
The default builds a classification model along with its
optimizer and scheduler.
Custom trainers can re-implement this method if necessary.
"""
cfg = self.cfg
print('Building model')
self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes)
# for name, module in self.model.named_children():
# print(name)
if cfg.MODEL.INIT_WEIGHTS:
load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS)
self.model.to(self.device)
print('# params: {:,}'.format(count_num_param(self.model)))
self.optim = build_optimizer(self.model, cfg.OPTIM)
self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)
self.register_model('model', self.model, self.optim, self.sched)
def train(self):
super().train(self.start_epoch, self.max_epoch)
def before_train(self):
# directory = self.cfg.OUTPUT_DIR
if self.cfg.RESUME:
directory = self.cfg.RESUME
self.start_epoch = self.resume_model_if_exist(directory)
# Initialize summary writer
self.init_writer(self.output_dir)
# Remember the starting time (for computing the elapsed time)
self.time_start = time.time()
def after_train(self):
print('Finished training')
do_test = not self.cfg.TEST.NO_TEST
if do_test and not self.cfg.NNI:
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print('Deploy the model with the best val performance')
self.load_model(self.output_dir)
# zhaoxin modify
if self.cfg.TEST.PER_CLASS_RESULT:
self.best_val_test_acc, per_class_accs = self.test(return_per_class_results=True)
perclass_path = osp.join(self.output_dir, 'perclass_result.txt')
with open(perclass_path, 'w') as f:
for acc in per_class_accs:
f.write("{:6f}\n".format(acc))
else:
self.best_val_test_acc = self.test()
# zhaoxin add
if self.cfg.TEST.FINAL_MODEL == 'best_val':
print(
'best_val_acc: {}\nbest_val_epoch: {}\nbest_val_test_acc: {}'.
format(
self.best_val_acc, self.best_val_epoch,
self.best_val_test_acc
)
)
if self.cfg.TEST.TEST_EVERY_EPOCH:
print(
'best_test_acc: {}\nbest_test_epoch: {}'.format(
self.best_test_acc, self.best_test_epoch
)
)
result_path = osp.join(self.output_dir, 'result.txt')
with open(result_path, 'w') as f:
f.write("{:6f}\n".format(self.best_val_test_acc))
if self.cfg.NNI:
nni.report_final_result(self.best_val_acc)
# Show elapsed time
elapsed = round(time.time() - self.time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed: {}'.format(elapsed))
# Close writer
self.close_writer()
def after_epoch(self):
last_epoch = (self.epoch + 1) == self.max_epoch
do_test = not self.cfg.TEST.NO_TEST
meet_checkpoint_freq = (
self.epoch + 1
) % self.cfg.TRAIN.CHECKPOINT_FREQ == 0 if self.cfg.TRAIN.CHECKPOINT_FREQ > 0 else False
# zhaoxin modify
if do_test and self.cfg.TEST.FINAL_MODEL == 'best_val':
curr_val_acc = self.test(split='val')
# nni: report intermediate result
if self.cfg.NNI:
nni.report_intermediate_result(curr_val_acc)
is_best = curr_val_acc > self.best_val_acc
if is_best:
self.best_val_acc = curr_val_acc
self.best_val_epoch = self.epoch + 1
self.save_model(
self.epoch,
self.output_dir,
model_name='model-best.pth.tar'
)
if do_test and self.cfg.TEST.TEST_EVERY_EPOCH:
curr_test_acc = self.test(split='test')
if curr_test_acc > self.best_test_acc:
self.best_test_acc = curr_test_acc
self.best_test_epoch = self.epoch + 1
# if self.cfg.TEST.FINAL_MODEL == 'best_val':
# if is_best:
# self.best_val_test_acc = curr_test_acc
if meet_checkpoint_freq or last_epoch:
self.save_model(self.epoch, self.output_dir)
@torch.no_grad()
def test(self, split=None, return_per_class_results=False):
"""A generic testing pipeline."""
self.set_model_mode('eval')
self.evaluator.reset()
if split is None:
split = self.cfg.TEST.SPLIT
if split == 'val' and self.val_loader is not None:
data_loader = self.val_loader
print('Do evaluation on {} set'.format(split))
else:
data_loader = self.test_loader
print('Do evaluation on test set')
for batch_idx, batch in enumerate(data_loader):
input, label = self.parse_batch_test(batch)
output = self.model_inference(input)
self.evaluator.process(output, label)
results = self.evaluator.evaluate()
for k, v in results.items():
if k == 'perclass_accuracies':
continue
tag = '{}/{}'.format(split, k)
self.write_scalar(tag, v, self.epoch)
if not return_per_class_results:
return list(results.values())[0]
else:
return results['accuracy'], results['perclass_accuracies']
def model_inference(self, input):
return self.model(input)
def parse_batch_test(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
def get_current_lr(self, names=None):
names = self.get_model_names(names)
name = names[0]
return self._optims[name].param_groups[0]['lr']
class TrainerXU(SimpleTrainer):
"""A base trainer using both labeled and unlabeled data.
In the context of domain adaptation, labeled and unlabeled data
come from source and target domains respectively.
When it comes to semi-supervised learning, all data comes from the
same domain.
"""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
# Decide to iterate over labeled or unlabeled dataset
len_train_loader_x = len(self.train_loader_x)
len_train_loader_u = len(self.train_loader_u)
if self.cfg.TRAIN.COUNT_ITER == 'train_x':
self.num_batches = len_train_loader_x
elif self.cfg.TRAIN.COUNT_ITER == 'train_u':
self.num_batches = len_train_loader_u
elif self.cfg.TRAIN.COUNT_ITER == 'smaller_one':
self.num_batches = min(len_train_loader_x, len_train_loader_u)
else:
raise ValueError
train_loader_x_iter = iter(self.train_loader_x)
train_loader_u_iter = iter(self.train_loader_u)
end = time.time()
for self.batch_idx in range(self.num_batches):
try:
batch_x = next(train_loader_x_iter)
except StopIteration:
train_loader_x_iter = iter(self.train_loader_x)
batch_x = next(train_loader_x_iter)
try:
batch_u = next(train_loader_u_iter)
except StopIteration:
train_loader_u_iter = iter(self.train_loader_u)
batch_u = next(train_loader_u_iter)
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch_x, batch_u)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
label_x = batch_x['label']
input_u = batch_u['img']
input_x = input_x.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
return input_x, label_x, input_u
class TrainerX(SimpleTrainer):
"""A base trainer using labeled data only."""
def run_epoch(self):
self.set_model_mode('train')
losses = MetricMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
self.num_batches = len(self.train_loader_x)
end = time.time()
for self.batch_idx, batch in enumerate(self.train_loader_x):
data_time.update(time.time() - end)
loss_summary = self.forward_backward(batch)
batch_time.update(time.time() - end)
losses.update(loss_summary)
if (self.batch_idx + 1) % self.cfg.TRAIN.PRINT_FREQ == 0:
nb_this_epoch = self.num_batches - (self.batch_idx + 1)
nb_future_epochs = (
self.max_epoch - (self.epoch + 1)
) * self.num_batches
eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
'epoch [{0}/{1}][{2}/{3}]\t'
'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'eta {eta}\t'
'{losses}\t'
'lr {lr}'.format(
self.epoch + 1,
self.max_epoch,
self.batch_idx + 1,
self.num_batches,
batch_time=batch_time,
data_time=data_time,
eta=eta,
losses=losses,
lr=self.get_current_lr()
)
)
n_iter = self.epoch * self.num_batches + self.batch_idx
for name, meter in losses.meters.items():
self.write_scalar('train/' + name, meter.avg, n_iter)
self.write_scalar('train/lr', self.get_current_lr(), n_iter)
end = time.time()
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
domain = batch['domain']
input = input.to(self.device)
label = label.to(self.device)
domain = domain.to(self.device)
return input, label, domain
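class ExampleCrossEntropyTrainer(TrainerX):
    """Illustrative sketch (not part of Dassl itself) showing how a concrete
    trainer is built on TrainerX: implement forward_backward() using the
    helpers defined above, here with a plain cross-entropy objective."""

    def forward_backward(self, batch):
        # parse_batch_train() is inherited from TrainerX
        input, label, domain = self.parse_batch_train(batch)
        output = self.model(input)
        loss = nn.functional.cross_entropy(output, label)
        self.model_backward_and_update(loss)

        loss_summary = {'loss': loss.item()}

        # Step the LR scheduler once per epoch, after the last batch
        if (self.batch_idx + 1) == self.num_batches:
            self.update_lr()

        return loss_summary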
| 33.30226 | 97 | 0.57015 | ["MIT"] | zhaoxin94/Dassl.pytorch | dassl/engine/trainer.py | 23,578 | Python |
import json
import os
import pickle
import requests
import shutil
import tempfile
import uuid
from flask import Blueprint, current_app, jsonify, request, send_file
name = 'HTTP'
prefix = 'http'
storage_enabled = True
global storage_path
plugin = Blueprint(name, __name__)
def register(app, plugin_storage_path=None):
app.register_blueprint(plugin, url_prefix=f'/{prefix}')
app.logger.info(f'{name} plugin registered.')
global storage_path
storage_path = plugin_storage_path
persistence = {
"configuration": {},
"execution": {},
}
result_zip_file_name = 'results.zip'
@plugin.route('/')
def index():
return f'This is the Radon CTT Agent HTTP Plugin.', 200
@plugin.route('/configuration/', methods=['POST'])
def configuration_create():
config_instance = {}
configuration_uuid = str(uuid.uuid4())
config_instance['uuid'] = configuration_uuid
params = {
'use_https': {
'required': True,
'default': False,
},
'method': {
'required': True,
'default': 'GET',
},
'hostname': {
'required': True,
'default': None,
},
'port': {
'required': True,
'default': 80,
},
'path': {
'required': True,
'default': "/",
},
'test_body': {
'required': False,
'default': None,
},
'test_header': {
'required': False,
'default': None,
},
}
for param in params:
is_required = params[param]['required']
default_value = params[param]['default']
if param in request.form:
value = request.form.get(param, type=str)
current_app.logger.info(f'\'{param}\' set to: \'{value}\'.')
config_instance[param] = value
else:
if is_required and default_value is not None:
value = default_value
current_app.logger.info(f'\'{param}\' set to default value: \'{value}\'.')
config_instance[param] = value
if is_required and param not in config_instance:
current_app.logger.error(f"Required parameter {param} not provided.")
return f'Required parameter {param} not provided.', 400
persistence['configuration'][configuration_uuid] = config_instance
current_app.logger.info(f"Config: {config_instance}")
return jsonify(config_instance), 201
@plugin.route('/execution/', methods=['POST'])
def execution():
execution_instance = {}
if 'config_uuid' in request.form:
config_uuid = request.form['config_uuid']
config_entry = persistence['configuration'][config_uuid]
execution_instance['config'] = config_entry
# Assign values from config if they are stored in the config, otherwise assign None
        # The form value arrives as a string, so parse it explicitly (bool() on a
        # non-empty string such as "False" would always be True).
        use_https = str(config_entry['use_https']).lower() in ('1', 'true', 'yes') if 'use_https' in config_entry else None
method = str(config_entry['method']).upper() if 'method' in config_entry else None
hostname = str(config_entry['hostname']) if 'hostname' in config_entry else None
port = int(config_entry['port']) if 'port' in config_entry else None
path = str(config_entry['path']) if 'path' in config_entry else None
test_body = config_entry['test_body'] if 'test_body' in config_entry else None
test_header = config_entry['test_header'] if 'test_header' in config_entry else None
# Check if required parameters are set
if use_https is not None and method and hostname and port and path:
protocol = 'http'
if use_https:
protocol += 's'
target_url = f'{protocol}://{hostname}:{port}{path}'
# Send request with given parameters
response = requests.request(method, target_url, headers=test_header, json=test_body)
response_status = response.status_code
# Create UUID for execution
execution_uuid = str(uuid.uuid4())
execution_instance['uuid'] = execution_uuid
execution_instance['target_url'] = target_url
execution_instance['status'] = str(response_status)
persistence['execution'][execution_uuid] = execution_instance
execution_results_dir = os.path.join(storage_path, execution_uuid)
os.makedirs(execution_results_dir)
execution_json = os.path.join(execution_results_dir, 'execution.json')
received_response = os.path.join(execution_results_dir, 'response.bin')
with open(execution_json, 'w') as exec_json:
exec_json.write(json.dumps(execution_instance))
with open(received_response, 'wb') as response_bin:
response_bin.write(pickle.dumps(response))
with tempfile.NamedTemporaryFile() as tf:
tmp_zip_file = shutil.make_archive(tf.name, 'zip', execution_results_dir)
shutil.copy2(tmp_zip_file, os.path.join(execution_results_dir, result_zip_file_name))
# Test was executed with any possible outcome
return jsonify(execution_instance), 200
else:
return "Required configuration parameters are missing.", jsonify(config_entry), 400
else:
return "No configuration with that ID found.", jsonify(persistence), 404
# Get execution results
@plugin.route('/execution/<string:exec_uuid>/', methods=['GET'])
def execution_results(exec_uuid):
try:
execution_uuid = persistence.get('execution').get(exec_uuid).get('uuid')
except AttributeError:
return "No execution found with that ID.", 404
results_zip_path = os.path.join(storage_path, execution_uuid, result_zip_file_name)
if os.path.isfile(results_zip_path):
return send_file(results_zip_path)
else:
return "No results available (yet).", 404
| 33.661017 | 101 | 0.630916 | [
"Apache-2.0"
] | radon-h2020/radon-ctt-agent-plugins | http/__init__.py | 5,958 | Python |
from trixi.logger.file.numpyplotfilelogger import NumpyPlotFileLogger
from trixi.logger.file.pytorchplotfilelogger import PytorchPlotFileLogger
from trixi.logger.file.textfilelogger import TextFileLogger
| 51 | 73 | 0.897059 | [
"MIT"
] | comeonfox/trixi | trixi/logger/file/__init__.py | 204 | Python |
"""
Copyright 2021 K.M Ahnaf Zamil
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from urllib.parse import parse_qs
import typing
__all__: typing.Final = ["Request", "ImmutableDict", "_Redirect", "Endpoint"]
class _Redirect(object):
"""Just an object for simulating a redirect"""
def __init__(self, url: str) -> None:
self.url = url
class ImmutableDict(dict):
"""An immutable dictionary implementation for query arguments and form data"""
def __setitem__(self, k, v) -> None:
raise ValueError("ImmutableDict object cannot be modified (immutable)")
class Request(object):
"""An object that contains information related to the HTTP request"""
def __init__(self, environ):
self._environ = environ
@property
def method(self) -> str:
"""HTTP method used for the request"""
return self._environ["REQUEST_METHOD"]
@property
def endpoint(self) -> str:
"""The route/endpoint used for that specific request"""
return self._environ["PATH_INFO"]
@property
def query_args(self) -> ImmutableDict:
"""Query arguments from the request"""
args = self._environ["QUERY_STRING"]
if not args:
return ImmutableDict({})
args = args.split("&")
query_args = {}
for _arg in args:
name, value = _arg.split("=")
query_args[name] = value
return ImmutableDict(query_args)
@property
def form(self) -> typing.Optional[typing.Dict]:
"""Form data sent via HTTP request"""
data = self._environ.get("wsgi.input") # Returns io.BytesIO object
if data:
form_dict = parse_qs(data.getvalue().decode("utf-8"))
final_dict = {}
for k, v in form_dict.items():
final_dict[k] = v[0] # Since v is list containing the form data
return ImmutableDict(final_dict)
def __str__(self):
return f'<Request endpoint="{self.endpoint}" method="{self.method}">'
class Endpoint(object):
def __init__(self, route, func) -> None:
self.route = route
self.extension = None
self._func = func
def __call__(self, request: Request):
return self._func(request)
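# A minimal usage sketch, not part of the public API: it shows how Request reads a minimal WSGI
# environ, using only the standard keys consumed by the properties above.
def _example_request_usage() -> Request:
    environ = {
        "REQUEST_METHOD": "GET",
        "PATH_INFO": "/hello",
        "QUERY_STRING": "name=pog&lang=py",
    }
    request = Request(environ)
    assert request.method == "GET"
    assert request.endpoint == "/hello"
    assert request.query_args["name"] == "pog"
    return request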
| 36.426966 | 460 | 0.677051 | [
"MIT"
] | ahnaf-zamil/pogweb | pogweb/models.py | 3,242 | Python |
from mizarlabs.transformers.technical.rsi import BarArrivalRSIStrategy
from mizarlabs.transformers.technical.rsi import RSIConfirmation
def test_bar_arrival_rsi_strategy_predict(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_bar_arrival_rsi_strategy_predict_proba(dollar_bar_dataframe):
rsi_upper_threshold = 55
rsi_lower_threshold = 45
bar_arrival_upper_threshold = 0
bar_arrival_lower_threshold = -0.2
rsi_timeperiod = 25
bar_arrival_fast_period = 500
bar_arrival_slow_period = 200
max_bar_arrival_mean_diff = 10000000000000
bar_arrival_rsi_strategy = BarArrivalRSIStrategy(
rsi_upper_threshold,
rsi_lower_threshold,
bar_arrival_upper_threshold,
bar_arrival_lower_threshold,
rsi_timeperiod,
bar_arrival_fast_period,
bar_arrival_slow_period,
max_bar_arrival_mean_diff,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
def test_rsi_confirmation_predict(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
assert set(preds).issubset(bar_arrival_rsi_strategy.classes_)
def test_rsi_confirmation_predict_proba(dollar_bar_dataframe):
rsi_threshold = 45
rsi_timeperiod = 25
rsi_confirmation_period = 2
rsi_moving_average_window = 2
bar_arrival_rsi_strategy = RSIConfirmation(
rsi_timeperiod,
rsi_threshold,
rsi_confirmation_period,
rsi_moving_average_window,
)
preds = bar_arrival_rsi_strategy.predict(dollar_bar_dataframe)
pred_proba = bar_arrival_rsi_strategy.predict_proba(dollar_bar_dataframe)
pred_to_idx_map = {0: 0, 1: 1}
assert all(pred_proba.sum(axis=1) == 1)
assert all(pred_proba[i, pred_to_idx_map[p]] == 1 for i, p in enumerate(preds))
| 35.174419 | 83 | 0.753719 | [
"MIT"
] | MizarAI/mizar-labs | mizarlabs/tests/transformers/test_rsi.py | 3,025 | Python |
# Scrapy settings for amzASINScrapper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'amzASINScrapper'
SPIDER_MODULES = ['amzASINScrapper.spiders']
NEWSPIDER_MODULE = 'amzASINScrapper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'amzASINScrapper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'amzASINScrapper.middlewares.AmzasinscrapperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'amzASINScrapper.pipelines.AmzasinscrapperPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.460674 | 103 | 0.78327 | [
"MIT"
] | sunil-dhaka/python-webScrappers | amzASINScrapper/amzASINScrapper/settings.py | 3,156 | Python |
"""Test cases around the demo fan platform."""
import pytest
from homeassistant.setup import async_setup_component
from homeassistant.components import fan
from homeassistant.const import STATE_OFF, STATE_ON
from tests.components.fan import common
FAN_ENTITY_ID = 'fan.living_room_fan'
def get_entity(hass):
"""Get the fan entity."""
return hass.states.get(FAN_ENTITY_ID)
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
hass.loop.run_until_complete(async_setup_component(hass, fan.DOMAIN, {
'fan': {
'platform': 'demo',
}
}))
async def test_turn_on(hass):
"""Test turning on the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID, fan.SPEED_HIGH)
assert STATE_ON == get_entity(hass).state
assert fan.SPEED_HIGH == \
get_entity(hass).attributes[fan.ATTR_SPEED]
async def test_turn_off(hass):
"""Test turning off the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_off(hass, FAN_ENTITY_ID)
assert STATE_OFF == get_entity(hass).state
async def test_turn_off_without_entity_id(hass):
"""Test turning off all fans."""
assert STATE_OFF == get_entity(hass).state
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert STATE_OFF != get_entity(hass).state
await common.async_turn_off(hass)
assert STATE_OFF == get_entity(hass).state
async def test_set_direction(hass):
"""Test setting the direction of the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_set_direction(hass, FAN_ENTITY_ID,
fan.DIRECTION_REVERSE)
assert fan.DIRECTION_REVERSE == \
get_entity(hass).attributes.get('direction')
async def test_set_speed(hass):
"""Test setting the speed of the device."""
assert STATE_OFF == get_entity(hass).state
await common.async_set_speed(hass, FAN_ENTITY_ID, fan.SPEED_LOW)
assert fan.SPEED_LOW == \
get_entity(hass).attributes.get('speed')
async def test_oscillate(hass):
"""Test oscillating the fan."""
assert not get_entity(hass).attributes.get('oscillating')
await common.async_oscillate(hass, FAN_ENTITY_ID, True)
assert get_entity(hass).attributes.get('oscillating')
await common.async_oscillate(hass, FAN_ENTITY_ID, False)
assert not get_entity(hass).attributes.get('oscillating')
async def test_is_on(hass):
"""Test is on service call."""
assert not fan.is_on(hass, FAN_ENTITY_ID)
await common.async_turn_on(hass, FAN_ENTITY_ID)
assert fan.is_on(hass, FAN_ENTITY_ID)
| 29.050505 | 74 | 0.714882 | [
"Apache-2.0"
] | ActuallyRuben/home-assistant | tests/components/demo/test_fan.py | 2,876 | Python |
from brownie import AdvancedCollectible, network
import pytest
from scripts.advanced_collectible.deploy_and_create import deploy_and_create, get_contract
from scripts.utils.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
def test_can_create_advanced_collectible():
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip("Only for local testing")
advanced_collectible, creation_transaction = deploy_and_create()
# getting the requestId value from the requestedCollectible event
requestId = creation_transaction.events["requestedCollectible"]["requestId"]
randomNumber = 777
get_contract("vrf_coordinator").callBackWithRandomness(
requestId, randomNumber, advanced_collectible.address, {"from": get_account()})
assert advanced_collectible.tokenCounter() == 1
assert advanced_collectible.tokenIdToBreed(0) == randomNumber % 3
| 48 | 90 | 0.804825 | [
"MIT"
] | Sam44323/nft-mix-opensea | tests/unit/test_advanced_collectible.py | 912 | Python |
# DigitalOcean-hosted Flask server that receives an image and runs classification on it
from flask import Flask, request, jsonify
import classify
import base64
import json
import firebase
import env
# Instantiate Flask
app = Flask(__name__)
# health check
@app.route("/status")
def health_check():
return "Running!"
# Performing image Recognition on Image, sent as bytes via POST payload
@app.route("/detect", methods=["POST"])
def detect():
imgBytes = request.data
imgdata = base64.b64decode(imgBytes)
with open("temp.png", "wb") as f:
f.write(imgdata)
print("successfully receieved image")
# Pass image bytes to classifier
result = classify.analyse("temp.png")
    # Return results as a neat JSON object using Flask's jsonify
result = jsonify(result)
print(result.json)
response_data = result.json
print(response_data)
db = firebase.Firebase()
db.authenticate()
db.push(response_data)
print("Updated Firebase.")
return result
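# A minimal client sketch showing how a caller might hit /detect: the raw image bytes are
# base64-encoded and sent as the request body, mirroring the decoding done above. The host and
# port are assumptions for a locally running server, and `requests` is assumed to be installed
# (it is not imported by the original module).
def example_detect_request(image_path="sample.png"):
    import requests
    with open(image_path, "rb") as f:
        payload = base64.b64encode(f.read())
    response = requests.post("http://localhost:80/detect", data=payload)
    return response.json()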
if __name__ == "__main__":
app.run(host="0.0.0.0", port=80, debug=True)
| 20.196078 | 71 | 0.691262 | [
"MIT"
] | flamanta/river-trash-detection | app.py | 1,030 | Python |
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_img, prob, layer3_o, layer4_o, layer7_o
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution layer with road / not-road features only
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upscaling size/ add features
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer4_out)
# upscaling size/ reduce features
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2,2), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connections / add to upscaled output
output = tf.add(output, vgg_layer3_out)
# upscaling size/ reduce features to road OR not-road
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8,8), padding='SAME',
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='nn_final_output')
return output
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
# add loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# training_op
training_operation = optimizer.minimize(cross_entropy_loss)
return logits, training_operation, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
# initialize global variables
sess.run(tf.global_variables_initializer())
# going through the batches of images i.e. epoch
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img,
correct_label: gt_img,
keep_prob: 0.7,
learning_rate: 5e-04})
print("Loss of {} at epoch {}/{}".format(loss, epoch, epochs))
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576) # KITTI dataset uses 160x576 images
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
epochs = 20
batch_size = 5
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
correct_label = tf.placeholder(tf.int32)
learning_rate = tf.placeholder(tf.float32)
# TODO: Build NN using load_vgg, layers, and optimize function
input_img, keep_prob, layer3_o, layer4_o, layer7_o = load_vgg(sess, vgg_path)
layer_output = layers(layer3_o, layer4_o, layer7_o, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_img,
correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_img)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
| 41.891892 | 146 | 0.711613 | [
"MIT"
] | papaispicolo/CarNDT3-SemanticSegmentation | main.py | 7,750 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR10 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000:i * 10000, :, :, :],
y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if K.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
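# A minimal usage sketch, not part of the Keras codebase: with the default 'channels_last' image
# data format, the arrays come back as uint8 tensors of shape (50000, 32, 32, 3) / (50000, 1) for
# training and (10000, 32, 32, 3) / (10000, 1) for testing. The dataset is downloaded on first use.
def _example_load_cifar10():
  (x_train, y_train), (x_test, y_test) = load_data()
  return x_train.shape, y_train.shape, x_test.shape, y_test.shape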
| 34.262295 | 80 | 0.691388 | [
"Apache-2.0"
] | 252125889/tensorflow | tensorflow/python/keras/_impl/keras/datasets/cifar10.py | 2,090 | Python |
"""
Django settings for scannerKH project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '74lbuyy!_ihecg*uh8i9^j!wq3gc_)vv$55!h&0yon03f2%c$$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'scanner.apps.ScannerConfig',
'user.apps.UserConfig',
'grosshaendler.apps.GrosshaendlerConfig',
'artikel.apps.ArtikelConfig',
'bestellung.apps.BestellungConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scannerKH.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scannerKH.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'user.User'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'de'
TIME_ZONE = 'Europe/Berlin'  # 'CEST' is an abbreviation, not a valid IANA time zone name
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.651163 | 91 | 0.699003 | [
"MIT"
] | JanGut/scannerKH | scannerKH/scannerKH/settings.py | 3,309 | Python |
from .draw_graph import draw_graph
| 17.5 | 34 | 0.857143 | [
"Apache-2.0"
] | IntelLabs/causality-lab | plot_utils/__init__.py | 35 | Python |
# -*- coding: utf-8 -*-
import scrapy
import re
import json
from locations.hourstudy import inputoutput
class AldiUKSpider(scrapy.Spider):
name = "aldiuk"
allowed_domains = ['www.aldi.co.uk']
start_urls = (
'https://www.aldi.co.uk/sitemap/store',
)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
for path in city_urls:
yield scrapy.Request(
path.strip(),
callback=self.parse_store,
)
else:
pass
def parse_store(self, response):
json_data = response.xpath('//script[@type="text/javascript"]/text()').extract_first().replace('\n','').replace('\t','').split('.push(')[1].rstrip(')')
data = json.loads(json_data)
geojson_data = response.xpath('//script[@class="js-store-finder-initial-state"][@type="application/json"]/text()').extract_first()
geodata = json.loads(geojson_data)
# properties = {
# 'name': data['seoData']['name'],
# 'ref': data['seoData']['name'],
# 'addr_full': data['seoData']['address']['streetAddress'],
# 'city': data['seoData']['address']['addressLocality'],
# 'postcode': data['seoData']['address']['postalCode'],
# 'country': data['seoData']['address']['addressCountry'],
# 'website': response.request.url,
# 'opening_hours': str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",''),
# 'lat': float(geodata['store']['latlng']['lat']),
# 'lon': float(geodata['store']['latlng']['lng']),
# }
raw = str(data['seoData']['openingHours'])
formatted = str(data['seoData']['openingHours']).replace('[','').replace(']','').replace("'",'')
yield inputoutput(raw,formatted)
| 39.125 | 159 | 0.571353 | [
"MIT"
] | bealbrown/allhours | locations/spiders/aldi_uk.py | 1,878 | Python |
"""
Django settings for api_drf project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2p$!i%#w$3e9(l3v4#%_#fi2_fae2l7ksdsd+1*vrc6_#8_@_*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'mainService'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api_drf.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api_drf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
# When you enable API versioning, the request.version attribute will contain a string
# that corresponds to the version requested in the incoming client request.
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
} | 26.570313 | 91 | 0.701558 | [
"MIT"
] | kaparis/spa101 | api_drf/api_drf/settings.py | 3,401 | Python |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
from tech import drc, parameter, spice
from abc import ABC, abstractmethod
from .stimuli import *
from .charutils import *
class spice_measurement(ABC):
"""Base class for spice stimulus measurements."""
def __init__(self, measure_name, measure_scale=None, has_port=True):
#Names must be unique for correct spice simulation, but not enforced here.
self.name = measure_name
self.measure_scale = measure_scale
self.has_port = has_port #Needed for error checking
#Some meta values used externally. variables are added here for consistency accross the objects
self.meta_str = None
self.meta_add_delay = False
@abstractmethod
def get_measure_function(self):
return None
@abstractmethod
def get_measure_values(self):
return None
def write_measure(self, stim_obj, input_tuple):
measure_func = self.get_measure_function()
if measure_func == None:
debug.error("Did not set measure function",1)
measure_vals = self.get_measure_values(*input_tuple)
measure_func(stim_obj, *measure_vals)
def retrieve_measure(self, port=None):
self.port_error_check(port)
if port != None:
value = parse_spice_list("timing", "{0}{1}".format(self.name.lower(), port))
else:
value = parse_spice_list("timing", "{0}".format(self.name.lower()))
if type(value)!=float or self.measure_scale == None:
return value
else:
return value*self.measure_scale
def port_error_check(self, port):
if self.has_port and port == None:
debug.error("Cannot retrieve measurement, port input was expected.",1)
elif not self.has_port and port != None:
debug.error("Unexpected port input received during measure retrieval.",1)
class delay_measure(spice_measurement):
"""Generates a spice measurement for the delay of 50%-to-50% points of two signals."""
def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, targ_dir_str,\
trig_vdd=0.5, targ_vdd=0.5, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd)
def get_measure_function(self):
return stimuli.gen_meas_delay
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
"""Set the constants for this measurement: signal names, directions, and trigger scales"""
self.trig_dir_str = trig_dir_str
self.targ_dir_str = targ_dir_str
self.trig_val_of_vdd = trig_vdd
self.targ_val_of_vdd = targ_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name
#Time delays and ports are variant and needed as inputs when writing the measurement
def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
"""Constructs inputs to stimulus measurement function. Variant values are inputs here."""
self.port_error_check(port)
trig_val = self.trig_val_of_vdd * vdd_voltage
targ_val = self.targ_val_of_vdd * vdd_voltage
if port != None:
#For dictionary indexing reasons, the name is formatted differently than the signals
meas_name = "{}{}".format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
return (meas_name,trig_name,targ_name,trig_val,targ_val,self.trig_dir_str,self.targ_dir_str,trig_td,targ_td)
class slew_measure(delay_measure):
def __init__(self, measure_name, signal_name, slew_dir_str, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(signal_name, slew_dir_str)
def set_meas_constants(self, signal_name, slew_dir_str):
"""Set the values needed to generate a Spice measurement statement based on the name of the measurement."""
self.trig_dir_str = slew_dir_str
self.targ_dir_str = slew_dir_str
if slew_dir_str == "RISE":
self.trig_val_of_vdd = 0.1
self.targ_val_of_vdd = 0.9
elif slew_dir_str == "FALL":
self.trig_val_of_vdd = 0.9
self.targ_val_of_vdd = 0.1
else:
debug.error("Unrecognised slew measurement direction={}".format(slew_dir_str),1)
self.trig_name_no_port = signal_name
self.targ_name_no_port = signal_name
#Time delays and ports are variant and needed as inputs when writing the measurement
class power_measure(spice_measurement):
"""Generates a spice measurement for the average power between two time points."""
def __init__(self, measure_name, power_type="", measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(power_type)
def get_measure_function(self):
return stimuli.gen_meas_power
def set_meas_constants(self, power_type):
"""Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)"""
#Not needed for power simulation
self.power_type = power_type #Expected to be "RISE"/"FALL"
def get_measure_values(self, t_initial, t_final, port=None):
"""Constructs inputs to stimulus measurement function. Variant values are inputs here."""
self.port_error_check(port)
if port != None:
meas_name = "{}{}".format(self.name, port)
else:
meas_name = self.name
return (meas_name,t_initial,t_final)
class voltage_when_measure(spice_measurement):
"""Generates a spice measurement to measure the voltage of a signal based on the voltage of another."""
def __init__(self, measure_name, trig_name, targ_name, trig_dir_str, trig_vdd, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(trig_name, targ_name, trig_dir_str, trig_vdd)
def get_measure_function(self):
return stimuli.gen_meas_find_voltage
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
"""Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)"""
self.trig_dir_str = trig_dir_str
self.trig_val_of_vdd = trig_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name
def get_measure_values(self, trig_td, vdd_voltage, port=None):
"""Constructs inputs to stimulus measurement function. Variant values are inputs here."""
self.port_error_check(port)
if port != None:
#For dictionary indexing reasons, the name is formatted differently than the signals
meas_name = "{}{}".format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
trig_voltage = self.trig_val_of_vdd*vdd_voltage
return (meas_name,trig_name,targ_name,trig_voltage,self.trig_dir_str,trig_td)
class voltage_at_measure(spice_measurement):
"""Generates a spice measurement to measure the voltage at a specific time.
The time is considered variant with different periods."""
def __init__(self, measure_name, targ_name, measure_scale=None, has_port=True):
spice_measurement.__init__(self, measure_name, measure_scale, has_port)
self.set_meas_constants(targ_name)
def get_measure_function(self):
return stimuli.gen_meas_find_voltage_at_time
def set_meas_constants(self, targ_name):
"""Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)"""
self.targ_name_no_port = targ_name
def get_measure_values(self, time_at, port=None):
"""Constructs inputs to stimulus measurement function. Variant values are inputs here."""
self.port_error_check(port)
if port != None:
#For dictionary indexing reasons, the name is formatted differently than the signals
meas_name = "{}{}".format(self.name, port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
targ_name = self.targ_name_no_port
return (meas_name,targ_name,time_at)
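# A minimal usage sketch, not part of OpenRAM proper: it shows how a delay_measure is
# parameterised and what tuple it hands to the stimulus writer. The signal names, delays and
# supply voltage are illustrative only.
def _example_delay_measure_usage():
    meas = delay_measure("delay_lh", "clk{0}", "dout{0}_0", "RISE", "RISE", has_port=True)
    # Returns (name, trig_name, targ_name, trig_val, targ_val, trig_dir, targ_dir, trig_td, targ_td)
    return meas.get_measure_values(trig_td=1.0, targ_td=1.0, vdd_voltage=3.3, port=0)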
| 45.356436 | 118 | 0.697337 | [
"BSD-3-Clause"
] | ckdur/OpenRAM | compiler/characterizer/measurements.py | 9,162 | Python |
from setuptools import find_packages, setup
NAME = "popmon"
MAJOR = 0
REVISION = 3
PATCH = 8
DEV = False
# NOTE: also update version at: README.rst
with open("requirements.txt") as f:
REQUIREMENTS = f.read().splitlines()
# read the contents of the README file
with open("README.rst", encoding="utf-8") as f:
long_description = f.read()
VERSION = "{major}.{revision}.{patch}".format(
major=MAJOR, revision=REVISION, patch=PATCH
)
FULL_VERSION = VERSION
if DEV:
FULL_VERSION += ".dev"
with open("requirements-test.txt") as f:
REQUIREMENTS += f.read().splitlines()
def write_version_py(filename: str = "popmon/version.py") -> None:
"""Write package version to version.py.
This will ensure that the version in version.py is in sync with us.
:param filename: The version.py to write too.
:type filename: str
"""
# Do not modify the indentation of version_str!
version_str = """\"\"\"THIS FILE IS AUTO-GENERATED BY SETUP.PY.\"\"\"
name = \"{name!s}\"
version = \"{version!s}\"
full_version = \"{full_version!s}\"
release = {is_release!s}
"""
with open(filename, "w") as version_file:
version_file.write(
version_str.format(
name=NAME.lower(),
version=VERSION,
full_version=FULL_VERSION,
is_release=not DEV,
)
)
def setup_package() -> None:
"""The main setup method.
It is responsible for setting up and installing the package.
"""
write_version_py()
setup(
name=NAME,
version=VERSION,
url="https://github.com/ing-bank/popmon",
license="MIT",
author="ING Wholesale Banking Advanced Analytics",
description="Monitor the stability of a pandas or spark dataset",
keywords="pandas spark data-science data-analysis monitoring statistics python jupyter ipython",
long_description=long_description,
long_description_content_type="text/x-rst",
python_requires=">=3.6",
packages=find_packages(),
install_requires=REQUIREMENTS,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
# files to be shipped with the installation, under: popmon/popmon/
# after installation, these can be found with the functions in resources.py
package_data=dict(
popmon=[
"visualization/templates/*.html",
"visualization/templates/assets/css/*.css",
"visualization/templates/assets/js/*.js",
"test_data/*.csv.gz",
"test_data/*.json*",
"notebooks/popmon*tutorial*.ipynb",
]
),
entry_points={
"console_scripts": ["popmon_run = popmon.pipeline.amazing_pipeline:run"]
},
)
if __name__ == "__main__":
setup_package()
| 29.633663 | 104 | 0.610758 | [
"MIT"
] | stephanecollot/popmon | setup.py | 2,993 | Python |
import platform
import subprocess
import sys
from optparse import OptionParser
from util import fileLogger
from util import logger
def parse_start_arguments():
parser = OptionParser()
parser.add_option("--unityPath", dest="UnityPath", default=True, help="Path to Unity application")
parser.add_option("--projectPath", dest="ProjectPath", default=True, help="Path to Unity Project")
parser.add_option("--logPath", dest="LogPath", default=True, help="Path to Unity Log File")
parser.add_option("-e", "--executionMessage", dest="ExecutionMethod", default=True, help="Execution method after unit started completly")
parser.add_option("-t", "--target", dest="Target", help="Build Target of the Build")
parser.add_option("--noTimer", dest="NoTimer", action='store_true', help="no timestamp should be displayed")
(options, args) = parser.parse_args()
return options
def detect_os():
operation_system = platform.system()
LOGGER.info("Detected " + operation_system + " as Operation System")
return operation_system
options = parse_start_arguments()
LOGGER = logger.Logger(options.NoTimer)
os = detect_os()
def start_unity_build_command():
LOGGER.info("Start Unity Build")
try:
build_command = options.UnityPath + " -projectPath " + options.ProjectPath + \
" -logfile " + options.LogPath + \
" -buildTarget " + options.Target + \
" -quit " \
"-batchmode " \
"-nographics " \
"-executeMethod " + options.ExecutionMethod
if os != "Windows":
process = subprocess.Popen(build_command, shell=True, stdout=subprocess.PIPE)
process.wait()
else:
subprocess.call(build_command)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
def cleanup_unity_process():
try:
LOGGER.info("Cleaning up Unity process")
if os == "Windows":
subprocess.call(r'TASKKILL /F /IM Unity.exe', stderr=subprocess.PIPE)
except subprocess.CalledProcessError as error:
LOGGER.warn("Couldn't kill unity " + str(error))
def cleanup_old_logfile():
try:
open(options.LogPath, 'w').close()
LOGGER.info("old log cleared")
except FileNotFoundError:
LOGGER.info("No old log file was found")
try:
LOGGER.log("DEBUG", "Starting with arguments: " + str(options))
LOGGER.info("Cleaning old logfile")
cleanup_old_logfile()
LOGGER.info("Read logfile tailing")
logfile = fileLogger.ContinuousFileLogger(options.LogPath, options.NoTimer)
logfile.start()
LOGGER.info("Start unity")
start_unity_build_command()
LOGGER.info("Cleanup Processes")
cleanup_unity_process()
LOGGER.info("Cleanup logger")
logfile.stop()
except Exception as e:
LOGGER.error("Failed to start a thread" + str(e))
| 34.267442 | 141 | 0.657279 | [
"MIT"
] | christian-stockinger/UnityBuilder | UnityBuilder.py | 2,947 | Python |
# -*- coding: utf-8 -*-
"""Access to FAIRsharing via its API.
.. seealso:: https://beta.fairsharing.org/API_doc
"""
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pystow
import requests
import yaml
from tqdm import tqdm
__all__ = [
"ensure_fairsharing",
"load_fairsharing",
"FairsharingClient",
]
PATH = pystow.join("bio", "fairsharing", name="fairsharing.yaml")
def load_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
"""Get the FAIRsharing registry."""
path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
with path.open() as file:
return yaml.safe_load(file)
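# A minimal usage sketch, assuming FAIRsharing credentials are available via pystow configuration
# ("fairsharing" login/password) or passed as keyword arguments, and that network access exists on
# the first call; later calls reuse the cached YAML at PATH.
def _example_load_fairsharing():
    registry = load_fairsharing(use_tqdm=False)
    prefix, record = next(iter(registry.items()))
    # Each record is keyed by the prefix derived from its FAIRsharing DOI.
    return prefix, record.get("name")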
def ensure_fairsharing(force_download: bool = False, use_tqdm: bool = True, **kwargs):
"""Get the FAIRsharing registry."""
if PATH.exists() and not force_download:
return PATH
client = FairsharingClient(**kwargs)
# As of 2021-12-13, there are a bit less than 4k records that take about 3 minutes to download
rv = {
row["prefix"]: row
for row in tqdm(
client.iter_records(),
unit_scale=True,
unit="record",
desc="Downloading FAIRsharing",
disable=not use_tqdm,
)
}
with PATH.open("w") as file:
yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
return PATH
# These fields are the same in each record
REDUNDANT_FIELDS = {
"fairsharing-licence",
}
class FairsharingClient:
"""A client for programmatic access to the FAIRsharing private API."""
def __init__(
self,
login: Optional[str] = None,
password: Optional[str] = None,
base_url: Optional[str] = None,
):
"""Instantiate the client and get an appropriate JWT token.
:param login: FAIRsharing username
:param password: Corresponding FAIRsharing password
:param base_url: The base URL
"""
self.base_url = base_url or "https://api.fairsharing.org"
self.signin_url = f"{self.base_url}/users/sign_in"
self.records_url = f"{self.base_url}/fairsharing_records"
self.username = pystow.get_config(
"fairsharing", "login", passthrough=login, raise_on_missing=True
)
self.password = pystow.get_config(
"fairsharing", "password", passthrough=password, raise_on_missing=True
)
self.jwt = self.get_jwt()
self.session = requests.Session()
self.session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": f"Bearer {self.jwt}",
}
)
def get_jwt(self) -> str:
"""Get the JWT."""
payload = {
"user": {
"login": self.username,
"password": self.password,
},
}
res = requests.post(self.signin_url, json=payload).json()
return res["jwt"]
def iter_records(self) -> Iterable[Mapping[str, Any]]:
"""Iterate over all FAIRsharing records."""
yield from self._iter_records_helper(self.records_url)
def _preprocess_record(
self, record: MutableMapping[str, Any]
) -> Optional[MutableMapping[str, Any]]:
if "type" in record:
del record["type"]
record = {"id": record["id"], **record["attributes"]}
doi = record.get("doi")
if doi is None:
# Records without a DOI can't be resolved
url = record["url"]
if not url.startswith("https://fairsharing.org/fairsharing_records/"):
tqdm.write(f"{record['id']} has no DOI: {record['url']}")
return None
elif doi.startswith("10.25504/"):
record["prefix"] = record.pop("doi")[len("10.25504/") :]
else:
tqdm.write(f"DOI has unexpected prefix: {record['doi']}")
record["description"] = _removeprefix(
record.get("description"), "This FAIRsharing record describes: "
)
record["name"] = _removeprefix(record.get("name"), "FAIRsharing record for: ")
for key in REDUNDANT_FIELDS:
if key in record:
del record[key]
return record
def _iter_records_helper(self, url: str) -> Iterable[Mapping[str, Any]]:
res = self.session.get(url).json()
for record in res["data"]:
yv = self._preprocess_record(record)
if yv:
yield yv
next_url = res["links"].get("next")
if next_url:
yield from self._iter_records_helper(next_url)
def _removeprefix(s: Optional[str], prefix) -> Optional[str]:
if s is None:
return None
if s.startswith(prefix):
return s[len(prefix) :]
return s
if __name__ == "__main__":
ensure_fairsharing(force_download=True)
| 31.414013 | 98 | 0.59854 | [
"MIT"
] | cthoyt/fairsharing-client | src/fairsharing_client/api.py | 4,932 | Python |
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module computes evaluation metrics for MSMARCO dataset on the ranking task.
Command line:
python msmarco_eval_ranking.py <path_to_reference_file> <path_to_candidate_file>
Creation Date : 06/12/2018
Last Modified : 1/21/2019
Authors : Daniel Campos <[email protected]>, Rutger van Haasteren <[email protected]>
"""
import sys
import json
from collections import Counter
MaxMRRRank = 10
def load_reference_from_stream(f):
qids_to_relevant_passageids = {}
for line in f:
try:
sample = json.loads(line.strip())
qid = sample["question_id"]
if qid in qids_to_relevant_passageids:
pass
else:
qids_to_relevant_passageids[qid] = []
for answer_paragraph in sample["answer_paragraphs"]:
qids_to_relevant_passageids[qid].append(answer_paragraph["paragraph_id"])
except:
raise IOError('\"%s\" is not valid format' % line)
return qids_to_relevant_passageids
def load_reference(path_to_reference):
"""Load Reference reference relevant passages
Args:path_to_reference (str): path to a file to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).
"""
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids
def load_candidate_from_stream(f):
qid_to_ranked_candidate_passages = {}
try:
preds = json.load(f)
for qid in preds.keys():
tmp = [0] * 50
qid_to_ranked_candidate_passages[qid] = tmp
for rank, pid in enumerate(preds[qid][:50]):
qid_to_ranked_candidate_passages[qid][rank] = pid
except:
raise IOError('Submitted file is not valid format')
return qid_to_ranked_candidate_passages
def load_candidate(path_to_candidate):
"""Load candidate data from a file.
Args:path_to_candidate (str): path to file to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance
"""
with open(path_to_candidate, 'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Perform quality checks on the dictionaries
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
bool,str: Boolean whether allowed, message to be shown in case of a problem
"""
message = ''
allowed = True
# Create sets of the QIDs for the submitted and reference queries
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
# Check that we do not have multiple passages per query
for qid in qids_to_ranked_candidate_passages:
# Remove all zeros from the candidates
duplicate_pids = set(
[item for item, count in Counter(qids_to_ranked_candidate_passages[qid]).items() if count > 1])
if len(duplicate_pids - set([0])) > 0:
message = "Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}".format(
qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return allowed, message
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"""Compute MRR metric
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>}
"""
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
recall_q_top1 = set()
recall_q_top50 = set()
recall_q_all = set()
for qid in qids_to_ranked_candidate_passages:
if qid in qids_to_relevant_passageids:
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0, MaxMRRRank):
if candidate_pid[i] in target_pid:
MRR += 1.0 / (i + 1)
ranking.pop()
ranking.append(i + 1)
break
for i, pid in enumerate(candidate_pid):
if pid in target_pid:
recall_q_all.add(qid)
if i < 50:
recall_q_top50.add(qid)
if i == 0:
recall_q_top1.add(qid)
break
if len(ranking) == 0:
raise IOError("No matching QIDs found. Are you sure you are scoring the evaluation set?")
MRR = MRR / len(qids_to_relevant_passageids)
recall_top1 = len(recall_q_top1) * 1.0 / len(qids_to_relevant_passageids)
recall_top50 = len(recall_q_top50) * 1.0 / len(qids_to_relevant_passageids)
recall_all = len(recall_q_all) * 1.0 / len(qids_to_relevant_passageids)
all_scores['MRR@10'] = MRR
all_scores["recall@1"] = recall_top1
all_scores["recall@50"] = recall_top50
# all_scores["recall@all"] = recall_all
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores
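# A small worked example of the metric above: two queries where q1's relevant passage is ranked
# 1st and q2's is ranked 3rd, so MRR@10 = (1/1 + 1/3) / 2 ~= 0.667, recall@1 = 0.5 and
# recall@50 = 1.0. The candidate lists are padded to ten entries because compute_metrics indexes
# the first MaxMRRRank positions directly. The query and passage ids are illustrative only.
def _example_compute_metrics():
    qrels = {"q1": ["p1"], "q2": ["p7"]}
    runs = {
        "q1": ["p1"] + ["x%d" % i for i in range(9)],
        "q2": ["y0", "y1", "p7"] + ["y%d" % i for i in range(2, 9)],
    }
    return compute_metrics(qrels, runs)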
def compute_metrics_from_files(path_to_reference, path_to_candidate, perform_checks=True):
qids_to_relevant_passageids = load_reference(path_to_reference)
qids_to_ranked_candidate_passages = load_candidate(path_to_candidate)
if perform_checks:
allowed, message = quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
if message != '': print(message)
return compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
def main():
"""Command line:
python result_eval.py <path_to_reference_file> <path_to_candidate_file>
"""
if len(sys.argv) == 3:
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
else:
print('Usage: result_eval.py <reference ranking> <candidate ranking>')
exit()
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
result = dict()
for metric in sorted(metrics):
result[metric] = metrics[metric]
result_json = json.dumps(result)
print(result_json)
if __name__ == '__main__':
main()
| 37.715736 | 161 | 0.689367 | [
"Apache-2.0"
] | meta-soul/MetaSpore | demo/search/src/eval/evaluation.py | 7,430 | Python |
"""
Copyright [2021] [DenyS]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
__all__: typing.Sequence[str] = ("Info",)
T = typing.TypeVar("T")
class Info(typing.Generic[T]):
"""Annotation for filtering global variables.
Parameters:
-----------
value: :class:`TypeVar`
A parameter that stores the value of a certain variable.
Features:
---------
* `__repr__`: repr(Info())
Development Information.
* `__str__`: str(Info()) | Info()
Will output the value that stores value.
"""
def __init__(self, value: T) -> None:
self.value = value
def __repr__(self) -> str:
return f"Info(value={self.value})"
def __str__(self) -> str:
return str(self.value)
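# Illustrative usage sketch (not part of the original module).
def _example_usage() -> None:
    info: Info[int] = Info(42)
    assert str(info) == "42"
    assert repr(info) == "Info(value=42)"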
| 24.470588 | 72 | 0.669872 | [
"Apache-2.0"
] | Animatea/DiscordProgressbar | multibar/core/variants/lib_info.py | 1,248 | Python |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'simpleproject.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^simpleapp/', include('simpleapp.urls')),
]
| 26.461538 | 58 | 0.65407 | [
"BSD-2-Clause"
] | Shailendre/simpleproject | simpleproject/simpleproject/urls.py | 344 | Python |
# -*- coding: utf-8 -*-
"""
Demonstrates basic use of LegendItem
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
win = pg.plot()
win.setWindowTitle('pyqtgraph example: Legend')
# # option1: only for .plot(), following c1,c2 for example-----------------------
# win.addLegend(frame=False, colCount=2)
# bar graph
x = np.arange(10)
y = np.sin(x+2) * 3
bg1 = pg.BarGraphItem(x=x, height=y, width=0.3, brush='b', pen='w', name='bar')
win.addItem(bg1)
# curve
c1 = win.plot([np.random.randint(0,8) for i in range(10)], pen='r', symbol='t', symbolPen='r', symbolBrush='g', name='curve1')
c2 = win.plot([2,1,4,3,1,3,2,4,3,2], pen='g', fillLevel=0, fillBrush=(255,255,255,30), name='curve2')
# scatter plot
s1 = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120), name='scatter')
spots = [{'pos': [i, np.random.randint(-3, 3)], 'data': 1} for i in range(10)]
s1.addPoints(spots)
win.addItem(s1)
# # option2: generic method------------------------------------------------
legend = pg.LegendItem((80,60), offset=(70,20))
legend.setParentItem(win.graphicsItem())
legend.addItem(bg1, 'bar')
legend.addItem(c1, 'curve1')
legend.addItem(c2, 'curve2')
legend.addItem(s1, 'scatter')
if __name__ == '__main__':
pg.exec()
| 31.318182 | 126 | 0.646589 | [
"MIT"
] | 3DAlgoLab/pyqtgraph | examples/Legend.py | 1,378 | Python |
class TimedData(object):
"""
    Data structure for events carrying discrete time information (a timestamp or an interval)
"""
def __init__(self, data, time, timestamp=True):
"""
        The input parameters are:
        - "data": the piece of data to store (of any type)
        - "time": the time information associated with the data (an integer number)
        - "timestamp": boolean flag. If True, the "time" field is a timestamp; otherwise,
        it is a time interval
        """
        # Input validation: "time" parameter
        try:
            time = int(time)
        except:
            raise TypeError('"time" parameter is invalid. It must be an integer number')
        # Create the data structure
self.data = data
self.time = time
self.timestamp = True if timestamp else False
def __eq__(self, other):
c1 = self.data == other.data
c2 = self.time == other.time
c3 = self.timestamp == other.timestamp
return c1 and c2 and c3
def __str__(self):
return '(data=%s, time=%s, timestamp=%s)' % (self.data, self.time, self.timestamp)
def get_data(self):
"""
        Return the "data" field
"""
return self.data
def get_time(self):
"""
        Return the "time" field
"""
return self.time
class TimedArray(object):
"""
    Array of TimedData objects
"""
def __init__(self, timestamp=True, empty=True):
"""
La flag "timestamp" serve per specificare se la lista contiene dati con un timestamp (True) oppure un
intervallo temporale (False) associato: la flag "empty" permette invece di creare, se settata a False,
un TimedArray contenente al suo interno un nodo di partenza (d = 0, t = 0)
"""
self._list = []
self.timestamp = (timestamp is True)
if not empty:
# Creo il nodo di partenza
self.append(TimedData(0, 0, self.timestamp))
def __str__(self):
x = ''
first = True
for i in self._list:
if first:
x += str(i)
first = False
else:
x += ', ' + str(i)
return '(timestamp=%s, [%s]' % (self.timestamp, x)
def get_list(self):
"""
        Return the list of stored "TimedData" objects
"""
return self._list
def get_data_list(self):
"""
        Return the "data" attribute of every element of the array, as a list
"""
return map(lambda x: x.get_data(), self._list)
def get_time_list(self):
"""
        Return the "time" attribute of every element of the array, as a list
"""
return map(lambda x: x.get_time(), self._list)
def has_time_intervals(self):
"""
        Return True if the elements of the array have an associated time interval
"""
return self.timestamp is False
def append(self, item):
"""
        Add an element to the list
        """
        # Input validation: "item" parameter
        if not isinstance(item, TimedData):
            raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
        if item.timestamp != self.timestamp:
            raise ValueError(
                '"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp)
        # Append the element to the list
        self._list.append(item)
    def remove(self, item):
        """
        Remove "item" (if present) from the array
        """
        # Input validation: "item" parameter
        if not isinstance(item, TimedData):
            raise TypeError('the item to remove must be a "TimedData" object')
        # Remove the object, if present
        if item in self._list:
            self._list.remove(item)
    def remove_all(self, items):
        """
        Remove a list of "TimedData" objects from the array
        """
        # Input validation: "items" parameter
        if not isinstance(items, (list, tuple)):
            raise TypeError('"items" parameter must be an array')
        # Remove one object at a time
try:
for x in items:
self.remove(x)
except TypeError:
raise TypeError('the items list must contain only "TimedData" objects')
def filter(self, f):
"""
        Apply the function f to filter the contents of the array
"""
res = TimedArray(self.timestamp, empty=True)
res._list = filter(
f,
self._list
)
return res
def filter_data_range(self, start, end):
"""
        Filter the array by range of "data" values
"""
return self.filter(
lambda x: start <= x.get_data() <= end
)
def filter_time_range(self, start, end):
"""
        Filter the array by range of "time" values
"""
return self.filter(
lambda x: start <= x.get_time() <= end
)
def search(self, to_search):
"""
        Search within the contents of the array.
        If "timestamp" is True, the search key is the timestamp; otherwise,
        the key is the content to which the time interval is associated.
        """
        if self.timestamp:
            # The search key is "time", an integer number
            res = self.search_by_time(to_search)
        else:
            # The search key is "data", a value of any type
            res = self.search_by_data(to_search)
        # Search results
return res
def search_by_data(self, to_search):
"""
        Search the array by the "data" field
"""
research = (lambda x: x.data == to_search)
return filter(research, self._list)
def search_by_datas(self, search_params):
"""
        Search the array by the "data" field, where the search parameter is a list of values
        """
        # Input validation: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by data, with parameter "x"
            tmp = self.search_by_data(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def search_by_time(self, to_search):
"""
        Search the array by the "time" field
        The "to_search" parameter must be an integer number
"""
if not isinstance(to_search, (int, long)):
raise TypeError('the research parameter must be an integer number (timestamp)')
research = (lambda x: x.time == to_search)
return filter(research, self._list)
def search_by_times(self, search_params):
"""
        Search the array by the "time" field, where the search parameter is a list of values
        """
        # Input validation: "search_params" parameter
        if not isinstance(search_params, (list, tuple)):
            raise TypeError('"searchParams" parameter is invalid. It must be an array')
        # Run one search per specified parameter
        result = []
        for x in search_params:
            # Search by time, with parameter "x"
            tmp = self.search_by_time(x)
            # Append the partial results to the overall search result
            for t in tmp:
                result.append(t)
        # Results of the multiple search
return result
def contains(self, to_search):
"""
        Tell whether searching the array with the specified "to_search"
        key produces any result
"""
return len(self.search(to_search)) > 0
def update(self, to_search, new_value):
"""
        Update the content of the array elements that
        match the specified search criterion
        - "to_search" is the search key
        - "new_value" is the updated value to store
        """
        # Run a search
        items = self.search(to_search)
        # Define the update criterion
        if self.timestamp:
            # The search key is "time": update "data"
            # update_function = (lambda x: x.data = newValue)
            def update_function(x):
                x.data = new_value
        else:
            # The search key is "data": update "time"
            # update_function = (lambda x: x.time = newValue)
            def update_function(x):
                x.time = new_value
        # Update the elements
map(update_function, items)
def insert_or_update(self, time_to_search, data_value):
if self.contains(time_to_search):
self.update(time_to_search, data_value)
else:
self.append(
TimedData(data_value, time_to_search, self.timestamp)
)
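# Illustrative usage sketch (not part of the original module): a small timestamped log.
def _example_timed_array():
    events = TimedArray(timestamp=True)
    events.append(TimedData('start', 100))
    events.insert_or_update(200, 'stop')   # t=200 is not present yet, so a new node is appended
    events.insert_or_update(100, 'begin')  # t=100 already exists, so its data is updated
    return list(events.get_data_list()), list(events.get_time_list())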
| 35.541045 | 111 | 0.586247 | [
"MIT"
] | gavalle94/P2P-Sim | timed_structures.py | 9,525 | Python |
"""
This problem was asked by Amazon.
Given a matrix of 1s and 0s, return the number of "islands" in the matrix.
A 1 represents land and 0 represents water, so an island is a group of 1s
that are neighboring whose perimeter is surrounded by water.
For example, this matrix has 4 islands.
1 0 0 0 0
0 0 1 1 0
0 1 1 0 0
0 0 0 0 0
1 1 0 0 1
1 1 0 0 1
"""
moves = [
# row, col
    (0, 1),   # east
    (0, -1),  # west
    (1, 0),   # south
    (-1, 0),  # north
    (1, 1),   # south-east
    (1, -1),  # south-west
    (-1, 1),  # north-east
    (-1, -1)  # north-west
]
def mark_island(row, col, land_map, marker):
if row < 0 or col<0 or row>=len(land_map) or col >= len(land_map[0]):
return land_map
if land_map[row][col]== 0:
return land_map
if land_map[row][col]== marker:
return land_map
if land_map[row][col] == 1:
land_map[row][col] = marker
for r,c in moves:
land_map = mark_island(row+r, col+c, land_map, marker)
return land_map
def find_num_of_islands(land_map):
islands_found = 0
for i in range(len(land_map)):
for j in range(len(land_map[0])):
if land_map[i][j] == 1:
islands_found+= 1
land_map = mark_island(i, j, land_map, marker='i')
# print(*land_map, sep='\n')
return islands_found
if __name__ == '__main__':
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
print(find_num_of_islands(land_map)) # 4
land_map = [
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
]
    print(find_num_of_islands(land_map)) # 7
| 21.107527 | 74 | 0.496689 | [
"MIT"
] | RafayAK/CodingPrep | DailyCodingProblem/84_Amazon_Find_Islands_From_Matrix.py | 1,963 | Python |
# The following comments couldn't be translated into the new config version:
# untracked PSet maxEvents = {untracked int32 input = 2}
#include "Configuration/ReleaseValidation/data/Services.cff"
# include "Configuration/StandardSequences/data/FakeConditions.cff"
# untracked PSet options = {
# include "FWCore/Framework/test/cmsExceptionsFatalOption.cff"
# untracked bool makeTriggerResults = true
# }
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
#
# ecal trig prim producer
# # ecal tpg params
# es_module = EcalTrigPrimESProducer {
# untracked string DatabaseFile = "TPG.txt"
# #untracked string DatabaseFile = "TPG_RCT_internal.txt"
# }
#
process.load("FWCore.MessageService.MessageLogger_cfi")
# standard RCT configuration, including input scales
process.load("L1TriggerConfig.RCTConfigProducers.L1RCTConfig_cff")
# using standard scales
process.load("L1TriggerConfig.L1ScalesProducers.L1CaloScalesConfig_cff")
#include "L1TriggerConfig/L1ScalesProducers/data/L1CaloInputScalesConfig.cff"
process.load("L1Trigger.RegionalCaloTrigger.L1RCTTestAnalyzer_cfi")
process.load("L1Trigger.RegionalCaloTrigger.rctDigis_cfi")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(64)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('rct.root')
)
process.source = cms.Source("EmptySource")
process.rctInput = cms.EDProducer("RctInputTextToDigi",
inputFile = cms.FileInPath('L1Trigger/TextToDigi/test/data/rctTestInputFileElec.txt')
)
process.input = cms.Path(process.rctInput)
process.p4 = cms.Path(process.rctDigis*process.L1RCTTestAnalyzer)
process.schedule = cms.Schedule(process.input,process.p4)
process.L1RCTTestAnalyzer.ecalDigisLabel = 'rctInput'
process.L1RCTTestAnalyzer.hcalDigisLabel = 'rctInput'
process.rctDigis.ecalDigisLabel = 'rctInput'
process.rctDigis.hcalDigisLabel = 'rctInput'
| 32.965517 | 89 | 0.789749 | [
"Apache-2.0"
] | 4quarks/cmssw | L1Trigger/RegionalCaloTrigger/test/rctInputTest_cfg.py | 1,912 | Python |
from twisted.internet import defer
from signing.processor import expose
class SayHiImplementation(object):
"""
Responds with 'hello, %s' % arg
"""
@expose
def say_hi(self, identifier):
d = defer.Deferred()
d.callback('hello, %s' % identifier)
return d
| 22.846154 | 44 | 0.632997 | [
"Apache-2.0"
] | nkrowlan/signing-server | signing/processorimpl/sayhiimplementation.py | 297 | Python |
from django.urls import path
from . import views
urlpatterns = [
path('',views.savedata,name="savedata"),
]
| 18.875 | 44 | 0.728477 | [
"MIT"
] | MindMantraSIH/paathshaala | suggestions/urls.py | 151 | Python |
# -*- coding: utf-8 -*-
from collections import namedtuple
from subprocess import check_output
import click
from .utils import cd
try:
from subprocess import call as run
except ImportError:
from subprocess import run
class VueJs(object):
"""
Provide subprocess call to `npm` and `vue-cli`
"""
@staticmethod
def node_check():
"""
Node and npm version checker
"""
node_ver = check_output('node -v'.split()).decode('utf-8').rsplit('.')[0]
npm_ver = check_output('npm -v'.split()).decode('utf-8').rsplit('.')[0]
return all([node_ver > 'v5', npm_ver >= '4'])
@staticmethod
def vue_cli_check():
"""
vue-cli version checker
"""
try:
return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
except OSError:
return False
@staticmethod
def install_cli():
run('npm install -g vue-cli'.split())
@staticmethod
def project_setup(project):
run('vue init webpack {project}'.format(project=project).split())
@staticmethod
def install_dependencies(project):
with cd(project):
run('npm install'.split())
@staticmethod
def dev():
run('npm run dev'.split())
@staticmethod
def build():
run('npm run build'.split())
class VueJsBuilder(object):
@staticmethod
def startproject(project):
nt = namedtuple('Result', ['status', 'message', 'color'])
if VueJs.vue_cli_check():
VueJs.project_setup(project)
VueJs.install_dependencies(project)
return nt(True, 'Application and dependencies installed\n', 'green')
else:
return nt(False, 'Please install vue-cli via `vuecli` command', 'red')
@click.group()
def cli():
"""
Click entry point: vue-cli commands group
By convention all new cli has a cli function with a pass statement
"""
pass
@cli.command()
def vuecheck():
"""
Check if node > 5 and npm > 3 are installed
"""
if VueJs.node_check():
click.echo(click.style('Found node and npm', fg='green'))
else:
click.echo(click.style('Missing node and npm installation', fg='red'))
@cli.command()
def installvuecli():
"""
Install vue-cli
"""
if VueJs.vue_cli_check():
click.echo(click.style('Found valid vue-cli', fg='green'))
else:
VueJs.install_cli()
click.echo(click.style('Installed vue-cli globally', fg='green'))
@cli.command()
@click.argument('project')
def startvueapp(project):
"""
Init vue project via vue-cli
"""
result = VueJsBuilder.startproject(project)
click.echo(click.style(result.message, fg=result.color))
@cli.command()
def vuedev():
"""
Run frontend dev server via npm
"""
VueJs.dev()
@cli.command()
def vuebuild():
"""
Build Vue.js project via npm
"""
VueJs.build()
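# Illustrative sketch (not part of the original module): how the helpers above combine
# programmatically. The project name "myapp" is only an example.
def _example_programmatic_usage():
    """Check the toolchain, scaffold a hypothetical 'myapp' project and build it."""
    if VueJs.node_check() and not VueJs.vue_cli_check():
        VueJs.install_cli()
    result = VueJsBuilder.startproject('myapp')
    if result.status:
        with cd('myapp'):
            VueJs.build()
    return result.message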
| 22.792308 | 82 | 0.600405 | [
"MIT"
] | Timtech4u/python-vuejs | python_vuejs/vuejs.py | 2,963 | Python |
import numpy as np
from PIL import Image, ImageDraw
from scipy import interpolate, ndimage, stats, signal, integrate, misc, special
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
import astropy.units as u
import astropy.constants as c
import corner as triangle # formerly dfm/triangle
# from astropy.modeling import models, fitting
from astropy.modeling.models import custom_model
from astropy.modeling.fitting import LevMarLSQFitter # , SimplexLSQFitter
import matplotlib.pyplot as plt
import matplotlib as mpl
import emcee
#import ipdb;
import pdb
# # # # # # # # # # # # # # # # # # # # # #
# make iPython print immediately
import sys
oldsysstdout = sys.stdout
class flushfile():
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return object.__getattribute__(self.f, name)
def write(self, x):
self.f.write(x)
self.f.flush()
def flush(self):
self.f.flush()
# sys.stdout = flushfile(sys.stdout)
# sys.stdout = oldsysstdout
def rot_matrix(theta):
'''
rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix
'''
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def rectangle(c, w, h, angle=0, center=True):
'''
create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectagle is created and rotated with center
at zero, and then translated to center position
accepters centers
Default : center
tl, tr, bl, br
'''
cx, cy = c
# define initial polygon irrespective of center
x = -w / 2., +w / 2., +w / 2., -w / 2.
y = +h / 2., +h / 2., -h / 2., -h / 2.
# correct center if starting from corner
if center is not True:
if center[0] == 'b':
# y = tuple([i + h/2. for i in y])
cy = cy + h / 2.
else:
# y = tuple([i - h/2. for i in y])
cy = cy - h / 2.
if center[1] == 'l':
# x = tuple([i + w/2 for i in x])
cx = cx + w / 2.
else:
# x = tuple([i - w/2 for i in x])
cx = cx - w / 2.
R = rot_matrix(angle * np.pi / 180.)
c = []
for i in range(4):
xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
# coord switch to match ordering of FITs dimensions
c.append((cx + xr, cy + yr))
# print (cx,cy)
return c
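# Illustrative sketch (not part of the original module): rasterize a rotated rectangular
# mask with PIL's ImageDraw, which is the intended consumer of the polygon returned above.
# The image size, center, width, height and angle below are arbitrary.
def _example_rectangle_mask():
    img = Image.new('L', (100, 100), 0)
    poly = rectangle((50, 50), 40, 20, angle=30)
    ImageDraw.Draw(img).polygon(poly, outline=1, fill=1)
    return np.asarray(img)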
def comp(arr):
'''
returns the compressed version
of the input array if it is a
numpy MaskedArray
'''
try:
return arr.compressed()
except:
return arr
def mavg(arr, n=2, mode='valid'):
'''
returns the moving average of an array.
returned array is shorter by (n-1)
'''
if len(arr) > 400:
return signal.fftconvolve(arr, [1. / float(n)] * n, mode=mode)
else:
return signal.convolve(arr, [1. / float(n)] * n, mode=mode)
def mgeo(arr, n=2):
'''
Returns array of lenth len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n))
'''
a = []
for i in range(len(arr) - (n - 1)):
a.append(stats.gmean(arr[i:n + i]))
return np.asarray(a)
def avg(arr, n=2):
'''
NOT a general averaging function
return bin centers (lin and log)
'''
diff = np.diff(arr)
# 2nd derivative of linear bin is 0
if np.allclose(diff, diff[::-1]):
return mavg(arr, n=n)
else:
return np.power(10., mavg(np.log10(arr), n=n))
# return mgeo(arr, n=n) # equivalent methods, only easier
def shift_bins(arr,phase=0,nonneg=False):
# assume original bins are nonneg
if phase != 0:
diff = np.diff(arr)
if np.allclose(diff,diff[::-1]):
diff = diff[0]
arr = arr + phase*diff
#pre = arr[0] + phase*diff
return arr
else:
arr = np.log10(arr)
diff = np.diff(arr)[0]
arr = arr + phase * diff
return np.power(10.,arr)
else:
return arr
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'''
llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n
'''
xmin, xmax = float(xmin), float(xmax)
nisNone = n is None
dxisNone = dx is None
dexisNone = dex is None
if nisNone & dxisNone & dexisNone:
        print('Error: Defaulting to 10 linear steps')
n = 10.
nisNone = False
# either user specifies log or gives dex and not dx
log = log or (dxisNone and (not dexisNone))
if log:
if xmin == 0:
print("log(0) is -inf. xmin must be > 0 for log spacing")
xmin, xmax = np.log10(xmin), np.log10(xmax)
# print nisNone, dxisNone, dexisNone, log # for debugging logic
if not nisNone: # this will make dex or dx if they are not specified
if log and dexisNone: # if want log but dex not given
dex = (xmax - xmin) / n
# print dex
elif (not log) and dxisNone: # else if want lin but dx not given
dx = (xmax - xmin) / n # takes floor
#print dx
if log:
#return np.power(10, np.linspace(xmin, xmax , (xmax - xmin)/dex + 1))
return np.power(10, np.arange(xmin, xmax + dex, dex))
else:
#return np.linspace(xmin, xmax, (xmax-xmin)/dx + 1)
return np.arange(xmin, xmax + dx, dx)
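# Illustrative sketch (not part of the original module): linear vs. logarithmic spacing.
def _example_llspace():
    lin = llspace(1., 10., n=9)        # 1, 2, ..., 10 with dx = 1
    log = llspace(1., 100., dex=0.5)   # 1, 10**0.5, 10, 10**1.5, 100
    return lin, log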
def nametoradec(name):
'''
Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually...
'''
if 'string' not in str(type(name)):
rightascen = []
declinatio = []
for n in name:
ra, de = n.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs',
unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return np.array(rightascen), np.array(declinatio)
else:
ra, de = name.split('+')
ra = ra[0:2] + ':' + ra[2:4] + ':' + ra[4:6] + '.' + ra[6:8]
de = de[0:2] + ':' + de[2:4] + ':' + de[4:6]
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return np.array(coord.ra.value), np.array(coord.dec.value)
def get_ext(extmap, errmap, extwcs, ra, de):
'''
Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates
'''
try:
xp, yp = extwcs.all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
xp, yp = WCS(extwcs).all_world2pix(
np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[yp[int(round(i))], xp[int(round(i))]])
if errmap is not None:
err.append(errmap[yp[int(round(i))], xp[int(round(i))]])
except IndexError:
ext.append(np.nan)
if errmap is not None:
err.append(np.nan)
if errmap is not None:
return np.array(ext), np.array(err)
else:
return np.array(ext), None
def pdf(values, bins):
'''
** Normalized differential area function. **
(statistical) probability denisty function
normalized so that the integral is 1
and. The integral over a range is the
probability of the value is within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, avg(x)
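# Illustrative sketch (not part of the original module): the returned pdf integrates to ~1
# over the binned range; the normal sample and bin edges below are arbitrary.
def _example_pdf():
    values = np.random.normal(0., 1., 1000)
    bins = np.linspace(-4., 4., 41)
    p, centers = pdf(values, bins)
    total = np.sum(p * np.diff(bins))  # should be close to 1
    return p, centers, total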
def pdf2(values, bins):
'''
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, avg(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
'''
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins[:-1]
'''
if hasattr(bins,'__getitem__'):
range = (np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
c = np.cumsum(h / np.sum(h, dtype=float)) # cumulative fraction below bin_k
# append 0 to beginning because P( X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
'''
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariante to binning
Plot versus bins[:-1]
Not normalized to 1
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0., c), bins
def area_function(extmap, bins):
'''
Complimentary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
'''
c, bins = cdf2(extmap, bins)
return c.max() - c, bins
def diff_area_function(extmap, bins,scale=1):
'''
See pdf2
'''
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx*scale, avg(bins)
def log_diff_area_function(extmap, bins):
'''
See pdf2
'''
s, bins = diff_area_function(extmap, bins)
g=s>0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, avg(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
'''
M(>Ak), mass weighted complimentary cdf
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False, weights=values*aktomassd*scale)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def hist(values, bins, err=False, density=False, **kwargs):
'''
really just a wrapper for numpy.histogram
'''
if hasattr(bins,'__getitem__'):
range=(np.nanmin(bins),np.nanmax(bins))
else:
range = None
hist, x = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if (err is None) or (err is False):
return hist.astype(np.float), avg(x)
else:
return hist.astype(np.float), avg(x), np.sqrt(hist)
def bootstrap(X, X_err=None, n=None, smooth=False):
'''
(smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide is doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided
'''
if X_err is None:
smooth = False
if n is None: # default n
n = len(X)
resample_i = np.random.randint(0,len(X),size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, \
np.asarray(X_err)[resample_i])
return X_resample
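# Illustrative sketch (not part of the original module): a smooth bootstrap that perturbs
# each resampled point by its quoted measurement error.
def _example_bootstrap():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    x_err = np.array([0.1, 0.1, 0.2, 0.2])
    return bootstrap(x, X_err=x_err, n=1000, smooth=True)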
def num_above(values, level):
return np.sum((values >= level) & np.isfinite(values), dtype=np.float)
def num_below(values, level):
return np.sum((values < level) & np.isfinite(values), dtype=np.float)
def alpha_ML(data, xmin,xmax):
'''
uses maximum likelihood to estimation
to determine power-law and error
From Clauset et al. 2010
'''
data = data[np.isfinite(data)]
data = data[(data >= xmin) & (data <= xmax)]
alpha = 1 + len(data) * (np.sum(np.log(data / xmin))**(-1))
error = (alpha -1 )/np.sqrt(len(data))
#loglike = np.sum((-1+alpha)*np.log(xmin)-alpha*np.log(data)+np.log(-1+alpha))
N = len(data)
loglike = N*np.log(alpha-1) - N*np.log(xmin) - alpha * np.sum(np.log(data/xmin))
return alpha , error, loglike, xmin, xmax
def sigconf1d(n):
cdf = (1/2.)*(1+special.erf(n/np.sqrt(2)))
return (1-cdf)*100,100* cdf,100*special.erf(n/np.sqrt(2))
def surfd(X, Xmap, bins, Xerr = None, Xmaperr = None, boot=False, scale=1., return_err=False, smooth=False):
'''
call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x)
'''
# get dn/dx
if boot:
n = np.histogram(bootstrap(X,Xerr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(bootstrap(Xmap,Xmaperr,smooth=True), bins = bins, range=(bins.min(),bins.max()))[0] * scale
else:
n = np.histogram(X, bins = bins, range=(bins.min(),bins.max()))[0]
s = np.histogram(Xmap, bins = bins, range=(bins.min(),bins.max()))[0] * scale
if not return_err:
return n / s
else:
return n / s, n / s * np.sqrt(1. / n - scale / s)
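# Illustrative sketch (not part of the original module): surface density of a sample
# relative to the map it was drawn from, in logarithmic extinction bins. The lognormal
# map and sample sizes below are arbitrary; empty bins simply return NaN.
def _example_surfd():
    ext_map = np.random.lognormal(0., 0.5, 10000)
    sample = np.random.choice(ext_map, 500)
    bins = llspace(0.2, 5., dex=0.2)
    sigma, sigma_err = surfd(sample, ext_map, bins, scale=1., return_err=True)
    return sigma, sigma_err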
def alpha(y, x, err=None, return_kappa=False, cov=False):
'''
this returns -1*alpha, and optionally kappa and errors
'''
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list(a1 & a2))
y = np.log(y[a])
x = np.log(x[a])
if err is None:
p, covar = np.polyfit(x, y, 1, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
else:
err = err[a]
err = err / y
p, covar = np.polyfit(x, y, 1, w=1. / err**2, cov=True)
m, b = p
me, be = np.sqrt(np.sum(covar * [[1, 0], [0, 1]], axis=1))
me, be
if return_kappa:
if cov:
return m, np.exp(b), me, be
else:
return m, np.exp(b)
else:
if cov:
return m, me
else:
return m
def Heaviside(x):
return 0.5 * (np.sign(x) + 1.)
def schmidt_law(Ak, theta):
'''
schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha)
'''
if len(theta) == 2:
beta, kappa = theta
return kappa * (Ak ** beta)
elif len(theta) == 3:
beta, kappa, Ak0 = theta
sfr = Heaviside(Ak - Ak0) * kappa * (Ak ** beta)
sfr[Ak < Ak0] = 0#np.nan # kappa * (Ak0 ** beta)
return sfr
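# Illustrative sketch (not part of the original module): evaluating both the two-parameter
# and the thresholded three-parameter forms of the law on an arbitrary extinction grid.
def _example_schmidt_law():
    Ak = np.linspace(0.1, 2.0, 20)
    sfr_two = schmidt_law(Ak, (2.0, 1.0))         # (beta, kappa)
    sfr_three = schmidt_law(Ak, (2.0, 1.0, 0.8))  # (beta, kappa, Ak0 threshold)
    return sfr_two, sfr_three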
def lmfit_powerlaw(x, y, yerr=None, xmin=-np.inf, xmax=np.inf, init=None, maxiter=1000000):
@custom_model
def model(x, beta=init[0], kappa=init[1]):
return np.log(kappa * (np.exp(x) ** beta))
keep = np.isfinite(1. / y) & (x >= xmin) & (x <= xmax)
if yerr is not None:
keep = keep & np.isfinite(1. / yerr)
m_init = model()
fit = LevMarLSQFitter()
#weights = (yerr / y)[keep]**(-2.)
m = fit(m_init, np.log(x[keep]), np.log(y[keep]), maxiter=maxiter)
return m, fit
def fit_lmfit_schmidt(x, y, yerr, init=None):
m, _ = lmfit_powerlaw(x,y,yerr,init=init)
return m.parameters
def emcee_schmidt(x, y, yerr, pos=None, pose=None,
nwalkers=None, nsteps=None, burnin=200,verbose=True):
'''
emcee_schmidt provides a convenient wrapper for fitting the schimdt law
to binned x,log(y) data. Generally, it fits a normalization and a slope
'''
def model(x, theta):
'''
theta = (beta, kappa)
'''
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = 1 / yerr**2
# Poisson statistics -- not using this
#mu = (yerr)**2 # often called lambda = poisson variance for bin x_i
#resid = np.abs(y - mod) # where w calculate the poisson probability
#return np.sum(resid * np.log(mu) - mu) - np.sum(np.log(misc.factorial(resid)))
#######################################################
########## CHI^2 log-likelihood #######################
return -0.5 * (np.sum((y - mod)**2 * inv_sigma2))# - 0.5 * 3 * np.log(np.sum(k))
def lnprior(theta):
# different priors for different version of
# the schmidt law
if len(theta) == 3:
beta, kappa, Ak0 = theta
c3 = 0. < Ak0 <= 5.
c4 = True
else:
beta, kappa = theta
c3 = True
c4 = True
c1 = 0 <= beta <= 6# Never run's into this region
c2 = 0 <= kappa # Never run's into this region
if c1 and c2 and c3 and c4:
return 0.0
return -np.inf
def lnprob(theta, x, y, yerr):
## update likelihood
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = len(pos), nwalkers
pos = [np.array(pos) + np.array(pose) * 0.5 *
(0.5 - np.random.rand(ndim)) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
# Get input values
# x, y, yerr = sampler.args
samples = sampler.chain[:, burnin:, :].reshape((-1, sampler.ndim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose: print(sampler.acor)
if verbose:
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
return sampler, np.median(samples, axis=0), np.std(samples, axis=0)
def fit(bins, samp, samperr, maps, mapserr, scale=1., sampler=None, log=False,
pos=None, pose=None, nwalkers=100, nsteps=1e4, boot=1000, burnin=200,
threshold=False, threshold2=False,verbose=True):
'''
# # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for you sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers
'''
#print 'Hi!. It\'s hammer time...'
# x values are bin midpoints
x = avg(bins) # assume if log=True, then bins are already log
# x = bins[:-1]
# y = np.asarray([surfd(samp,maps,bins,boot=True,scale=scale) for i in xrange(boot)])
# yerr = np.nanstd(y,axis=0)
#if log:
# samp = np.log10(samp)
# maps = np.log10(maps)
# bins = np.log10(bins) # because bins doesn't get used again after surfd
y, yerr = surfd(samp, maps, bins, scale=scale, return_err=True)
###########################################+
###### ADDED FOR SHIFTING EXPERIMENT ######+
###########################################+
bins2 = shift_bins(bins,0.5)
x2 = avg(bins2)
y2, yerr2 = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x,x2))
concaty = np.concatenate((y,y2))
concatyerr = np.concatenate((yerr,yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = np.isfinite(1. / y) & np.isfinite(yerr) & np.isfinite(1./yerr)
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
# initialize walker positions and walker bundle size
init = alpha(y, x, return_kappa=True, cov=True)
if pos is None:
pos = init[:2]
if pose is None:
if np.isnan(init[2] + init[3]):
pose = (1, 1)
else:
pose = (init[2], init[3])
if threshold | threshold2:
pos = pos + (0.4,)
pose = pose + (0.2,)
if threshold2:
pos = pos + (8.,)
pose = pose + (.5,)
#print pos
#print pose
pos = np.asarray(pos)
pose = .1*pos#np.asarray(pose)
# This function only fits sources, it doesn't plot, so don't pass
# and emcee sampler type. it will spit it back out
# # # # # # # RUN EMCEE # # # # # # #
# pdb.set_trace()
if sampler is None:
if verbose: print('Sampler autocorrelation times . . .')
sampler, theta, theta_std = emcee_schmidt(x, np.log(y), yerr/y,
pos=pos, pose=pose,
nwalkers=nwalkers,
nsteps=nsteps, burnin=burnin,verbose=verbose)
else:
print('Next time don\'t give me a ' + str(type(sampler)) + '.')
#
try:
return sampler, x, y, yerr, theta, theta_std
except:
return sampler, x, y, yerr
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None,
bins=None, scale=None, triangle_plot=True):
'''
model: should pass schmidt_law()
'''
try:
mpl.style.use('john')
except:
None
# Get input values
# x, y, yerr = sampler.args
if hasattr(sampler,'__getitem__'):
chain = sampler
dim = chain.shape[-1]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape((-1, dim))
# # Print out final values # #
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T # Get percentiles for each parameter
n_params = len(theta_mcmc[:,1])
#print n_params
for i, item in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}','A_{K,f}']
inserts = (j[i], item[1], item[2] - item[1], item[1] - item[0])
print('%s = %0.2f (+%0.2f,-%0.2f)' % inserts)
# Plot corner plot
if triangle_plot:
if n_params == 3:
labels = ['beta', 'kappa', 'A_{K,0}']
elif n_params == 4:
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
#print labels
_ = triangle.corner(samples, labels=labels,
truths=theta_mcmc[:, 1], quantiles=[.16, .84],
verbose=False)
# generate schmidt laws from parameter samples
xln = np.logspace(np.log10(x.min()*.5),np.log10(x.max()*2.),100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
# get percentile bands
percent = lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0)
# Plot fits
fig = plt.figure()
# Plot data with errorbars
plt.plot(xln, percent(50), 'k') # 3 sigma band
# yperr = np.abs(np.exp(np.log(y)+yerr/y) - y)
# ynerr = np.abs(np.exp(np.log(y)-yerr/y) - y)
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'],
loc='upper left', fontsize=12)
# draw 1,2,3 sigma bands
plt.fill_between(xln, percent(1), percent(99), color='0.9') # 1 sigma band
plt.fill_between(xln, percent(2), percent(98), color='0.75') # 2 sigma band
plt.fill_between(xln, percent(16), percent(84), color='0.5') # 3 sigma band
plt.loglog(nonposy='clip')
return plt.gca()
def flatchain(chain):
return chain.reshape((-1,chain.shape[-1]))
def norm_chain(chain, axis=0):
std = np.std(flatchain(chain), axis=axis)
med = np.median(flatchain(chain), axis=axis)
return (chain-med)/std
def plot_walkers(sampler,limits = None, bad = None):
'''
sampler : emcee Sampler class
'''
if hasattr(sampler,'__getitem__'):
chain = sampler
ndim = chain.shape[-1]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
if hasattr(limits,'__getitem__'):
limits += [None] * (3-len(limits))
slices = slice(limits[0],limits[1],limits[2])
else:
slices = slice(None,limits,None)
for w,walk in enumerate(chain[:,slices,:]):
if bad is None:
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for p, param in enumerate(walk.T):
ax = plt.subplot(ndim, 1, p + 1)
ax.plot(param, color, alpha=.75, lw=0.75)
# ax.set_ylim(param.min()*0.5,param.max()*1.5)
# ax.semilogy()
plt.tight_layout()
return fig
def tester():
print('hi ya\'ll')
| 30.970484 | 116 | 0.564692 | [
"MIT"
] | johnarban/arban | schmidt_funcs.py | 26,232 | Python |
"""A word2vec implementation using Tensorflow and estimators."""
import os
from collections import defaultdict
import logging
import tensorflow as tf
# from tensorflow.python import debug as tf_debug # pylint: disable=E0611
import word2vec.utils.datasets as datasets_utils
import word2vec.models.word2vec as w2v_model
from word2vec.evaluation.men import MEN
logger = logging.getLogger(__name__)
__all__ = ('Word2Vec')
class Word2Vec():
"""Tensorflow implementation of Word2vec."""
def __init__(self):
"""Initialize vocab dictionaries."""
self._words = []
self._counts = []
self._total_count = 0
@property
def vocab_size(self):
"""Return the number of items in vocabulary.
Since we use len(word_freq_dict) as the default index for UKN in
the index_table, we have to add 1 to the length
"""
return len(self._words) + 1
def build_vocab(self, data_filepath, vocab_filepath, min_count):
"""Create vocabulary-related data."""
logger.info('Building vocabulary from file {}'.format(data_filepath))
logger.info('Loading word counts...')
if self.vocab_size > 1:
logger.warning('This instance of W2V\'s vocabulary does not seem '
'to be empty. Erasing previously stored vocab...')
self._words, self._counts, self._total_count = [], [], 0
word_count_dict = defaultdict(int)
with open(data_filepath, 'r') as data_stream:
for line in data_stream:
for word in line.strip().split():
word_count_dict[word] += 1
logger.info('Saving word frequencies to file: {}'
.format(vocab_filepath))
with open(vocab_filepath, 'w') as vocab_stream:
# words need to be sorted in decreasing frequency to be able
# to rely on the default tf.nn.log_uniform_candidate_sampler
# later on in the tf.nn.nce_loss
for word, count in sorted(word_count_dict.items(),
key=lambda x: x[1], reverse=True):
print('{}\t{}'.format(word, count), file=vocab_stream)
if count >= min_count:
self._words.append(word)
self._counts.append(count)
self._total_count += count
def load_vocab(self, vocab_filepath, min_count):
"""Load a previously saved vocabulary file."""
logger.info('Loading word counts from file {}'.format(vocab_filepath))
self._words, self._counts, self._total_count = [], [], 0
with open(vocab_filepath, 'r', encoding='UTF-8') as vocab_stream:
for line in vocab_stream:
word_count = line.strip().split('\t', 1)
word, count = word_count[0], int(word_count[1])
if count >= min_count:
self._words.append(word)
self._counts.append(count)
self._total_count += count
logger.info('Done loading word counts')
# pylint: disable=R0914,W0613
def train(self, train_mode, training_data_filepath, model_dirpath,
batch_size, embedding_size, num_neg_samples,
learning_rate, window_size, num_epochs, sampling_rate,
p_num_threads, t_num_threads, shuffling_buffer_size,
save_summary_steps, save_checkpoints_steps, keep_checkpoint_max,
log_step_count_steps, debug, debug_port, xla):
"""Train Word2Vec."""
if self.vocab_size == 1:
raise Exception('You need to build or load a vocabulary before '
'training word2vec')
if train_mode not in ('cbow', 'skipgram'):
raise Exception('Unsupported train_mode \'{}\''.format(train_mode))
sess_config = tf.compat.v1.ConfigProto(log_device_placement=True)
sess_config.intra_op_parallelism_threads = t_num_threads
sess_config.inter_op_parallelism_threads = t_num_threads
# if xla:
# sess_config.graph_options.optimizer_options.global_jit_level = \
# tf.OptimizerOptions.ON_1 # JIT compilation on GPU
run_config = tf.estimator.RunConfig(
session_config=sess_config, save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=keep_checkpoint_max,
log_step_count_steps=log_step_count_steps)
estimator = tf.estimator.Estimator(
model_fn=w2v_model.model,
model_dir=model_dirpath,
config=run_config,
params={
'mode': train_mode,
'vocab_size': self.vocab_size,
'batch_size': batch_size,
'embedding_size': embedding_size,
'num_neg_samples': num_neg_samples,
'learning_rate': learning_rate,
'words': self._words,
'p_num_threads': p_num_threads,
'xla': xla,
'men': MEN(os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'resources', 'MEN_dataset_natural_form_full'))
})
# waiting for v2 fix in tf.summary.FileWriter:
tf.compat.v1.disable_eager_execution()
if debug:
raise Exception('Unsupported parameter: waiting for the TF team '
'to release v2 equivalents for TensorBoardDebugHook')
# hooks = [tf.estimator.ProfilerHook(
# save_steps=save_summary_steps, show_dataflow=True,
# show_memory=True, output_dir=model_dirpath),
# tf_debug.TensorBoardDebugHook('localhost:{}'
# .format(debug_port))]
# else:
hooks = [tf.estimator.ProfilerHook(
save_steps=save_summary_steps, show_dataflow=True,
show_memory=True, output_dir=model_dirpath)]
estimator.train(
input_fn=lambda: datasets_utils.get_w2v_train_dataset(
training_data_filepath, train_mode, self._words, self._counts,
self._total_count, window_size, sampling_rate, batch_size,
num_epochs, p_num_threads, shuffling_buffer_size),
hooks=hooks)
| 44.950704 | 81 | 0.608491 | [
"MIT"
] | akb89/word2vec | word2vec/estimators/word2vec.py | 6,383 | Python |
from .gui import *
| 18 | 18 | 0.722222 | [
"MIT"
] | jakebrehm/ezpz | lemons/__init__.py | 18 | Python |
class Persona:
def __init__(self):
self.edad = 18
self.nombre = "juan"
print "Se ha creado a", self.nombre, "de", self.edad
def hablar(self,palabras ="No se que decir"):
print self.nombre,': ', palabras
juan = Persona()
juan.hablar()
juan.hablar("Hola estoy hablando")
| 24.846154 | 61 | 0.591331 | [
"MIT"
] | gcardosov/PythonAprendeOrg | EjemploMetodos.py | 323 | Python |
import argparse
import collections
import fnmatch
import os.path
import pprint
import re
import sys
#######################
### OSimStatsHelper ###
#######################
class OSimStatsHelper:
"""Takes a list of stats and returns a stat containing their summation by each sample."""
@staticmethod
def sumStats(stats):
totalStat = {
'abs' : { 'units' : stats[0]['abs']['units'] },
'category' : stats[0]['category'],
'container' : "Total",
'name' : stats[0]['name'],
'fullName' : ".".join((stats[0]['category'], "Total", stats[0]['name']))
}
totalStat['abs']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'abs')
#print "Summing %s" % (totalStat['name'])
if 'delta' in stats[0]:
totalStat['delta'] = { 'units' : stats[0]['delta']['units'] }
totalStat['delta']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'delta')
return totalStat
@staticmethod
def sumStatsToValues(stats, type):
totals = []
for stat in stats:
values = stat[type]['values']
for i in range(0, len(values)):
if i + 1 > len(totals):
totals.append(values[i])
else:
totals[i] += values[i]
return totals
@staticmethod
def splitStatsFullName(fullName):
return statNamePartsRe.match(fullName).groups();
#lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
#lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[\d\.-]+)(?: (?:\D+))?(?P<delta>[\d\.-]+)?")
lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[^,]+)(?:, )?(?P<delta>[^,]+)?")
statsReportStartRe = re.compile(" - \*\*\* STATS REPORT AT")
statNamePartsRe = re.compile("^(.*?)\.(.*)\.(.*?)$");
valueRe = re.compile("([^ %/]+)(.*)")
#######################
### OSimStatsCorpus ###
#######################
class OSimStatsCorpus:
_data = {}
_samplesCount = 0
@property
def data(self):
return self._data
def __init__(self):
self.clear()
def __len__(self):
return self._samplesCount
@staticmethod
def parseValue(rawValue, valueRe):
valueMatch = valueRe.match(rawValue)
return float(valueMatch.group(1)), valueMatch.group(2)
def getStat(self, statFullName):
"""
Get a statistic given its full name.
FIXME: Does not allow one to interrogate a given set yet.
"""
if self._data == None:
return None
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
        for set in self._data.values():
if category in set and container in set[category] and name in set[category][container]:
return set[category][container][name]
else:
return None
def getStats(self, setGlob = "*", selectGlob = "*"):
"""
Returns a dictionary of stats where fullName => stat.
If glob is specified then this is used to match stats using their full name
If no stats are found then an empty dictionary is returned.
"""
if selectGlob == None:
selectGlob = "*"
if setGlob == None:
setGlob = "*"
matchingStats = collections.OrderedDict()
for setName, set in self._data.items():
if fnmatch.fnmatch(setName, setGlob):
for category, containers in set.items():
for container, stats in containers.items():
for statName, stat in stats.items():
if fnmatch.fnmatch(stat['fullName'], selectGlob):
matchingStats[stat['fullName']] = stat
return matchingStats
def clear(self):
"""Clear out any existing dataset."""
self._data = {}
self._samplesCount = 0
def load(self, path):
"""Load OpenSimulator stats log data from the given path and merge into any existing data."""
# Set structure
# category : {
# container : {
# stat : {
# 'abs' : { 'values' : [], 'units' : "" },
# 'delta' : { 'values' : [], 'units' : "" }
# 'name' : string
# 'fullName' : string
# 'category' : string
# 'container' : string
# }
# delta may not be present
with open(path) as f:
setName = os.path.splitext(os.path.basename(path))[0]
print "Loading set %s" % (setName)
if not setName in self._data:
self._data[setName] = {}
set = self.data[setName]
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
#(category, container, name) = statFullName.split(".")
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
rawValue = match.group("abs")
#print match.lastindex
#print rawValue
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not category in set:
set[category] = collections.OrderedDict()
if not container in set[category]:
set[category][container] = collections.OrderedDict()
if not name in set[category][container]:
entry = {
'abs' : { 'values' : [], 'units' : value[1] },
'category' : category,
'container' : container,
'fullName' : statFullName,
'name' : name
}
set[category][container][name] = entry
stat = set[category][container][name]
stat['abs']['values'].append(value[0])
# Handle delta value if present
if match.group("delta"):
rawValue = match.group("delta")
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not 'delta' in stat:
stat['delta'] = { 'values' : [], 'units' : value[1] }
stat['delta']['values'].append(value[0])
else:
match = statsReportStartRe.search(line)
if (match != None):
self._samplesCount += 1
else:
print "Ignoring [%s]" % (line) | 38.068627 | 109 | 0.425573 | [
"BSD-3-Clause-Clear"
] | ConnectionMaster/opensimulator-tools | analysis/opensimulator-stats-analyzer/src/osta/osta.py | 7,766 | Python |
import os
import sys
import errno
import random
import pickle
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import DatasetFolder
from torchvision import transforms
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
#==============================================================================
# Network definition
#==============================================================================
class SE_HIPP_3D_Net(nn.Module):
def __init__(self):
super(SE_HIPP_3D_Net, self).__init__()
self.conv1 = nn.Conv2d(28, 32, kernel_size=4, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64*7*7, 120)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(120, 2)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
x = self.bn2(x)
x = self.relu(x)
# print("size", x.size())
x = x.view(-1, self.num_flat_features(x))
x = self.dropout(x)
# print("size", x.size())
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
        return num_features
| 27.626866 | 79 | 0.562939 | [
"MIT"
] | kaderghal/ADNI_Data_processing | src/pytorch-template/old/models/baseline_3D_single.py | 1,851 | Python |
import oneflow as flow
import oneflow as flow_exp
from oneflow import Tensor
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
scores_inds = flow_exp.argsort(scores, dim=0, descending=True)
boxes = flow._C.gather(boxes, scores_inds, axis=0)
_nms_op = (
flow_exp.builtin_op("nms")
.Input("in")
.Output("out")
.Attr("iou_threshold", iou_threshold)
.Attr("keep_n", -1)
.Build()
)
keep = _nms_op(boxes)[0]
index = flow_exp.squeeze(flow_exp.argwhere(keep), dim=[1])
return flow._C.gather(scores_inds, index, axis=0)
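# Illustrative usage sketch (not part of the original module). `boxes` is assumed to be an
# (N, 4) float tensor of [x1, y1, x2, y2] corners and `scores` an (N,) float tensor; the
# call returns the indices of the kept boxes, ordered by descending score.
def example_nms(boxes: Tensor, scores: Tensor) -> Tensor:
    return nms(boxes, scores, iou_threshold=0.5)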
| 30.55 | 71 | 0.648118 | [
"Apache-2.0"
] | BakerMara/models | ops/nms.py | 611 | Python |
#!/usr/bin/python
"""Cartesian execution of options for experiments"""
import itertools
from pprint import pprint
import os
# GROUPS = [
# ('train', {'type': 'option',
# 'order': 0,
# 'values': ['train5k']}),
# ('lang', {'type': 'option',
# 'order': 1,
# 'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
# ('infuse', {'type': 'option',
# 'order': 2,
# 'values': ['true', 'false']}),
# ('maxmsr', {'type': 'option',
# 'order': 3,
# 'values': '1'.split(',')})
# ]
#
GROUPS = [
('train', {'type': 'option',
'order': 0,
'values': ['train', 'train5k']}),
('lang', {'type': 'option',
'order': 1,
'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
('infuse', {'type': 'option',
'order': 2,
'values': ['true', 'false']}),
('maxmsr', {'type': 'option',
'order': 3,
'values': '1,2,4,8'.split(',')})
]
# GROUPS = [
# ('gram', {'type': 'file',
# 'use': 'agg',
# 'order': 0,
# 'values': ['unigram', 'bigram', 'trigram', 'nextunigram', 'nextbigram', 'nexttrigram']}),
# # ('prev', {'type': 'file',
# # 'use': 'optional',
# # 'value': 'prev'}),
# ('pop', {'type': 'option',
# 'use': 'optional',
# 'value': '-pop'})
# ]
# BASE = """nohup ./chukuparser md -f $conf -td corpus/train4k.hebtb.gold.lattices -tl corpus/train4k.hebtb.pred.lattices -in corpus/dev.hebtb.gold.conll.pred.lattices -ing corpus/dev.hebtb.gold.conll.gold.lattices -om devo.$exp.b32.hebtb.mapping -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -wb -bconc $flags > runstatus.$exp.b32"""
MALEARN = """nohup ./yap malearn -lattice spmrl/train.$lang.gold.conll.tobeparsed.tagged.lattices -raw spmrl/train.$lang.gold.conll.tobeparsed.raw -out $lang.json > malearn.$exp.out"""
MATRAIN = """nohup ./yap ma -dict $lang.json -raw spmrl/$train.$lang.gold.conll.tobeparsed.raw -out $train.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > matrain.$exp.out"""
MADEV = """nohup ./yap ma -dict $lang.json -raw spmrl/dev.$lang.gold.conll.tobeparsed.raw -out dev.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > madev.$exp.out"""
MD = """nohup ./yap md -f conf/standalone.md.yaml -td spmrl/$train.$lang.gold.conll.tobeparsed.tagged.lattices -tl $train.$lang.$maxmsr.analyzed.lattices -in dev.$lang.$maxmsr.analyzed.lattices -ing spmrl/dev.$lang.gold.conll.tobeparsed.tagged.lattices -om devo.$train_$lang_$maxmsr_$infuse.mapping -infusedev=$infuse -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -bconc -pop > runstatus.$exp.out"""
cmds = [MALEARN, MATRAIN, MADEV, MD]
REPLACE_STR = '$exp'
CONF_FILE = 'standalone.md.%s.yaml'
BASE_FILE = 'standalone.base.md.yaml'
# first transform optional to empty, existing
for (name, conf) in GROUPS:
if conf.get('use', None) == 'optional':
conf['values'] = [None, conf['value']]
conf_values = map(lambda (name, conf): conf['values'], GROUPS)
executions = list(itertools.product(*conf_values))
def gen_agg_file(values, out_name):
with open(out_name, 'w') as outf:
for value in values:
with open(value) as inf:
outf.write(inf.read())
for execution in executions:
print 'At execution %s' % str(execution)
files = [BASE_FILE]
exp_strings = []
command_line_options = []
options = {}
# for i, param in enumerate(execution):
# conf_name, conf = GROUPS[i]
# # print "\tAt conf %s" % conf_name
# # pprint(conf)
# # print "\tparam is %s" % str(param)
# if conf['type'] == 'option' and param:
# print "\t\tadd %s=%s to command line" % (conf_name, str(param))
# options[conf_name] = param
# # print "\t\tadd %s to command line" % str(conf['value'])
# # command_line_options.append(conf['value'])
# if conf.get('use', None) == 'optional':
# exp_strings.append(conf_name if param else 'no%s' % conf_name)
# else:
# exp_strings.append(param)
# if conf['type'] == 'file':
# if conf['use'] == 'agg':
# files += conf['values'][:conf['values'].index(param)+1]
# if conf['use'] == 'optional' and param:
# files.append(param)
for cmd in cmds:
execcmd = cmd[:]
for name, value in zip(map(lambda (k,v):k, GROUPS), execution):
execcmd = execcmd.replace('$'+name, value)
execcmd = execcmd.replace('$exp', '_'.join(execution))
print execcmd
os.system(execcmd)
# exp_string = '_'.join(exp_strings)
# outname = CONF_FILE % exp_string
# print command_line_options
# gen_agg_file(files, outname)
# new_command = BASE.replace('$conf', outname).replace('$exp', exp_string, 2).replace('$flags', ' '.join(command_line_options))
# print 'Executing %s' % new_command
# os.system(new_command)
| 43.237288 | 393 | 0.571541 | [
"Apache-2.0"
] | CoNLL-UD-2017/OpenU-NLP-Lab | scripts/cartesian_experiments.py | 5,102 | Python |
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import garnett
import hoomd
import hoomd.hpmc
# Vertices of a cube
cube_verts = [[-1, -1, -1], [-1, -1, 1], [-1, 1, 1], [-1, 1, -1],
[1, -1, -1], [1, -1, 1], [1, 1, 1], [1, 1, -1]]
with hoomd.context.SimulationContext():
box = hoomd.data.boxdim(L=10, dimensions=3)
snapshot = hoomd.data.make_snapshot(N=4, box=box)
snapshot.particles.position[:] = [
[2, 0, 0],
[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
]
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
# Restore a snapshot from saved data
with garnett.read('cube.gsd') as traj:
snapshot2 = system.take_snapshot()
traj[-1].to_hoomd_snapshot(snapshot2)
with hoomd.context.SimulationContext():
# Create a HOOMD snapshot from a garnett Trajectory frame
with garnett.read('cube.gsd') as traj:
snapshot = traj[-1].to_hoomd_snapshot()
system = hoomd.init.read_snapshot(snapshot)
mc = hoomd.hpmc.integrate.convex_polyhedron(seed=452784, d=0.2, a=0.4)
mc.shape_param.set('A', vertices=cube_verts)
gsd_dump = hoomd.dump.gsd('cube.gsd', period=10, group=hoomd.group.all())
gsd_dump.dump_state(mc)
gsd_dump.dump_shape(mc)
hoomd.run(1000)
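# Illustrative follow-up (not part of the original example): re-open the
# trajectory written above and report how many frames were stored; assumes
# garnett's Trajectory supports len(), consistent with the indexing used above.
with garnett.read('cube.gsd') as traj:
    print('stored frames:', len(traj))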
| 34.020833 | 77 | 0.644825 | [
"BSD-3-Clause"
] | glotzerlab/garne | examples/example-hpmc.py | 1,633 | Python |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
import xmind
import logging
from xmind2testcase2021.zentao import xmind_to_zentao_csv_file
from xmind2testcase2021.testlink import xmind_to_testlink_xml_file
from xmind2testcase2021.utils import xmind_testcase_to_json_file
from xmind2testcase2021.utils import xmind_testsuite_to_json_file
from xmind2testcase2021.utils import get_xmind_testcase_list
from xmind2testcase2021.utils import get_xmind_testsuite_list
logging.basicConfig(level=logging.INFO)
def main():
xmind_file = 'docs/xmind_testcase_template_v1.1.xmind'
print('Start to convert XMind file: %s' % xmind_file)
# 1、testcases import file
# (1) zentao
zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)
print('Convert XMind file to zentao csv file successfully: %s' % zentao_csv_file)
# (2) testlink
testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)
print('Convert XMind file to testlink xml file successfully: %s' % testlink_xml_file)
# 2、 testcases json file
# (1) testsuite
testsuite_json_file = xmind_testsuite_to_json_file(xmind_file)
print('Convert XMind file to testsuite json file successfully: %s' % testsuite_json_file)
# (2) testcase
testcase_json_file = xmind_testcase_to_json_file(xmind_file)
print('Convert XMind file to testcase json file successfully: %s' % testcase_json_file)
# 3、test dict/json data
# (1) testsuite
testsuites = get_xmind_testsuite_list(xmind_file)
    print('Convert XMind to testsuites dict data:\n%s' %
json.dumps(testsuites, indent=2, separators=(',', ': '), ensure_ascii=False))
# (2) testcase
testcases = get_xmind_testcase_list(xmind_file)
print('Convert Xmind to testcases dict data:\n%s' %
json.dumps(testcases, indent=4, separators=(',', ': '), ensure_ascii=False))
# (3) xmind file
workbook = xmind.load(xmind_file)
print('Convert XMind to Json data:\n%s' %
json.dumps(workbook.getData(), indent=2, separators=(',', ': '), ensure_ascii=False))
print('Finished conversion, Congratulations!')
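def batch_convert(xmind_files):
    """Illustrative sketch (not part of the original sample): convert several
    XMind files in one pass, reusing the helpers imported above."""
    for xmind_file in xmind_files:
        zentao_csv = xmind_to_zentao_csv_file(xmind_file)
        testlink_xml = xmind_to_testlink_xml_file(xmind_file)
        logging.info('Generated %s and %s from %s', zentao_csv, testlink_xml, xmind_file)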
if __name__ == '__main__':
main() | 39.740741 | 95 | 0.741379 | [
"MIT"
] | lovpuss/xmind2testcase2021 | samples.py | 2,152 | Python |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
from datetime import datetime
from traits.api import Bool
from uncertainties import ufloat
from pychron.core.helpers.datetime_tools import ISO_FORMAT_STR
from pychron.core.helpers.filetools import glob_list_directory, add_extension, \
list_directory
from pychron.dvc import dvc_dump, dvc_load, repository_path, list_frozen_productions
from pychron.dvc.meta_object import IrradiationGeometry, Chronology, Production, cached, Gains, LoadGeometry, \
MetaObjectException
from pychron.git_archive.repo_manager import GitRepoManager
from pychron.paths import paths, r_mkdir
from pychron.pychron_constants import INTERFERENCE_KEYS, RATIO_KEYS, DEFAULT_MONITOR_NAME, DATE_FORMAT, NULL_STR
# ============= enthought library imports =======================
def irradiation_geometry(name):
p = os.path.join(paths.meta_root, 'irradiation_holders', add_extension(name))
return IrradiationGeometry(p)
def irradiation_geometry_holes(name):
geom = irradiation_geometry(name)
return geom.holes
def irradiation_chronology(name, allow_null=False):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
return Chronology(p, allow_null=allow_null)
def dump_chronology(path, doses):
if doses is None:
doses = []
with open(path, 'w') as wfile:
for p, s, e in doses:
if not isinstance(s, str):
s = s.strftime(ISO_FORMAT_STR)
            if not isinstance(e, str):
                e = e.strftime(ISO_FORMAT_STR)
if not isinstance(p, str):
p = '{:0.3f}'.format(p)
line = '{},{},{}\n'.format(p, s, e)
wfile.write(line)
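def _example_dump_chronology(path):
    # Illustrative sketch (not part of the original module): doses are
    # (power, start, end) tuples; non-string datetimes are rendered with
    # ISO_FORMAT_STR and powers with three decimals by dump_chronology.
    doses = [(1.0, datetime(2020, 1, 1, 8, 0, 0), datetime(2020, 1, 1, 16, 0, 0))]
    dump_chronology(path, doses)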
def gain_path(name):
root = os.path.join(paths.meta_root, 'spectrometers')
if not os.path.isdir(root):
os.mkdir(root)
p = os.path.join(root, add_extension('{}.gain'.format(name), '.json'))
return p
def get_frozen_productions(repo):
prods = {}
for name, path in list_frozen_productions(repo):
prods[name] = Production(path)
return prods
def get_frozen_flux(repo, irradiation):
path = repository_path(repo, '{}.json'.format(irradiation))
fd = {}
if path:
fd = dvc_load(path)
for fi in fd.values():
fi['j'] = ufloat(*fi['j'], tag='J')
return fd
class MetaRepo(GitRepoManager):
clear_cache = Bool
def get_monitor_info(self, irrad, level):
age, decay = NULL_STR, NULL_STR
positions = self._get_level_positions(irrad, level)
        # assume all positions have the same monitor_age/decay constant. Not strictly true;
        # potentially some ambiguity, but it will not be resolved now 8/26/18.
if positions:
position = positions[0]
opt = position.get('options')
if opt:
age = position.get('monitor_age', NULL_STR)
decayd = position.get('decay_constants')
if decayd:
decay = decayd.get('lambda_k_total', NULL_STR)
return str(age), str(decay)
def add_unstaged(self, *args, **kw):
super(MetaRepo, self).add_unstaged(self.path, **kw)
def save_gains(self, ms, gains_dict):
p = gain_path(ms)
dvc_dump(gains_dict, p)
if self.add_paths(p):
self.commit('Updated gains')
def update_script(self, rootname, name, path_or_blob):
self._update_text(os.path.join('scripts', rootname.lower()), name, path_or_blob)
def update_experiment_queue(self, rootname, name, path_or_blob):
self._update_text(os.path.join('experiments', rootname.lower()), name, path_or_blob)
def update_level_production(self, irrad, name, prname, note=None):
prname = prname.replace(' ', '_')
pathname = add_extension(prname, '.json')
src = os.path.join(paths.meta_root, irrad, 'productions', pathname)
if os.path.isfile(src):
self.update_productions(irrad, name, prname, note=note)
else:
            self.warning_dialog('Invalid production name {}'.format(prname))
def update_level_monitor(self, irradiation, level, monitor_name, monitor_material, monitor_age, lambda_k):
path = self.get_level_path(irradiation, level)
obj = dvc_load(path)
positions = self._get_level_positions(irradiation, level)
options = {'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
decay_constants = {'lambda_k_total': lambda_k, 'lambda_k_total_error': 0}
for p in positions:
p['options'] = options
p['decay_constants'] = decay_constants
obj['positions'] = positions
dvc_dump(obj, path)
def add_production_to_irradiation(self, irrad, name, params, add=True, commit=False):
self.debug('adding production {} to irradiation={}'.format(name, irrad))
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(name, '.json'))
prod = Production(p, new=not os.path.isfile(p))
prod.update(params)
prod.dump()
if add:
self.add(p, commit=commit)
def add_production(self, irrad, name, obj, commit=False, add=True):
p = self.get_production(irrad, name, force=True)
p.attrs = attrs = INTERFERENCE_KEYS + RATIO_KEYS
kef = lambda x: '{}_err'.format(x)
if obj:
def values():
return ((k, getattr(obj, k), kef(k), getattr(obj, kef(k))) for k in attrs)
else:
def values():
return ((k, 0, kef(k), 0) for k in attrs)
for k, v, ke, e in values():
setattr(p, k, v)
setattr(p, ke, e)
p.dump()
if add:
self.add(p.path, commit=commit)
def update_production(self, prod, irradiation=None):
ip = self.get_production(prod.name)
self.debug('saving production {}'.format(prod.name))
params = prod.get_params()
for k, v in params.items():
self.debug('setting {}={}'.format(k, v))
setattr(ip, k, v)
ip.note = prod.note
self.add(ip.path, commit=False)
self.commit('updated production {}'.format(prod.name))
def update_productions(self, irrad, level, production, note=None, add=True):
p = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(p)
obj['note'] = str(note) or ''
if level in obj:
if obj[level] != production:
self.debug('setting production to irrad={}, level={}, prod={}'.format(irrad, level, production))
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
else:
obj[level] = production
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def set_identifier(self, irradiation, level, pos, identifier):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
positions = self._get_level_positions(irradiation, level)
d = next((p for p in positions if p['position'] == pos), None)
if d:
d['identifier'] = identifier
jd['positions'] = positions
dvc_dump(jd, p)
self.add(p, commit=False)
def get_level_path(self, irrad, level):
return os.path.join(paths.meta_root, irrad, '{}.json'.format(level))
def add_level(self, irrad, level, add=True):
p = self.get_level_path(irrad, level)
lv = dict(z=0, positions=[])
dvc_dump(lv, p)
if add:
self.add(p, commit=False)
def add_chronology(self, irrad, doses, add=True):
p = os.path.join(paths.meta_root, irrad, 'chronology.txt')
dump_chronology(p, doses)
if add:
self.add(p, commit=False)
def add_irradiation(self, name):
p = os.path.join(paths.meta_root, name)
if not os.path.isdir(p):
os.mkdir(p)
def add_position(self, irradiation, level, pos, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
pd = next((p for p in positions if p['position'] == pos), None)
if pd is None:
positions.append({'position': pos, 'decay_constants': {}})
dvc_dump({'z': z, 'positions': positions}, p)
if add:
self.add(p, commit=False)
def add_irradiation_geometry_file(self, path):
try:
holder = IrradiationGeometry(path)
if not holder.holes:
raise BaseException
except BaseException:
self.warning_dialog('Invalid Irradiation Geometry file. Failed to import')
return
self.smart_pull()
root = os.path.join(paths.meta_root, 'irradiation_holders')
if not os.path.isdir(root):
os.mkdir(root)
name = os.path.basename(path)
dest = os.path.join(root, name)
shutil.copyfile(path, dest)
self.add(dest, commit=False)
self.commit('added irradiation geometry file {}'.format(name))
self.push()
self.information_dialog('Irradiation Geometry "{}" added'.format(name))
# p = os.path.join(root, add_extension(name))
# def add_irradiation_holder(self, name, blob, commit=False, overwrite=False, add=True):
# root = os.path.join(paths.meta_root, 'irradiation_holders')
# if not os.path.isdir(root):
# os.mkdir(root)
# p = os.path.join(root, add_extension(name))
#
# if not os.path.isfile(p) or overwrite:
# with open(p, 'w') as wfile:
# holes = list(iter_geom(blob))
# n = len(holes)
# wfile.write('{},0.0175\n'.format(n))
# for idx, (x, y, r) in holes:
# wfile.write('{:0.4f},{:0.4f},{:0.4f}\n'.format(x, y, r))
# if add:
# self.add(p, commit=commit)
def get_load_holders(self):
p = os.path.join(paths.meta_root, 'load_holders')
return list_directory(p, extension='.txt', remove_extension=True)
def add_load_holder(self, name, path_or_txt, commit=False, add=True):
p = os.path.join(paths.meta_root, 'load_holders', name)
if os.path.isfile(path_or_txt):
shutil.copyfile(path_or_txt, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_txt)
if add:
self.add(p, commit=commit)
def update_level_z(self, irradiation, level, z):
p = self.get_level_path(irradiation, level)
obj = dvc_load(p)
try:
add = obj['z'] != z
obj['z'] = z
except TypeError:
obj = {'z': z, 'positions': obj}
add = True
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def remove_irradiation_position(self, irradiation, level, hole):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if jd:
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd['positions']
z = jd['z']
npositions = [ji for ji in positions if not ji['position'] == hole]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
self.add(p, commit=False)
def new_flux_positions(self, irradiation, level, positions, add=True):
p = self.get_level_path(irradiation, level)
obj = {'positions': positions, 'z': 0}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_fluxes(self, irradiation, level, j, e, add=True):
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
else:
positions = jd.get('positions')
if positions:
for ip in positions:
ip['j'] = j
ip['j_err'] = e
dvc_dump(jd, p)
if add:
self.add(p, commit=False)
def update_flux(self, irradiation, level, pos, identifier, j, e, mj, me, decay=None,
position_jerr=None,
analyses=None, options=None, add=True):
if options is None:
options = {}
if decay is None:
decay = {}
if analyses is None:
analyses = []
p = self.get_level_path(irradiation, level)
jd = dvc_load(p)
if isinstance(jd, list):
positions = jd
z = 0
else:
positions = jd.get('positions', [])
z = jd.get('z', 0)
npos = {'position': pos, 'j': j, 'j_err': e,
'mean_j': mj, 'mean_j_err': me,
'position_jerr': position_jerr,
'decay_constants': decay,
'identifier': identifier,
'options': options,
'analyses': [{'uuid': ai.uuid,
'record_id': ai.record_id,
'is_omitted': ai.is_omitted()}
for ai in analyses]}
if positions:
added = any((ji['position'] == pos for ji in positions))
npositions = [ji if ji['position'] != pos else npos for ji in positions]
if not added:
npositions.append(npos)
else:
npositions = [npos]
obj = {'z': z, 'positions': npositions}
dvc_dump(obj, p)
if add:
self.add(p, commit=False)
def update_chronology(self, name, doses):
p = os.path.join(paths.meta_root, name, 'chronology.txt')
dump_chronology(p, doses)
self.add(p, commit=False)
def get_irradiation_holder_names(self):
return glob_list_directory(os.path.join(paths.meta_root, 'irradiation_holders'),
extension='.txt',
remove_extension=True)
def get_cocktail_irradiation(self):
"""
example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return:
"""
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret
def get_default_productions(self):
p = os.path.join(paths.meta_root, 'reactors.json')
if not os.path.isfile(p):
with open(p, 'w') as wfile:
from pychron.file_defaults import REACTORS_DEFAULT
wfile.write(REACTORS_DEFAULT)
return dvc_load(p)
def get_flux_positions(self, irradiation, level):
positions = self._get_level_positions(irradiation, level)
return positions
def get_flux(self, irradiation, level, position):
positions = self.get_flux_positions(irradiation, level)
return self.get_flux_from_positions(position, positions)
def get_flux_from_positions(self, position, positions):
j, je, pe, lambda_k = 0, 0, 0, None
monitor_name, monitor_material, monitor_age = DEFAULT_MONITOR_NAME, 'sanidine', ufloat(28.201, 0)
if positions:
pos = next((p for p in positions if p['position'] == position), None)
if pos:
j, je, pe = pos.get('j', 0), pos.get('j_err', 0), pos.get('position_jerr', 0)
dc = pos.get('decay_constants')
if dc:
# this was a temporary fix and likely can be removed
if isinstance(dc, float):
v, e = dc, 0
else:
v, e = dc.get('lambda_k_total', 0), dc.get('lambda_k_total_error', 0)
lambda_k = ufloat(v, e)
mon = pos.get('monitor')
if mon:
monitor_name = mon.get('name', DEFAULT_MONITOR_NAME)
sa = mon.get('age', 28.201)
se = mon.get('error', 0)
monitor_age = ufloat(sa, se, tag='monitor_age')
monitor_material = mon.get('material', 'sanidine')
fd = {'j': ufloat(j, je, tag='J'),
'position_jerr': pe,
'lambda_k': lambda_k,
'monitor_name': monitor_name,
'monitor_material': monitor_material,
'monitor_age': monitor_age}
return fd
def get_gains(self, name):
g = self.get_gain_obj(name)
return g.gains
def save_sensitivities(self, sens):
ps = []
for k, v in sens.items():
root = os.path.join(paths.meta_root, 'spectrometers')
p = os.path.join(root, add_extension('{}.sens'.format(k), '.json'))
dvc_dump(v, p)
ps.append(p)
if self.add_paths(ps):
self.commit('Updated sensitivity')
def get_sensitivities(self):
specs = {}
root = os.path.join(paths.meta_root, 'spectrometers')
for p in list_directory(root):
if p.endswith('.sens.json'):
name = p.split('.')[0]
p = os.path.join(root, p)
obj = dvc_load(p)
for r in obj:
if r['create_date']:
r['create_date'] = datetime.strptime(r['create_date'], DATE_FORMAT)
specs[name] = obj
return specs
def get_sensitivity(self, name):
sens = self.get_sensitivities()
spec = sens.get(name)
v = 1
if spec:
# get most recent sensitivity
record = spec[-1]
v = record.get('sensitivity', 1)
return v
@cached('clear_cache')
def get_gain_obj(self, name, **kw):
p = gain_path(name)
return Gains(p)
# @cached('clear_cache')
def get_production(self, irrad, level, allow_null=False, **kw):
path = os.path.join(paths.meta_root, irrad, 'productions.json')
obj = dvc_load(path)
pname = obj.get(level, '')
p = os.path.join(paths.meta_root, irrad, 'productions', add_extension(pname, ext='.json'))
ip = Production(p, allow_null=allow_null)
# print 'new production id={}, name={}, irrad={}, level={}'.format(id(ip), pname, irrad, level)
return pname, ip
# @cached('clear_cache')
def get_chronology(self, name, allow_null=False, **kw):
chron = None
try:
chron = irradiation_chronology(name, allow_null=allow_null)
if self.application:
chron.use_irradiation_endtime = self.application.get_boolean_preference(
'pychron.arar.constants.use_irradiation_endtime', False)
except MetaObjectException:
if name != 'NoIrradiation':
self.warning('Could not locate the irradiation chronology "{}"'.format(name))
return chron
@cached('clear_cache')
def get_irradiation_holder_holes(self, name, **kw):
return irradiation_geometry_holes(name)
@cached('clear_cache')
def get_load_holder_holes(self, name, **kw):
p = os.path.join(paths.meta_root, 'load_holders', add_extension(name))
holder = LoadGeometry(p)
return holder.holes
@property
def sensitivity_path(self):
return os.path.join(paths.meta_root, 'sensitivity.json')
# private
def _get_level_positions(self, irrad, level):
p = self.get_level_path(irrad, level)
obj = dvc_load(p)
if isinstance(obj, list):
positions = obj
else:
positions = obj.get('positions', [])
return positions
def _update_text(self, tag, name, path_or_blob):
if not name:
self.debug('cannot update text with no name. tag={} name={}'.format(tag, name))
return
root = os.path.join(paths.meta_root, tag)
if not os.path.isdir(root):
r_mkdir(root)
p = os.path.join(root, name)
if os.path.isfile(path_or_blob):
shutil.copyfile(path_or_blob, p)
else:
with open(p, 'w') as wfile:
wfile.write(path_or_blob)
self.add(p, commit=False)
# ============= EOF =============================================
| 34.158228 | 116 | 0.564712 | [
"Apache-2.0"
] | UManPychron/pychron | pychron/dvc/meta_repo.py | 21,588 | Python |
#!/usr/bin/python
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len(sys.argv) < 2:
print '\nconvert basti SSP models to ez_gal fits format'
print 'Run in directory with SED models for one metallicity'
print 'Usage: convert_basti.py ez_gal.ascii\n'
sys.exit(2)
fileout = sys.argv[1]
# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join(fileout.split('.')[:-1]).split('_')
# look for sfh
for (check,val) in zip(['ssp','exp'], ['SSP','Exponential']):
if parts.count(check):
sfh = val
sfh_index = parts.index(check)
break
# tau?
if sfh:
tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count('z'):
met = parts[parts.index('z') + 1]
# imf
for (check,val) in zip(['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier']):
if parts.count(check):
imf = val
break
if parts.count('n'):
n = parts[parts.index('n') + 1]
ae = False
if parts.count('ae'): ae = True
# does the file with masses exist?
has_masses = False
mass_file = glob.glob('MLR*.txt')
if len(mass_file):
# read it in!
print 'Loading masses from %s' % mass_file[0]
data = utils.rascii(mass_file[0], silent=True)
masses = data[:,10:14].sum(axis=1)
has_masses = True
files = glob.glob('SPEC*agb*')
nages = len(files)
ages = []
for (i,file) in enumerate(files):
ls = []
this = []
# extract the age from the filename and convert to years
m = re.search('t60*(\d+)$', file)
ages.append(int(m.group(1))*1e6)
# read in this file
fp = open(file, 'r')
for line in fp:
parts = line.strip().split()
ls.append(float(parts[0].strip()))
this.append(float(parts[1].strip()))
if i == 0:
# if this is the first file, generate the data table
nls = len(ls)
seds = np.empty((nls,nages))
# convert to ergs/s/angstrom
seds[:,i] = np.array(this)/4.3607e-33/1e10
# convert to numpy
ages = np.array(ages)
ls = np.array(ls)*10.0
# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
# speed of light
c = utils.convert_length(utils.c, incoming='m', outgoing='a')
# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape((ls.size,1))**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length(10, incoming='pc', outgoing='cm')**2.0)
# sort in frequency space
sinds = vs.argsort()
# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update('units', 'ergs/s/cm^2/Hz')
primary_hdu.header.update('has_seds', True)
primary_hdu.header.update('nfilters', 0)
primary_hdu.header.update('nzfs', 0)
# store meta data
if sfh and met and imf:
primary_hdu.header.update('has_meta', True)
primary_hdu.header.update('model', 'BaSTI', comment='meta data')
primary_hdu.header.update('met', met, comment='meta data')
primary_hdu.header.update('imf', imf, comment='meta data')
primary_hdu.header.update('sfh', sfh, comment='meta data')
if sfh == 'Exponential': primary_hdu.header.update('tau', tau, comment='meta data')
primary_hdu.header.update('n', n, comment='meta data')
primary_hdu.header.update('ae', ae, comment='meta data')
# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update('units', 'hertz')
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses: cols.append(pyfits.Column(name='masses', array=masses, format='D', unit='m_sun'))
ages_hdu = pyfits.new_table(pyfits.ColDefs(cols))
if has_masses: ages_hdu.header.update('has_mass', True)
# make the fits file in memory
hdulist = pyfits.HDUList([primary_hdu,vs_hdu,ages_hdu])
# and write it out
hdulist.writeto(fileout, clobber=True) | 30.545455 | 112 | 0.657738 | [
"MIT"
] | dpgettings/ezgal | ezgal/scripts/convert_basti.py | 4,032 | Python |
import path4gmns as pg
from time import time
def test_download_sample_data_sets():
pg.download_sample_data_sets()
def test_find_shortest_path():
load_demand = False
network = pg.read_network(load_demand)
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, seq_type='link'))
# retrieve the shortest path under a specific mode (which must be defined
# in settings.yaml)
print('\nshortest path (node id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w'))
print('\nshortest path (link id) from node 1 to node 2, '
+network.find_shortest_path(1, 2, mode='w', seq_type='link'))
def test_find_shortest_path_for_agents():
network = pg.read_network()
st = time()
# find agent paths under a specific mode defined in settings.yaml,
# say, w (i.e., walk)
# network.find_path_for_agents('w') or network.find_path_for_agents('walk')
network.find_path_for_agents()
print('\nprocessing time of finding shortest paths for all agents: '
f'{time()-st:.2f} s')
agent_id = 300
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
agent_id = 1000
print('\norigin node id of agent is '
f'{network.get_agent_orig_node_id(agent_id)}')
print('destination node id of agent is '
f'{network.get_agent_dest_node_id(agent_id)}')
print('shortest path (node id) of agent, '
f'{network.get_agent_node_path(agent_id)}')
print('shortest path (link id) of agent, '
f'{network.get_agent_link_path(agent_id)}')
# output unique agent paths to a csv file
# if you do not want to include geometry info in the output file,
# you can do pg.output_agent_paths(network, False)
pg.output_agent_paths(network)
def test_column_generation_py():
network = pg.read_network()
print('\nstart column generation\n')
st = time()
iter_num = 20
column_update_num = 20
# pg.perform_network_assignment(assignment_mode=1, assignment_num,
# column_update_num, network)
# has been deprecated starting from v0.7.2, and will be removed later.
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
# if you do not want to include geometry info in the output file,
# use pg.output_columns(network, False)
pg.output_columns(network)
pg.output_link_performance(network)
def test_column_generation_dtalite():
""" validation using DTALite """
print('start column generation using DTALite')
st = time()
mode = 1
iter_num = 20
column_update_num = 20
pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
print('\npath finding results can be found in agent.csv')
def test_loading_columns():
network = pg.read_network()
print('\nstart loading columns\n')
st = time()
pg.load_columns(network)
print(f'processing time of loading columns: {time()-st:.2f} s')
print('\nstart column generation\n')
st = time()
iter_num = 0
column_update_num = 10
# pg.perform_network_assignment(assignment_mode=1, assignment_num,
# column_update_num, network)
    # has been deprecated starting from v0.7.2, and will be removed later.
pg.perform_column_generation(iter_num, column_update_num, network)
print(f'processing time of column generation: {time()-st:.2f} s'
f' for {iter_num} assignment iterations and '
f'{column_update_num} iterations in column generation')
pg.output_columns(network)
pg.output_link_performance(network)
def test_accessibility():
load_demand = False
network = pg.read_network(load_demand)
print('\nstart accessibility evaluation\n')
st = time()
# multimodal accessibility evaluation
pg.evaluate_accessibility(network)
    # accessibility evaluation for a target mode
# pg.evaluate_accessibility(network, multimodal=False, mode='p')
print('complete accessibility evaluation.\n')
print(f'processing time of accessibility evaluation: {time()-st:.2f} s')
    # get accessible nodes and links starting from node 1 with a 5-minute
# time window for the default mode auto (i.e., 'p')
network.get_accessible_nodes(1, 5)
network.get_accessible_links(1, 5)
    # get accessible nodes and links starting from node 1 with a 15-minute
# time window for mode walk (i.e., 'w')
network.get_accessible_nodes(1, 15, 'w')
network.get_accessible_links(1, 15, 'w')
def demo_mode(mode):
print(f'the selected mode is {mode}\n')
if mode == 0:
# option 0: download the sample data set from GitHub
test_download_sample_data_sets()
elif mode == 1:
# option 1: find shortest path between O and D on Chicago network
test_find_shortest_path()
elif mode == 2:
# option 2: find shortest paths for all agents on Chicago network
test_find_shortest_path_for_agents()
elif mode == 3:
# option 3: perform column generation using Python engine
# on Chicago network
test_column_generation_py()
elif mode == 4:
# option 4: perform column generation using DTALite on Chicago network
test_column_generation_dtalite()
elif mode == 5:
# option 5: load columns generated from option 3 or 4
# on Chicago network
test_loading_columns()
else:
# option 6: evaluate multimodal accessibility on Chicago network
test_accessibility()
if __name__=="__main__":
demo_mode(6) | 34.529412 | 79 | 0.678179 | [
"Apache-2.0"
] | FangTang999/Path4GMNS | tests/demo.py | 6,457 | Python |
"""Define family of algorithms and make them interchangeable
The algorithms vary independetly from the clients using it.
This class implements to IngestorInterface and dynamically invoke
a suitable algorithm (strategy.algorithm()), through parse()
abstract method. i.e. it is independent of how an algorithm
is implemented.
That means, the behavior can be changed without breaking the classes
that use it, and the classes can switch between behaviors by changing
the specific implementation used without requiring any
significant code changes.
"""
from typing import List
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
from .CSVImporter import CSVImporter
from .PDFImporter import PDFImporter
from .DocxImporter import DocxImporter
from .TXTImporter import TXTImporter
class Ingestor(IngestorInterface):
"""Define family of algorithms & dynamically invoke the one of interest"""
importer_classes = [CSVImporter, PDFImporter, DocxImporter, TXTImporter]
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
for importer in cls.importer_classes:
if importer.can_ingest(path):
return importer.parse(path)
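# Illustrative usage sketch (not part of the original module); 'quotes.csv' is
# a hypothetical file -- any extension handled by one of the importer classes
# (.csv, .pdf, .docx, .txt) is dispatched to the matching strategy.
def _example_usage():
    quotes = Ingestor.parse('quotes.csv')
    if quotes:
        for quote in quotes:
            print(quote)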
| 34.472222 | 79 | 0.759871 | [
"MIT"
] | 1ayham1/Data_Science-DynamicMemes_-generator | QuoteEngine/Ingestor.py | 1,241 | Python |
from infoblox_netmri.utils.utils import locate, to_snake
from infoblox_netmri.api.exceptions.netmri_exceptions import NotImplementedException
class Broker(object):
""" Base class for broker instances, provides methods for API requests.
And return responces wrapped with specific class
:param client: InfobloxNetMRI client
"""
controller = None
def __init__(self, client):
self.client = client
def api_request(self, method_name, params):
""" Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if isinstance(data, dict) and len(data) > 1:
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
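    # Illustrative sketch (hypothetical subclass, not part of this module):
    #
    #     class DeviceBroker(Broker):
    #         controller = "devices"
    #
    #     broker = DeviceBroker(client)
    #     device = broker.api_request("devices/show", {"DeviceID": 1})
    #
    # api_list_request() is used the same way but returns a list of wrapped
    # remote models read from the broker's controller key in the response.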
# See NETMRI-31545
def api_mixed_request(self, method_name, params):
""" Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace("Broker", ""))
if class_name in data:
result_name = class_name
else:
result_name = method_name.split('/')[-1]
if result_name not in data:
return data
return self._get_return_object_type(data.get(result_name))
def api_list_request(self, method_name, params):
""" Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call
"""
data = self.client.api_request(method_name, params)
if not data:
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print("Sorry, this method will be implemented in the\
future versions of NetMRI")
raise NotImplementedException(self.controller, method_name)
def _get_method_fullname(self, method):
""" Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path
"""
return "{}/{}".format(self.controller, method)
def _get_return_object_type(self, data):
""" Returns wrapped response which inherits from RemoteModel class
        :param data: API response data
:return: RemoteModel child class
"""
if not data or type(data) != dict:
return data
class_name = data.get("_class")
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client)
def _get_remote_class_name(self, name):
""" Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model
"""
return "infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote".format(
pckg=to_snake(name),
name=name
) | 34.172727 | 85 | 0.623304 | [
"Apache-2.0"
] | IngmarVG-IB/infoblox-netmri | infoblox_netmri/api/broker/broker.py | 3,759 | Python |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import unquote
from swift import gettext_ as _
from swift.account.utils import account_listing_response
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.utils import public
from swift.common.constraints import check_metadata
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache, \
set_info_cache
from swift.common.middleware import listing_formats
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_sys_meta_prefix
class AccountController(Controller):
"""WSGI controller for account requests"""
server_type = 'Account'
def __init__(self, app, account_name, **kwargs):
super(AccountController, self).__init__(app)
self.account_name = unquote(account_name)
if not self.app.allow_account_management:
self.allowed_methods.remove('PUT')
self.allowed_methods.remove('DELETE')
def add_acls_from_sys_metadata(self, resp):
if resp.environ['REQUEST_METHOD'] in ('HEAD', 'GET', 'PUT', 'POST'):
prefix = get_sys_meta_prefix('account') + 'core-'
name = 'access-control'
(extname, intname) = ('x-account-' + name, prefix + name)
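            # e.g. for 'access-control' this maps the internal sysmeta header
            # 'x-account-sysmeta-core-access-control' onto the user-facing
            # 'x-account-access-control' header.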
acl_dict = parse_acl(version=2, data=resp.headers.pop(intname))
if acl_dict: # treat empty dict as empty header
resp.headers[extname] = format_acl(
version=2, acl_dict=acl_dict)
def GETorHEAD(self, req):
"""Handler for HTTP GET/HEAD requests."""
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
# Don't cache this. We know the account doesn't exist because
# the name is bad; we don't need to cache that because it's
# really cheap to recompute.
return resp
partition = self.app.account_ring.get_part(self.account_name)
concurrency = self.app.account_ring.replica_count \
if self.app.get_policy_options(None).concurrent_gets else 1
node_iter = self.app.iter_nodes(self.app.account_ring, partition)
params = req.params
params['format'] = 'json'
req.params = params
resp = self.GETorHEAD_base(
req, _('Account'), node_iter, partition,
req.swift_entity_path.rstrip('/'), concurrency)
if resp.status_int == HTTP_NOT_FOUND:
if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
resp.status = HTTP_GONE
elif self.app.account_autocreate:
# This is kind of a lie; we pretend like the account is
# there, but it's not. We'll create it as soon as something
# tries to write to it, but we don't need databases on disk
# to tell us that nothing's there.
#
# We set a header so that certain consumers can tell it's a
# fake listing. The important one is the PUT of a container
# to an autocreate account; the proxy checks to see if the
# account exists before actually performing the PUT and
# creates the account if necessary. If we feed it a perfect
# lie, it'll just try to create the container without
# creating the account, and that'll fail.
resp = account_listing_response(
self.account_name, req,
listing_formats.get_listing_content_type(req))
resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'
# Cache this. We just made a request to a storage node and got
# up-to-date information for the account.
resp.headers['X-Backend-Recheck-Account-Existence'] = str(
self.app.recheck_account_existence)
set_info_cache(self.app, req.environ, self.account_name, None, resp)
if req.environ.get('swift_owner'):
self.add_acls_from_sys_metadata(resp)
else:
for header in self.app.swift_owner_headers:
resp.headers.pop(header, None)
return resp
@public
def PUT(self, req):
"""HTTP PUT request handler."""
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
error_response = check_metadata(req, 'account')
if error_response:
return error_response
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
return resp
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'PUT',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def POST(self, req):
"""HTTP POST request handler."""
length_limit = self.get_name_length_limit()
if len(self.account_name) > length_limit:
resp = HTTPBadRequest(request=req)
resp.body = b'Account name length of %d longer than %d' % \
(len(self.account_name), length_limit)
return resp
error_response = check_metadata(req, 'account')
if error_response:
return error_response
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req, transfer=True)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
self.autocreate_account(req, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'POST',
req.swift_entity_path, [headers] * len(accounts))
self.add_acls_from_sys_metadata(resp)
return resp
@public
def DELETE(self, req):
"""HTTP DELETE request handler."""
# Extra safety in case someone typos a query string for an
# account-level DELETE request that was really meant to be caught by
# some middleware.
if req.query_string:
return HTTPBadRequest(request=req)
if not self.app.allow_account_management:
return HTTPMethodNotAllowed(
request=req,
headers={'Allow': ', '.join(self.allowed_methods)})
account_partition, accounts = \
self.app.account_ring.get_nodes(self.account_name)
headers = self.generate_request_headers(req)
clear_info_cache(self.app, req.environ, self.account_name)
resp = self.make_requests(
req, self.app.account_ring, account_partition, 'DELETE',
req.swift_entity_path, [headers] * len(accounts))
return resp
| 46.175824 | 77 | 0.642789 | [
"Apache-2.0"
] | AymericDu/swift | swift/proxy/controllers/account.py | 8,404 | Python |
"""
ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
application = get_asgi_application()
| 23.941176 | 79 | 0.7543 | [
"MIT"
] | CSXL/csxlabs.org | backend/backend/asgi.py | 407 | Python |
from thriftpy2.thrift import TType
class ThriftError(Exception):
""" Base Exception defined by `aiothrift` """
class ConnectionClosedError(ThriftError):
"""Raised if connection to server was closed."""
class PoolClosedError(ThriftError):
"""Raised when operating on a closed thrift connection pool"""
class ThriftAppError(ThriftError):
"""Application level thrift exceptions."""
thrift_spec = {
1: (TType.STRING, 'message', False),
2: (TType.I32, 'type', False),
}
UNKNOWN = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
def __init__(self, type=UNKNOWN, message=None):
super().__init__()
self.type = type
self.message = message
def __str__(self):
if self.message:
return self.message
if self.type == self.UNKNOWN_METHOD:
return 'Unknown method'
elif self.type == self.INVALID_MESSAGE_TYPE:
return 'Invalid message type'
elif self.type == self.WRONG_METHOD_NAME:
return 'Wrong method name'
elif self.type == self.BAD_SEQUENCE_ID:
return 'Bad sequence ID'
elif self.type == self.MISSING_RESULT:
return 'Missing result'
else:
return 'Default (unknown) TApplicationException'
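# Illustrative sketch (not part of the library): how calling code might surface
# a server-reported failure as a readable message.
def _example_describe_error():
    try:
        raise ThriftAppError(ThriftAppError.UNKNOWN_METHOD)
    except ThriftAppError as exc:
        return str(exc)  # 'Unknown method'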
| 26.444444 | 66 | 0.633053 | [
"MIT"
] | achimnol/aiothrift | aiothrift/errors.py | 1,428 | Python |
"""
From https://zenodo.org/record/3539363
"""
import re
def section_text(text):
"""Splits text into sections.
Assumes text is in a radiology report format, e.g.:
COMPARISON: Chest radiograph dated XYZ.
IMPRESSION: ABC...
Given text like this, it will output text from each section,
where the section type is determined by the all caps header.
Returns a three element tuple:
sections - list containing the text of each section
section_names - a normalized version of the section name
section_idx - list of start indices of the text in the section
"""
p_section = re.compile(
r'\n ([A-Z ()/,-]+):\s', re.DOTALL)
sections = list()
section_names = list()
section_idx = list()
idx = 0
s = p_section.search(text, idx)
if s:
sections.append(text[0:s.start(1)])
section_names.append('preamble')
section_idx.append(0)
while s:
current_section = s.group(1).lower()
# get the start of the text for this section
idx_start = s.end()
# skip past the first newline to avoid some bad parses
idx_skip = text[idx_start:].find('\n')
if idx_skip == -1:
idx_skip = 0
s = p_section.search(text, idx_start + idx_skip)
if s is None:
idx_end = len(text)
else:
idx_end = s.start()
sections.append(text[idx_start:idx_end])
section_names.append(current_section)
section_idx.append(idx_start)
else:
sections.append(text)
section_names.append('full report')
section_idx.append(0)
section_names = normalize_section_names(section_names)
# remove empty sections
# this handles when the report starts with a finding-like statement
# .. but this statement is not a section, more like a report title
# e.g. p10/p10103318/s57408307
# CHEST, PA LATERAL:
#
# INDICATION: This is the actual section ....
# it also helps when there are multiple findings sections
# usually one is empty
for i in reversed(range(len(section_names))):
if section_names[i] in ('impression', 'findings'):
if sections[i].strip() == '':
sections.pop(i)
section_names.pop(i)
section_idx.pop(i)
if ('impression' not in section_names) & ('findings' not in section_names):
# create a new section for the final paragraph
if '\n \n' in sections[-1]:
sections.append('\n \n'.join(sections[-1].split('\n \n')[1:]))
sections[-2] = sections[-2].split('\n \n')[0]
section_names.append('last_paragraph')
section_idx.append(section_idx[-1] + len(sections[-2]))
return sections, section_names, section_idx
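def _example_section_text():
    # Illustrative sketch (not part of the original module): a toy report laid
    # out in the format the regex above expects (newline + space + CAPS header).
    report = (
        "CHEST RADIOGRAPH\n"
        " INDICATION: Shortness of breath.\n"
        " FINDINGS: No focal consolidation.\n"
        " IMPRESSION: No acute cardiopulmonary process.\n"
    )
    sections, names, idx = section_text(report)
    return list(zip(names, sections))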
def normalize_section_names(section_names):
# first, lower case all
section_names = [s.lower().strip() for s in section_names]
frequent_sections = {
"preamble": "preamble", # 227885
"impression": "impression", # 187759
"comparison": "comparison", # 154647
"indication": "indication", # 153730
"findings": "findings", # 149842
"examination": "examination", # 94094
"technique": "technique", # 81402
"history": "history", # 45624
"comparisons": "comparison", # 8686
"clinical history": "history", # 7121
"reason for examination": "indication", # 5845
"notification": "notification", # 5749
"reason for exam": "indication", # 4430
"clinical information": "history", # 4024
"exam": "examination", # 3907
"clinical indication": "indication", # 1945
"conclusion": "impression", # 1802
"chest, two views": "findings", # 1735
"recommendation(s)": "recommendations", # 1700
"type of examination": "examination", # 1678
"reference exam": "comparison", # 347
"patient history": "history", # 251
"addendum": "addendum", # 183
"comparison exam": "comparison", # 163
"date": "date", # 108
"comment": "comment", # 88
"findings and impression": "impression", # 87
"wet read": "wet read", # 83
"comparison film": "comparison", # 79
"recommendations": "recommendations", # 72
"findings/impression": "impression", # 47
"pfi": "history",
'recommendation': 'recommendations',
'wetread': 'wet read',
'ndication': 'impression', # 1
'impresson': 'impression', # 2
'imprression': 'impression', # 1
'imoression': 'impression', # 1
'impressoin': 'impression', # 1
'imprssion': 'impression', # 1
'impresion': 'impression', # 1
'imperssion': 'impression', # 1
'mpression': 'impression', # 1
'impession': 'impression', # 3
'findings/ impression': 'impression', # ,1
'finding': 'findings', # ,8
'findins': 'findings',
'findindgs': 'findings', # ,1
'findgings': 'findings', # ,1
'findngs': 'findings', # ,1
'findnings': 'findings', # ,1
'finidngs': 'findings', # ,2
'idication': 'indication', # ,1
'reference findings': 'findings', # ,1
'comparision': 'comparison', # ,2
'comparsion': 'comparison', # ,1
'comparrison': 'comparison', # ,1
'comparisions': 'comparison' # ,1
}
p_findings = [
'chest',
'portable',
'pa and lateral',
'lateral and pa',
'ap and lateral',
'lateral and ap',
'frontal and',
'two views',
'frontal view',
'pa view',
'ap view',
'one view',
'lateral view',
'bone window',
'frontal upright',
'frontal semi-upright',
'ribs',
'pa and lat'
]
p_findings = re.compile('({})'.format('|'.join(p_findings)))
main_sections = [
'impression', 'findings', 'history', 'comparison',
'addendum'
]
for i, s in enumerate(section_names):
if s in frequent_sections:
section_names[i] = frequent_sections[s]
continue
main_flag = False
for m in main_sections:
if m in s:
section_names[i] = m
main_flag = True
break
if main_flag:
continue
m = p_findings.search(s)
if m is not None:
section_names[i] = 'findings'
# if it looks like it is describing the entire study
# it's equivalent to findings
# group similar phrasings for impression
return section_names
def custom_mimic_cxr_rules():
custom_section_names = {
's50913680': 'recommendations', # files/p11/p11851243/s50913680.txt
's59363654': 'examination', # files/p12/p12128253/s59363654.txt
's59279892': 'technique', # files/p13/p13150370/s59279892.txt
's59768032': 'recommendations', # files/p13/p13249077/s59768032.txt
's57936451': 'indication', # files/p14/p14325424/s57936451.txt
's50058765': 'indication', # files/p14/p14731346/s50058765.txt
's53356173': 'examination', # files/p15/p15898350/s53356173.txt
's53202765': 'technique', # files/p16/p16076182/s53202765.txt
's50808053': 'technique', # files/p16/p16631485/s50808053.txt
's51966317': 'indication', # files/p10/p10817099/s51966317.txt
's50743547': 'examination', # files/p11/p11388341/s50743547.txt
's56451190': 'note', # files/p11/p11842879/s56451190.txt
's59067458': 'recommendations', # files/p11/p11984647/s59067458.txt
's59215320': 'examination', # files/p12/p12408912/s59215320.txt
's55124749': 'indication', # files/p12/p12428492/s55124749.txt
's54365831': 'indication', # files/p13/p13876470/s54365831.txt
's59087630': 'recommendations', # files/p14/p14267880/s59087630.txt
's58157373': 'recommendations', # files/p15/p15032392/s58157373.txt
's56482935': 'recommendations', # files/p15/p15388421/s56482935.txt
's58375018': 'recommendations', # files/p15/p15505556/s58375018.txt
's54654948': 'indication', # files/p17/p17090359/s54654948.txt
's55157853': 'examination', # files/p18/p18975498/s55157853.txt
's51491012': 'history', # files/p19/p19314266/s51491012.txt
}
custom_indices = {
's50525523': [201, 349], # files/p10/p10602608/s50525523.txt
's57564132': [233, 554], # files/p10/p10637168/s57564132.txt
's59982525': [313, 717], # files/p11/p11989982/s59982525.txt
's53488209': [149, 475], # files/p12/p12458657/s53488209.txt
's54875119': [234, 988], # files/p13/p13687044/s54875119.txt
's50196495': [59, 399], # files/p13/p13894879/s50196495.txt
's56579911': [59, 218], # files/p15/p15394326/s56579911.txt
's52648681': [292, 631], # files/p15/p15666238/s52648681.txt
's59889364': [172, 453], # files/p15/p15835529/s59889364.txt
's53514462': [73, 377], # files/p16/p16297706/s53514462.txt
's59505494': [59, 450], # files/p16/p16730991/s59505494.txt
's53182247': [59, 412], # files/p16/p16770442/s53182247.txt
's51410602': [47, 320], # files/p17/p17069955/s51410602.txt
's56412866': [522, 822], # files/p17/p17612000/s56412866.txt
's54986978': [59, 306], # files/p17/p17912487/s54986978.txt
's59003148': [262, 505], # files/p17/p17916384/s59003148.txt
's57150433': [61, 394], # files/p18/p18335791/s57150433.txt
's56760320': [219, 457], # files/p18/p18418794/s56760320.txt
's59562049': [158, 348], # files/p18/p18502016/s59562049.txt
's52674888': [145, 296], # files/p19/p19381919/s52674888.txt
's55258338': [192, 568], # files/p13/p13719117/s55258338.txt
's59330497': [140, 655], # files/p15/p15479218/s59330497.txt
's52119491': [179, 454], # files/p17/p17959278/s52119491.txt
# below have no findings at all in the entire report
's58235663': [0, 0], # files/p11/p11573679/s58235663.txt
's50798377': [0, 0], # files/p12/p12632853/s50798377.txt
's54168089': [0, 0], # files/p14/p14463099/s54168089.txt
's53071062': [0, 0], # files/p15/p15774521/s53071062.txt
's56724958': [0, 0], # files/p16/p16175671/s56724958.txt
's54231141': [0, 0], # files/p16/p16312859/s54231141.txt
's53607029': [0, 0], # files/p17/p17603668/s53607029.txt
's52035334': [0, 0], # files/p19/p19349312/s52035334.txt
}
return custom_section_names, custom_indices
| 39.59854 | 79 | 0.589677 | [
"MIT"
] | philip-mueller/lovt | src/data/datasets/mimic_cxr/section_parser.py | 10,850 | Python |
# SVG Path specification parser
import re
from . import path
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile("[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def parse_path(pathdef, current_pos=0j):
# In the SVG specs, initial movetos are absolute, even if
# specified as 'm'. This is the default behavior here as well.
# But if you pass in a current_pos variable, the initial moveto
# will be relative to that current_pos. This is useful.
elements = list(_tokenize_path(pathdef))
# Reverse for easy use of .pop()
elements.reverse()
segments = path.Path()
start_pos = None
command = None
while elements:
if elements[-1] in COMMANDS:
# New command.
last_command = command # Used by S and T
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
if command == 'M':
# Moveto command.
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
# when M is called, reset start_pos
# This behavior of Z is defined in svg spec:
# http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
start_pos = current_pos
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
elif command == 'Z':
# Close path
segments.append(path.Line(current_pos, start_pos))
segments.closed = True
current_pos = start_pos
start_pos = None
command = None # You can't have implicit commands after closing.
elif command == 'L':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if not absolute:
pos += current_pos
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
pos += current_pos.imag * 1j
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'C':
try:
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
            except ValueError:
                # Malformed path data: report the remaining tokens and re-raise
                # instead of continuing with undefined control points.
                print(elements)
                raise
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
control1 = current_pos
else:
# The first control point is assumed to be the reflection of
# the second control point on the previous command relative
# to the current point.
control1 = current_pos + current_pos - segments[-1].control2
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control += current_pos
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
control = current_pos
else:
# The control point is assumed to be the reflection of
# the control point on the previous command relative
# to the current point.
control = current_pos + current_pos - segments[-1].control
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = float(elements.pop()) + float(elements.pop()) * 1j
rotation = float(elements.pop())
arc = float(elements.pop())
sweep = float(elements.pop())
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
return segments
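def _example_parse_path():
    # Illustrative sketch (not part of the original module): a simple square
    # starting at the origin; the trailing Z appends a closing Line segment
    # back to the start point and marks the path as closed.
    square = parse_path("M 0 0 L 10 0 L 10 10 L 0 10 Z")
    return len(square), square.closed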
| 36.188482 | 85 | 0.54702 | [
"MIT"
] | judithfan/sketch-rnn | svg/path/parser.py | 6,912 | Python |
from circularImportA import a
def f():
print(a)
b = 2
| 8.571429 | 29 | 0.633333 | ["MIT"] | Daniel-Chin/mini-Python | experiments/circularImportB.py | 60 | Python
from ._visible import VisibleValidator
from ._valuesuffix import ValuesuffixValidator
from ._valueformat import ValueformatValidator
from ._uid import UidValidator
from ._textfont import TextfontValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._orientation import OrientationValidator
from ._opacity import OpacityValidator
from ._node import NodeValidator
from ._name import NameValidator
from ._link import LinkValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._domain import DomainValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._arrangement import ArrangementValidator
| 39.875 | 52 | 0.879833 | ["MIT"] | Dexter2772/bitcoin-tracker | myvenv/lib/python3.7/site-packages/plotly/validators/sankey/__init__.py | 957 | Python
# -*- coding: utf-8 -*-
from tests.integration import TestsBase
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
from chsdi.views.catalog import create_digraph
from chsdi.lib.filters import filter_by_geodata_staging
class TestCatalogService(TestsBase):
def test_nodes_connection(self):
try:
geodata_staging = self.testapp.app.registry.settings['geodata_staging']
session = scoped_session(sessionmaker())
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
query = session.query(Catalog).filter(Catalog.topic == topic)\
.order_by(Catalog.orderKey)
query = filter_by_geodata_staging(query, Catalog.staging, geodata_staging)
rows = query.all()
if (rows):
graph, meta, root_id = create_digraph(rows, 'fr')
nodes = graph.nodes()
if len(nodes) != len(rows):
for row in rows:
if row.id not in nodes:
raise Exception('%s %s %s is unconnected leaf' % (topic, row.category, row.layerBodId))
finally:
if session:
session.close()
def test_catalog_no_params(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', status=200)
self.assertTrue(resp.content_type == 'application/json')
self.assertTrue('root' in resp.json['results'])
self.assertTrue('children' in resp.json['results']['root'])
self.assertTrue('selectedOpen' in resp.json['results']['root']['children'][0])
self.assertTrue('category' in resp.json['results']['root'])
def test_catalog_with_callback(self):
resp = self.testapp.get('/rest/services/blw/CatalogServer', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_catalog_existing_map_no_catalog(self):
self.testapp.get('/rest/services/all/CatalogServer', status=404)
def test_catalog_wrong_map(self):
self.testapp.get('/rest/services/foo/CatalogServer', status=400)
def test_catalog_ordering(self):
resp = self.testapp.get('/rest/services/inspire/CatalogServer', params={'lang': 'en'}, status=200)
self.assertEqual(resp.content_type, 'application/json')
self.assertTrue('AGNES' in resp.json['results']['root']['children'][0]['children'][0]['children'][0]['label'])
self.assertTrue('Geoid in CH1903' in resp.json['results']['root']['children'][0]['children'][0]['children'][1]['label'])
def test_catalog_languages(self):
for lang in ('de', 'fr', 'it', 'rm', 'en'):
link = '/rest/services/ech/CatalogServer?lang=' + lang
resp = self.testapp.get(link)
self.assertEqual(resp.status_int, 200, link)
def test_layersconfig_with_callback(self):
resp = self.testapp.get('/rest/services/blw/MapServer/layersConfig', params={'callback': 'cb_'}, status=200)
self.assertEqual(resp.content_type, 'application/javascript')
def test_all_catalogs(self):
def existInList(node, l):
found = False
for entry in l:
if entry.id == node.get('id'):
found = True
break
if not found:
                print(node.get('id'))
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from chsdi.models.bod import Catalog
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
# We fix staging for next calls to prod
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
# Get catalog
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
# Get flat catalog table entries
query = DBSession.query(Catalog).filter(Catalog.topic == topic).filter(Catalog.staging == u'prod')
entries = query.all()
# Check if every node in the catalog is in view_catalog of db
self.assertTrue(existInList(catalog.json['results']['root'], entries))
finally:
# reset staging to previous setting
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
def test_catalogs_with_layersconfig(self):
def existInList(node, l):
if node.get('category') != 'layer':
return True
found = False
for entry in l:
if entry == node.get('layerBodId'):
found = True
break
if not found:
                print(node.get('layerBodId'))
return False
if 'children' in node:
for child in node.get('children'):
if not existInList(child, l):
return False
return True
from sqlalchemy.orm import scoped_session, sessionmaker
DBSession = scoped_session(sessionmaker())
old_staging = self.testapp.app.registry.settings['geodata_staging']
# We fix staging for next calls to prod
self.testapp.app.registry.settings['geodata_staging'] = u'prod'
try:
topics = self.testapp.get('/rest/services', status=200)
for t in topics.json['topics']:
topic = t.get('id')
# Get catalog
catalog = self.testapp.get('/rest/services/' + topic + '/CatalogServer', status=200)
# Get LayersConfig for this topic
layersconf = self.testapp.get('/rest/services/' + topic + '/MapServer/layersConfig', status=200)
# Check if all layers of catalog are in LayersConfig
self.assertTrue(existInList(catalog.json['results']['root'], layersconf.json), 'For Topic: ' + topic)
finally:
# reset staging to previous setting
self.testapp.app.registry.settings['geodata_staging'] = old_staging
DBSession.close()
| 44.072368 | 128 | 0.587252 | ["BSD-3-Clause"] | fredj/mf-chsdi3 | tests/integration/test_catalog.py | 6,699 | Python
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import typing as t
from elastic_transport import ObjectApiResponse
from ._base import NamespacedClient
from .utils import _rewrite_parameters
class TextStructureClient(NamespacedClient):
@_rewrite_parameters(
body_name="text_files",
)
def find_structure(
self,
*,
text_files: t.Union[t.List[t.Any], t.Tuple[t.Any, ...]],
charset: t.Optional[str] = None,
column_names: t.Optional[str] = None,
delimiter: t.Optional[str] = None,
explain: t.Optional[bool] = None,
format: t.Optional[str] = None,
grok_pattern: t.Optional[str] = None,
has_header_row: t.Optional[bool] = None,
line_merge_size_limit: t.Optional[int] = None,
lines_to_sample: t.Optional[int] = None,
quote: t.Optional[str] = None,
should_trim_fields: t.Optional[bool] = None,
timeout: t.Optional[t.Union[int, str]] = None,
timestamp_field: t.Optional[str] = None,
timestamp_format: t.Optional[str] = None,
) -> ObjectApiResponse[t.Any]:
"""
Finds the structure of a text file. The text file must contain data that is suitable
to be ingested into Elasticsearch.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/find-structure.html>`_
:param text_files:
:param charset: The text’s character set. It must be a character set that is
supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE,
windows-1252, or EUC-JP. If this parameter is not specified, the structure
finder chooses an appropriate character set.
:param column_names: If you have set format to delimited, you can specify the
column names in a comma-separated list. If this parameter is not specified,
the structure finder uses the column names from the header row of the text.
            If the text does not have a header row, columns are named "column1", "column2",
"column3", etc.
:param delimiter: If you have set format to delimited, you can specify the character
used to delimit the values in each row. Only a single character is supported;
the delimiter cannot have multiple characters. By default, the API considers
the following possibilities: comma, tab, semi-colon, and pipe (|). In this
default scenario, all rows must have the same number of fields for the delimited
format to be detected. If you specify a delimiter, up to 10% of the rows
can have a different number of columns than the first row.
:param explain: If this parameter is set to true, the response includes a field
named explanation, which is an array of strings that indicate how the structure
finder produced its result.
:param format: The high level structure of the text. Valid values are ndjson,
xml, delimited, and semi_structured_text. By default, the API chooses the
format. In this default scenario, all rows must have the same number of fields
for a delimited format to be detected. If the format is set to delimited
and the delimiter is not set, however, the API tolerates up to 5% of rows
that have a different number of columns than the first row.
:param grok_pattern: If you have set format to semi_structured_text, you can
specify a Grok pattern that is used to extract fields from every message
in the text. The name of the timestamp field in the Grok pattern must match
what is specified in the timestamp_field parameter. If that parameter is
not specified, the name of the timestamp field in the Grok pattern must match
"timestamp". If grok_pattern is not specified, the structure finder creates
a Grok pattern.
:param has_header_row: If you have set format to delimited, you can use this
parameter to indicate whether the column names are in the first row of the
text. If this parameter is not specified, the structure finder guesses based
on the similarity of the first row of the text to other rows.
:param line_merge_size_limit: The maximum number of characters in a message when
lines are merged to form messages while analyzing semi-structured text. If
you have extremely long messages you may need to increase this, but be aware
that this may lead to very long processing times if the way to group lines
into messages is misdetected.
:param lines_to_sample: The number of lines to include in the structural analysis,
            starting from the beginning of the text. The minimum is 2; if the value of
this parameter is greater than the number of lines in the text, the analysis
proceeds (as long as there are at least two lines in the text) for all of
the lines.
:param quote: If you have set format to delimited, you can specify the character
used to quote the values in each row if they contain newlines or the delimiter
character. Only a single character is supported. If this parameter is not
specified, the default value is a double quote ("). If your delimited text
format does not use quoting, a workaround is to set this argument to a character
that does not appear anywhere in the sample.
:param should_trim_fields: If you have set format to delimited, you can specify
whether values between delimiters should have whitespace trimmed from them.
If this parameter is not specified and the delimiter is pipe (|), the default
value is true. Otherwise, the default value is false.
:param timeout: Sets the maximum amount of time that the structure analysis make
take. If the analysis is still running when the timeout expires then it will
be aborted.
:param timestamp_field: Optional parameter to specify the timestamp field in
the file
:param timestamp_format: The Java time format of the timestamp field in the text.
"""
if text_files is None:
raise ValueError("Empty value passed for parameter 'text_files'")
__path = "/_text_structure/find_structure"
__query: t.Dict[str, t.Any] = {}
if charset is not None:
__query["charset"] = charset
if column_names is not None:
__query["column_names"] = column_names
if delimiter is not None:
__query["delimiter"] = delimiter
if explain is not None:
__query["explain"] = explain
if format is not None:
__query["format"] = format
if grok_pattern is not None:
__query["grok_pattern"] = grok_pattern
if has_header_row is not None:
__query["has_header_row"] = has_header_row
if line_merge_size_limit is not None:
__query["line_merge_size_limit"] = line_merge_size_limit
if lines_to_sample is not None:
__query["lines_to_sample"] = lines_to_sample
if quote is not None:
__query["quote"] = quote
if should_trim_fields is not None:
__query["should_trim_fields"] = should_trim_fields
if timeout is not None:
__query["timeout"] = timeout
if timestamp_field is not None:
__query["timestamp_field"] = timestamp_field
if timestamp_format is not None:
__query["timestamp_format"] = timestamp_format
__body = text_files
__headers = {
"accept": "application/json",
"content-type": "application/x-ndjson",
}
return self.perform_request( # type: ignore[return-value]
"POST", __path, params=__query, headers=__headers, body=__body
)
| 55.238994 | 96 | 0.669703 | ["Apache-2.0"] | neubloc/elasticsearch-py | elasticsearch/_sync/client/text_structure.py | 8,785 | Python
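The find_structure wrapper above posts newline-delimited documents to the _text_structure/find_structure endpoint and lets Elasticsearch infer a structure. A hedged usage sketch (the endpoint URL and sample records below are placeholders, not values taken from this file):
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint
sample_records = [
    {"message": "2024-01-01T00:00:00Z INFO service started"},
    {"message": "2024-01-01T00:00:05Z WARN retrying connection"},
]
response = client.text_structure.find_structure(
    text_files=sample_records,  # becomes the ndjson request body (see body_name above)
    format="ndjson",
    lines_to_sample=1000,
)
# "mappings" is one of the fields the structure finder is documented to return.
print(response["mappings"])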
"""
Tests for the utils module
"""
import datetime
import operator as op
from math import ceil
from types import SimpleNamespace
import pytest
import pytz
from mitol.common.utils import (
is_near_now,
has_equal_properties,
first_or_none,
first_matching_item,
max_or_none,
partition_to_lists,
unique,
unique_ignore_case,
item_at_index_or_none,
all_equal,
all_unique,
has_all_keys,
group_into_dict,
now_in_utc,
filter_dict_by_key_set,
chunks,
get_error_response_summary,
)
from ecommerce.factories import Order, ReceiptFactory
from main.utils import (
get_field_names,
is_empty_file,
serialize_model_object,
is_blank,
partition_around_index,
format_month_day,
)
from main.test_utils import format_as_iso8601, MockResponse
def test_now_in_utc():
"""now_in_utc() should return the current time set to the UTC time zone"""
now = now_in_utc()
assert is_near_now(now)
assert now.tzinfo == pytz.UTC
def test_is_near_now():
"""
Test is_near_now for now
"""
now = datetime.datetime.now(tz=pytz.UTC)
assert is_near_now(now) is True
later = now + datetime.timedelta(0, 6)
assert is_near_now(later) is False
earlier = now - datetime.timedelta(0, 6)
assert is_near_now(earlier) is False
def test_first_or_none():
"""
Assert that first_or_none returns the first item in an iterable or None
"""
assert first_or_none([]) is None
assert first_or_none(set()) is None
assert first_or_none([1, 2, 3]) == 1
assert first_or_none(range(1, 5)) == 1
def test_first_matching_item():
"""first_matching_item should return the first item where the predicate function returns true"""
assert first_matching_item([1, 2, 3, 4, 5], lambda x: x % 2 == 0) == 2
assert first_matching_item([], lambda x: True) is None
assert first_matching_item(["x", "y", "z"], lambda x: False) is None
def test_max_or_none():
"""
Assert that max_or_none returns the max of some iterable, or None if the iterable has no items
"""
assert max_or_none(i for i in [5, 4, 3, 2, 1]) == 5
assert max_or_none([1, 3, 5, 4, 2]) == 5
assert max_or_none([]) is None
def test_unique():
"""
Assert that unique() returns a generator of unique elements from a provided iterable
"""
assert list(unique([1, 2, 2, 3, 3, 0, 3])) == [1, 2, 3, 0]
assert list(unique(("a", "b", "a", "c", "C", None))) == ["a", "b", "c", "C", None]
def test_unique_ignore_case():
"""
Assert that unique_ignore_case() returns a generator of unique lowercase strings from a
provided iterable
"""
assert list(unique_ignore_case(["ABC", "def", "AbC", "DEf"])) == ["abc", "def"]
def test_item_at_index_or_none():
"""
Assert that item_at_index_or_none returns an item at a given index, or None if that index
doesn't exist
"""
arr = [1, 2, 3]
assert item_at_index_or_none(arr, 1) == 2
assert item_at_index_or_none(arr, 10) is None
def test_all_equal():
"""
Assert that all_equal returns True if all of the provided args are equal to each other
"""
assert all_equal(1, 1, 1) is True
assert all_equal(1, 2, 1) is False
assert all_equal() is True
def test_all_unique():
"""
Assert that all_unique returns True if all of the items in the iterable argument are unique
"""
assert all_unique([1, 2, 3, 4]) is True
assert all_unique((1, 2, 3, 4)) is True
assert all_unique([1, 2, 3, 1]) is False
def test_has_all_keys():
"""
Assert that has_all_keys returns True if the given dict has all of the specified keys
"""
d = {"a": 1, "b": 2, "c": 3}
assert has_all_keys(d, ["a", "c"]) is True
assert has_all_keys(d, ["a", "z"]) is False
def test_is_blank():
"""
Assert that is_blank returns True if the given value is None or a blank string
"""
assert is_blank("") is True
assert is_blank(None) is True
assert is_blank(0) is False
assert is_blank(" ") is False
assert is_blank(False) is False
assert is_blank("value") is False
def test_group_into_dict():
"""
Assert that group_into_dict takes an iterable of items and returns a dictionary of those items
grouped by generated keys
"""
class Car: # pylint: disable=missing-docstring
def __init__(self, make, model):
self.make = make
self.model = model
cars = [
Car(make="Honda", model="Civic"),
Car(make="Honda", model="Accord"),
Car(make="Ford", model="F150"),
Car(make="Ford", model="Focus"),
Car(make="Jeep", model="Wrangler"),
]
grouped_cars = group_into_dict(cars, key_fn=op.attrgetter("make"))
assert set(grouped_cars.keys()) == {"Honda", "Ford", "Jeep"}
assert set(grouped_cars["Honda"]) == set(cars[0:2])
assert set(grouped_cars["Ford"]) == set(cars[2:4])
assert grouped_cars["Jeep"] == [cars[4]]
nums = [1, 2, 3, 4, 5, 6]
grouped_nums = group_into_dict(nums, key_fn=lambda num: (num % 2 == 0))
assert grouped_nums.keys() == {True, False}
assert set(grouped_nums[True]) == {2, 4, 6}
assert set(grouped_nums[False]) == {1, 3, 5}
def test_filter_dict_by_key_set():
"""
Test that filter_dict_by_key_set returns a dict with only the given keys
"""
d = {"a": 1, "b": 2, "c": 3, "d": 4}
assert filter_dict_by_key_set(d, {"a", "c"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"a", "c", "nonsense"}) == {"a": 1, "c": 3}
assert filter_dict_by_key_set(d, {"nonsense"}) == {}
def test_partition_to_lists():
"""
Assert that partition_to_lists splits an iterable into two lists according to a condition
"""
nums = [1, 2, 1, 3, 1, 4, 0, None, None]
not_ones, ones = partition_to_lists(nums, lambda n: n == 1)
assert not_ones == [2, 3, 4, 0, None, None]
assert ones == [1, 1, 1]
# The default predicate is the standard Python bool() function
falsey, truthy = partition_to_lists(nums)
assert falsey == [0, None, None]
assert truthy == [1, 2, 1, 3, 1, 4]
def test_partition_around_index():
"""partition_around_index should split a list into two lists around an index"""
assert partition_around_index([1, 2, 3, 4], 2) == ([1, 2], [4])
assert partition_around_index([1, 2, 3, 4], 0) == ([], [2, 3, 4])
assert partition_around_index([1, 2, 3, 4], 3) == ([1, 2, 3], [])
with pytest.raises(ValueError):
partition_around_index([1, 2, 3, 4], 4)
@pytest.mark.parametrize(
"content,content_type,exp_summary_content,exp_url_in_summary",
[
['{"bad": "response"}', "application/json", '{"bad": "response"}', False],
["plain text", "text/plain", "plain text", False],
[
"<div>HTML content</div>",
"text/html; charset=utf-8",
"(HTML body ignored)",
True,
],
],
)
def test_get_error_response_summary(
content, content_type, exp_summary_content, exp_url_in_summary
):
"""
get_error_response_summary should provide a summary of an error HTTP response object with the correct bits of
information depending on the type of content.
"""
status_code = 400
url = "http://example.com"
mock_response = MockResponse(
status_code=status_code, content=content, content_type=content_type, url=url
)
summary = get_error_response_summary(mock_response)
assert f"Response - code: {status_code}" in summary
assert f"content: {exp_summary_content}" in summary
assert (f"url: {url}" in summary) is exp_url_in_summary
@pytest.mark.django_db
def test_jsonfield(settings):
"""
Test a model with a JSONField is handled correctly
"""
settings.CYBERSOURCE_SECURITY_KEY = "asdf"
receipt = ReceiptFactory.create()
assert serialize_model_object(receipt) == {
"created_on": format_as_iso8601(receipt.created_on),
"data": receipt.data,
"id": receipt.id,
"updated_on": format_as_iso8601(receipt.updated_on),
"order": receipt.order.id,
}
def test_get_field_names():
"""
Assert that get_field_names does not include related fields
"""
assert set(get_field_names(Order)) == {
"user",
"status",
"total_price_paid",
"application",
"created_on",
"updated_on",
"payment_type",
}
def test_is_empty_file():
"""is_empty_file should return True if the given object is None or has a blank name property"""
fake_file = None
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="")
assert is_empty_file(fake_file) is True
fake_file = SimpleNamespace(name="path/to/file.txt")
assert is_empty_file(fake_file) is False
def test_chunks():
"""
test for chunks
"""
input_list = list(range(113))
output_list = []
for nums in chunks(input_list):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=1):
output_list += nums
assert output_list == input_list
output_list = []
for nums in chunks(input_list, chunk_size=124):
output_list += nums
assert output_list == input_list
def test_chunks_iterable():
"""
test that chunks works on non-list iterables too
"""
count = 113
input_range = range(count)
chunk_output = []
for chunk in chunks(input_range, chunk_size=10):
chunk_output.append(chunk)
assert len(chunk_output) == ceil(113 / 10)
range_list = []
for chunk in chunk_output:
range_list += chunk
assert range_list == list(range(count))
def test_format_month_day():
"""
format_month_day should format the month and day from a datetime
"""
dt = datetime.datetime(year=2020, month=1, day=1, tzinfo=pytz.UTC)
assert format_month_day(dt) == "Jan 1"
assert format_month_day(dt, month_fmt="%b") == "Jan 1"
assert format_month_day(dt, month_fmt="%B") == "January 1"
def test_has_equal_properties():
"""
Assert that has_equal_properties returns True if an object has equivalent properties to a given dict
"""
obj = SimpleNamespace(a=1, b=2, c=3)
assert has_equal_properties(obj, {}) is True
assert has_equal_properties(obj, dict(a=1, b=2)) is True
assert has_equal_properties(obj, dict(a=1, b=2, c=3)) is True
assert has_equal_properties(obj, dict(a=2)) is False
assert has_equal_properties(obj, dict(d=4)) is False
| 30.435159 | 113 | 0.648802 | ["BSD-3-Clause"] | mitodl/bootcamp-ecommerce | main/utils_test.py | 10,561 | Python
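test_group_into_dict above pins down the expected behaviour of group_into_dict. For illustration, a minimal sketch of an implementation that satisfies those assertions (this is not the mitol.common.utils source, just one way to meet the contract the test describes):
from collections import defaultdict

def group_into_dict(items, key_fn):
    # Group an iterable into a dict keyed by key_fn(item); items keep their
    # order of appearance within each group.
    grouped = defaultdict(list)
    for item in items:
        grouped[key_fn(item)].append(item)
    return dict(grouped)

# Mirrors the test: group numbers by parity.
assert group_into_dict([1, 2, 3, 4, 5, 6], key_fn=lambda n: n % 2 == 0) == {
    False: [1, 3, 5],
    True: [2, 4, 6],
}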
"""
Test admin tools
"""
from io import BytesIO, TextIOWrapper
import csv
import six
import zipfile
import django
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test import Client, TestCase
import gdpr_assist
from .gdpr_assist_tests_app.factories import (
ModelWithPrivacyMetaFactory,
FirstSearchModelFactory,
SecondSearchModelFactory,
)
from .gdpr_assist_tests_app.models import (
FirstSearchModel,
SecondSearchModel,
)
model_root_url = '/admin/gdpr_assist_tests_app/modelwithprivacymeta/'
tool_root_url = '/admin/gdpr_assist/personaldata/'
class AdminTestCase(TestCase):
def setUp(self):
self.client = Client()
User = get_user_model()
user = User.objects.create_superuser(
username='test',
email='[email protected]',
password='test',
)
if django.VERSION <= (1, 9):
# Django 1.8 support - no client.force_login
self.client.login(username='test', password='test')
else:
# Django 1.9+
self.client.force_login(user)
class TestModelAdmin(AdminTestCase):
def test_changelist__anonymise_action_present(self):
ModelWithPrivacyMetaFactory.create()
response = self.client.get(model_root_url)
self.assertContains(response, '<option value="anonymise">')
def test_anonymise_action_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create()
obj_2 = ModelWithPrivacyMetaFactory.create()
response = self.client.post(
model_root_url,
{
'action': 'anonymise',
'_selected_action': [obj_1.pk, obj_2.pk],
},
follow=True,
)
test_url = '{root_url}anonymise/?ids={pk1},{pk2}'.format(
root_url=model_root_url,
pk1=obj_1.pk,
pk2=obj_2.pk,
)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(
test_url
))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(test_url, 302)],
)
self.assertContains(
response,
'<p>Are you sure you want to anonymise the following Model With Privacy Metas:</p>',
)
self.assertContains(
response,
'<input type="hidden" name="ids" value="{pk1},{pk2}">'.format(
pk1=obj_1.pk,
pk2=obj_2.pk,
),
)
def test_anonymise_view_submit__redirect_to_anonymise_view(self):
obj_1 = ModelWithPrivacyMetaFactory.create(anonymised=False)
obj_2 = ModelWithPrivacyMetaFactory.create(anonymised=False)
response = self.client.post(
model_root_url + 'anonymise/',
{
'ids': ','.join([str(obj_1.pk), str(obj_2.pk)]),
},
follow=True,
)
obj_1.refresh_from_db()
obj_2.refresh_from_db()
self.assertTrue(obj_1.anonymised)
self.assertTrue(obj_2.anonymised)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(model_root_url))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(model_root_url, 302)],
)
self.assertContains(
response,
'<li class="success">2 Model With Privacy Metas anonymised</li>',
)
class TestAdminTool(AdminTestCase):
def test_tool_is_available(self):
FirstSearchModelFactory.create()
response = self.client.get(tool_root_url)
self.assertContains(response, '<h1>Personal Data</h1>')
def test_search__returns_correct_results(self):
obj_1 = FirstSearchModelFactory.create(
email='[email protected]',
)
FirstSearchModelFactory.create(
email='[email protected]',
)
response = self.client.post(tool_root_url, {'term': '[email protected]'})
self.assertContains(
response,
'<h2>Gdpr_Assist_Tests_App: First Search Model</h2>',
)
self.assertContains(
response,
'<input name="obj_pk" value="{}-{}" class="action-select" type="checkbox">'.format(
ContentType.objects.get_for_model(FirstSearchModel).pk,
obj_1.pk,
),
)
def test_anonymise__records_anonymised(self):
obj_1 = FirstSearchModelFactory.create(
email='[email protected]',
anonymised=False,
)
obj_2 = FirstSearchModelFactory.create(
email='[email protected]',
anonymised=False,
)
content_type = ContentType.objects.get_for_model(FirstSearchModel).pk
response = self.client.post(
tool_root_url,
{
'term': '[email protected]',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_ANONYMISE,
'obj_pk': ['{}-{}'.format(content_type, obj_1.pk)],
},
follow=True,
)
obj_1.refresh_from_db()
obj_2.refresh_from_db()
self.assertTrue(obj_1.anonymised)
self.assertFalse(obj_2.anonymised)
if django.VERSION <= (1, 9):
# Django 1.8 support - redirects include host
self.assertEqual(len(response.redirect_chain), 1)
self.assertTrue(response.redirect_chain[0][0].endswith(tool_root_url))
self.assertEqual(response.redirect_chain[0][1], 302)
else:
# Django 1.9+
self.assertEqual(
response.redirect_chain,
[(tool_root_url, 302)],
)
def test_export_no_matches__reports_error(self):
# Request an object we know doesn't exist
self.assertEqual(FirstSearchModel.objects.count(), 0)
response = self.client.post(
tool_root_url,
{
'term': '[email protected]',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
'obj_pk': [
'{}-1'.format(
ContentType.objects.get_for_model(FirstSearchModel).pk,
),
],
},
)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'<li class="error">No objects selected</li>',
)
def test_export_matches__records_export(self):
# Creating 4 records:
# * One matching in FirstSearchModel so we collect multiple models
# * One not matching in FirstSearchModel so we exclude ignored records
# * Two in SecondSearchModel so we collect multiple records
obj_1 = FirstSearchModelFactory.create(
chars='test1',
email='[email protected]',
anonymised=False,
)
obj_2 = FirstSearchModelFactory.create(
chars='test2',
email='[email protected]',
anonymised=False,
)
obj_3 = SecondSearchModelFactory.create(
chars='test3',
email='[email protected]',
anonymised=False,
)
obj_4 = SecondSearchModelFactory.create(
chars='test4',
email='[email protected]',
anonymised=False,
)
content_type_1 = ContentType.objects.get_for_model(FirstSearchModel).pk
content_type_2 = ContentType.objects.get_for_model(SecondSearchModel).pk
response = self.client.post(
tool_root_url,
{
'term': '[email protected]',
'action': gdpr_assist.admin.tool.PersonalDataSearchForm.ACTION_EXPORT,
'obj_pk': [
'{}-{}'.format(content_type_1, obj_1.pk),
'{}-{}'.format(content_type_2, obj_3.pk),
'{}-{}'.format(content_type_2, obj_4.pk),
],
},
follow=True,
)
# Check they didn't get anonymised by mistake
obj_1.refresh_from_db()
obj_2.refresh_from_db()
obj_3.refresh_from_db()
obj_4.refresh_from_db()
self.assertFalse(obj_1.anonymised)
self.assertFalse(obj_2.anonymised)
self.assertFalse(obj_3.anonymised)
self.assertFalse(obj_4.anonymised)
# Download zip into memory and check it's as expected
zip_data = BytesIO()
zip_data.write(response.content)
zip_file = zipfile.ZipFile(zip_data)
self.assertEqual(
sorted(zip_file.namelist()),
[
'gdpr_assist_tests_app-FirstSearchModel.csv',
'second_search.csv',
],
)
if six.PY2:
mode = 'rU'
else:
mode = 'r'
with zip_file.open(
'gdpr_assist_tests_app-FirstSearchModel.csv',
mode,
) as f:
reader = csv.DictReader(TextIOWrapper(f))
self.assertEqual(
reader.fieldnames,
['email'],
)
rows = list(reader)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0]['email'], '[email protected]')
with zip_file.open('second_search.csv', mode) as f:
reader = csv.DictReader(TextIOWrapper(f))
self.assertEqual(
sorted(reader.fieldnames),
['chars', 'email'],
)
rows = list(reader)
self.assertEqual(len(rows), 2)
self.assertEqual(rows[0]['chars'], 'test3')
self.assertEqual(rows[0]['email'], '[email protected]')
self.assertEqual(rows[1]['chars'], 'test4')
self.assertEqual(rows[1]['email'], '[email protected]')
| 33.171975 | 96 | 0.569796 | ["BSD-3-Clause"] | minervaproject/django-gdpr-assist | tests/test_admin.py | 10,416 | Python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A source and a sink for reading from and writing to text files."""
# pytype: skip-file
from __future__ import absolute_import
import logging
from builtins import object
from builtins import range
from functools import partial
from typing import Optional
from past.builtins import long
from apache_beam.coders import coders
from apache_beam.io import filebasedsink
from apache_beam.io import filebasedsource
from apache_beam.io import iobase
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
from apache_beam.transforms.display import DisplayDataItem
__all__ = [
'ReadFromText',
'ReadFromTextWithFilename',
'ReadAllFromText',
'WriteToText'
]
_LOGGER = logging.getLogger(__name__)
class _TextSource(filebasedsource.FileBasedSource):
r"""A source for reading text files.
Parses a text file as newline-delimited elements. Supports newline delimiters
  '\n' and '\r\n'.
This implementation only supports reading text encoded using UTF-8 or
ASCII.
"""
DEFAULT_READ_BUFFER_SIZE = 8192
class ReadBuffer(object):
# A buffer that gives the buffered data and next position in the
# buffer that should be read.
def __init__(self, data, position):
self._data = data
self._position = position
@property
def data(self):
return self._data
@data.setter
def data(self, value):
assert isinstance(value, bytes)
self._data = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
assert isinstance(value, (int, long))
if value > len(self._data):
raise ValueError(
'Cannot set position to %d since it\'s larger than '
'size of data %d.' % (value, len(self._data)))
self._position = value
def reset(self):
self.data = b''
self.position = 0
def __init__(self,
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder, # type: coders.Coder
buffer_size=DEFAULT_READ_BUFFER_SIZE,
validate=True,
skip_header_lines=0,
header_processor_fns=(None, None)):
"""Initialize a _TextSource
Args:
header_processor_fns (tuple): a tuple of a `header_matcher` function
and a `header_processor` function. The `header_matcher` should
return `True` for all lines at the start of the file that are part
of the file header and `False` otherwise. These header lines will
not be yielded when reading records and instead passed into
`header_processor` to be handled. If `skip_header_lines` and a
`header_matcher` are both provided, the value of `skip_header_lines`
lines will be skipped and the header will be processed from
there.
Raises:
ValueError: if skip_lines is negative.
Please refer to documentation in class `ReadFromText` for the rest
of the arguments.
"""
super(_TextSource, self).__init__(
file_pattern,
min_bundle_size,
compression_type=compression_type,
validate=validate)
self._strip_trailing_newlines = strip_trailing_newlines
self._compression_type = compression_type
self._coder = coder
self._buffer_size = buffer_size
if skip_header_lines < 0:
raise ValueError(
'Cannot skip negative number of header lines: %d' % skip_header_lines)
    elif skip_header_lines > 10:
      _LOGGER.warning(
          'Skipping %d header lines. Skipping large number of header '
          'lines might significantly slow down processing.',
          skip_header_lines)
self._skip_header_lines = skip_header_lines
self._header_matcher, self._header_processor = header_processor_fns
def display_data(self):
parent_dd = super(_TextSource, self).display_data()
parent_dd['strip_newline'] = DisplayDataItem(
self._strip_trailing_newlines, label='Strip Trailing New Lines')
parent_dd['buffer_size'] = DisplayDataItem(
self._buffer_size, label='Buffer Size')
parent_dd['coder'] = DisplayDataItem(self._coder.__class__, label='Coder')
return parent_dd
def read_records(self, file_name, range_tracker):
start_offset = range_tracker.start_position()
read_buffer = _TextSource.ReadBuffer(b'', 0)
next_record_start_position = -1
def split_points_unclaimed(stop_position):
return (
0 if stop_position <= next_record_start_position else
iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)
with self.open_file(file_name) as file_to_read:
position_after_processing_header_lines = (
self._process_header(file_to_read, read_buffer))
start_offset = max(start_offset, position_after_processing_header_lines)
if start_offset > position_after_processing_header_lines:
# Seeking to one position before the start index and ignoring the
        # current line. If start_position is at the beginning of the line, that line
# belongs to the current bundle, hence ignoring that is incorrect.
# Seeking to one byte before prevents that.
file_to_read.seek(start_offset - 1)
read_buffer.reset()
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
if not sep_bounds:
# Could not find a separator after (start_offset - 1). This means that
# none of the records within the file belongs to the current source.
return
_, sep_end = sep_bounds
read_buffer.data = read_buffer.data[sep_end:]
next_record_start_position = start_offset - 1 + sep_end
else:
next_record_start_position = position_after_processing_header_lines
while range_tracker.try_claim(next_record_start_position):
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
# For compressed text files that use an unsplittable OffsetRangeTracker
# with infinity as the end position, above 'try_claim()' invocation
# would pass for an empty record at the end of file that is not
# followed by a new line character. Since such a record is at the last
# position of a file, it should not be a part of the considered range.
# We do this check to ignore such records.
if len(record) == 0 and num_bytes_to_next_record < 0: # pylint: disable=len-as-condition
break
# Record separator must be larger than zero bytes.
assert num_bytes_to_next_record != 0
if num_bytes_to_next_record > 0:
next_record_start_position += num_bytes_to_next_record
yield self._coder.decode(record)
if num_bytes_to_next_record < 0:
break
def _process_header(self, file_to_read, read_buffer):
# Returns a tuple containing the position in file after processing header
# records and a list of decoded header lines that match
# 'header_matcher'.
header_lines = []
position = self._skip_lines(
file_to_read, read_buffer,
self._skip_header_lines) if self._skip_header_lines else 0
if self._header_matcher:
while True:
record, num_bytes_to_next_record = self._read_record(file_to_read,
read_buffer)
decoded_line = self._coder.decode(record)
if not self._header_matcher(decoded_line):
# We've read past the header section at this point, so go back a line.
file_to_read.seek(position)
read_buffer.reset()
break
header_lines.append(decoded_line)
if num_bytes_to_next_record < 0:
break
position += num_bytes_to_next_record
if self._header_processor:
self._header_processor(header_lines)
return position
def _find_separator_bounds(self, file_to_read, read_buffer):
# Determines the start and end positions within 'read_buffer.data' of the
# next separator starting from position 'read_buffer.position'.
# Currently supports following separators.
# * '\n'
# * '\r\n'
# This method may increase the size of buffer but it will not decrease the
# size of it.
current_pos = read_buffer.position
while True:
if current_pos >= len(read_buffer.data):
# Ensuring that there are enough bytes to determine if there is a '\n'
# at current_pos.
if not self._try_to_ensure_num_bytes_in_buffer(
file_to_read, read_buffer, current_pos + 1):
return
# Using find() here is more efficient than a linear scan of the byte
# array.
next_lf = read_buffer.data.find(b'\n', current_pos)
if next_lf >= 0:
if next_lf > 0 and read_buffer.data[next_lf - 1:next_lf] == b'\r':
# Found a '\r\n'. Accepting that as the next separator.
return (next_lf - 1, next_lf + 1)
else:
# Found a '\n'. Accepting that as the next separator.
return (next_lf, next_lf + 1)
current_pos = len(read_buffer.data)
def _try_to_ensure_num_bytes_in_buffer(
self, file_to_read, read_buffer, num_bytes):
# Tries to ensure that there are at least num_bytes bytes in the buffer.
# Returns True if this can be fulfilled, returned False if this cannot be
# fulfilled due to reaching EOF.
while len(read_buffer.data) < num_bytes:
read_data = file_to_read.read(self._buffer_size)
if not read_data:
return False
read_buffer.data += read_data
return True
  def _skip_lines(self, file_to_read, read_buffer, num_lines):
    """Skip num_lines from file_to_read and return the start position of the line after them."""
if file_to_read.tell() > 0:
file_to_read.seek(0)
position = 0
for _ in range(num_lines):
_, num_bytes_to_next_record = self._read_record(file_to_read, read_buffer)
if num_bytes_to_next_record < 0:
# We reached end of file. It is OK to just break here
# because subsequent _read_record will return same result.
break
position += num_bytes_to_next_record
return position
def _read_record(self, file_to_read, read_buffer):
# Returns a tuple containing the current_record and number of bytes to the
# next record starting from 'read_buffer.position'. If EOF is
# reached, returns a tuple containing the current record and -1.
if read_buffer.position > self._buffer_size:
# read_buffer is too large. Truncating and adjusting it.
read_buffer.data = read_buffer.data[read_buffer.position:]
read_buffer.position = 0
record_start_position_in_buffer = read_buffer.position
sep_bounds = self._find_separator_bounds(file_to_read, read_buffer)
read_buffer.position = sep_bounds[1] if sep_bounds else len(
read_buffer.data)
if not sep_bounds:
# Reached EOF. Bytes up to the EOF is the next record. Returning '-1' for
# the starting position of the next record.
return (read_buffer.data[record_start_position_in_buffer:], -1)
if self._strip_trailing_newlines:
# Current record should not contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[0]],
sep_bounds[1] - record_start_position_in_buffer)
else:
# Current record should contain the separator.
return (
read_buffer.data[record_start_position_in_buffer:sep_bounds[1]],
sep_bounds[1] - record_start_position_in_buffer)
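# Illustration of the separator search used by _find_separator_bounds above:
# bytes.find locates the next b'\n', then a one-byte look-behind decides whether
# the separator is b'\r\n' or a bare b'\n'. This standalone sketch is for
# explanation only and is not part of the Beam API.
def _demo_separator_bounds(data, start=0):
  next_lf = data.find(b'\n', start)
  if next_lf < 0:
    return None  # no separator in the buffer
  if next_lf > 0 and data[next_lf - 1:next_lf] == b'\r':
    return (next_lf - 1, next_lf + 1)  # '\r\n'
  return (next_lf, next_lf + 1)  # bare '\n'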
class _TextSourceWithFilename(_TextSource):
def read_records(self, file_name, range_tracker):
records = super(_TextSourceWithFilename,
self).read_records(file_name, range_tracker)
for record in records:
yield (file_name, record)
class _TextSink(filebasedsink.FileBasedSink):
"""A sink to a GCS or local text file or files."""
def __init__(self,
file_path_prefix,
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None,
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
"""Initialize a _TextSink.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix. In most cases,
only this argument is specified and num_shards, shard_name_template, and
file_name_suffix use default values.
file_name_suffix: Suffix for the files written.
append_trailing_newlines: indicate whether this sink should write an
additional newline char after writing each element.
num_shards: The number of files (shards) used for output. If not set, the
service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
coder: Coder used to encode each line.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the final file path's
extension (as determined by file_path_prefix, file_name_suffix,
num_shards and shard_name_template) will be used to detect the
compression.
header: String to write at beginning of file as a header. If not None and
append_trailing_newlines is set, '\n' will be added.
Returns:
A _TextSink object usable for writing.
"""
super(_TextSink, self).__init__(
file_path_prefix,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
coder=coder,
mime_type='text/plain',
compression_type=compression_type)
self._append_trailing_newlines = append_trailing_newlines
self._header = header
def open(self, temp_path):
file_handle = super(_TextSink, self).open(temp_path)
if self._header is not None:
file_handle.write(coders.ToStringCoder().encode(self._header))
if self._append_trailing_newlines:
file_handle.write(b'\n')
return file_handle
def display_data(self):
dd_parent = super(_TextSink, self).display_data()
dd_parent['append_newline'] = DisplayDataItem(
self._append_trailing_newlines, label='Append Trailing New Lines')
return dd_parent
def write_encoded_record(self, file_handle, encoded_value):
"""Writes a single encoded record."""
file_handle.write(encoded_value)
if self._append_trailing_newlines:
file_handle.write(b'\n')
def _create_text_source(
file_pattern=None,
min_bundle_size=None,
compression_type=None,
strip_trailing_newlines=None,
coder=None,
skip_header_lines=None):
return _TextSource(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
validate=False,
skip_header_lines=skip_header_lines)
class ReadAllFromText(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of text files.
Reads a ``PCollection`` of text files or file patterns and and produces a
``PCollection`` of strings.
Parses a text file as newline-delimited elements, by default assuming
UTF-8 encoding. Supports newline delimiters '\\n' and '\\r\\n'.
This implementation only supports reading text encoded using UTF-8 or ASCII.
This does not support other encodings such as UTF-16 or UTF-32.
"""
DEFAULT_DESIRED_BUNDLE_SIZE = 64 * 1024 * 1024 # 64MB
def __init__(
self,
min_bundle_size=0,
desired_bundle_size=DEFAULT_DESIRED_BUNDLE_SIZE,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
skip_header_lines=0,
**kwargs):
"""Initialize the ``ReadAllFromText`` transform.
Args:
min_bundle_size: Minimum size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
desired_bundle_size: Desired size of bundles that should be generated when
splitting this source into bundles. See ``FileBasedSource`` for more
details.
compression_type: Used to handle compressed input files. Typical value
is ``CompressionTypes.AUTO``, in which case the underlying file_path's
extension will be used to detect the compression.
strip_trailing_newlines: Indicates whether this source should remove
the newline char in each line it reads before decoding that line.
validate: flag to verify that the files exist during the pipeline
creation time.
skip_header_lines: Number of header lines to skip. Same number is skipped
from each source file. Must be 0 or higher. Large number of skipped
lines might impact performance.
coder: Coder used to decode each line.
"""
super(ReadAllFromText, self).__init__(**kwargs)
source_from_file = partial(
_create_text_source,
min_bundle_size=min_bundle_size,
compression_type=compression_type,
strip_trailing_newlines=strip_trailing_newlines,
coder=coder,
skip_header_lines=skip_header_lines)
self._desired_bundle_size = desired_bundle_size
self._min_bundle_size = min_bundle_size
self._compression_type = compression_type
self._read_all_files = ReadAllFiles(
True,
compression_type,
desired_bundle_size,
min_bundle_size,
source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromText(PTransform):
r"""A :class:`~apache_beam.transforms.ptransform.PTransform` for reading text
files.
Parses a text file as newline-delimited elements, by default assuming
``UTF-8`` encoding. Supports newline delimiters ``\n`` and ``\r\n``.
This implementation only supports reading text encoded using ``UTF-8`` or
``ASCII``.
This does not support other encodings such as ``UTF-16`` or ``UTF-32``.
"""
_source_class = _TextSource
def __init__(
self,
file_pattern=None,
min_bundle_size=0,
compression_type=CompressionTypes.AUTO,
strip_trailing_newlines=True,
coder=coders.StrUtf8Coder(), # type: coders.Coder
validate=True,
skip_header_lines=0,
**kwargs):
"""Initialize the :class:`ReadFromText` transform.
Args:
file_pattern (str): The file path to read from as a local file path or a
GCS ``gs://`` path. The path can contain glob characters
(``*``, ``?``, and ``[...]`` sets).
min_bundle_size (int): Minimum size of bundles that should be generated
when splitting this source into bundles. See
:class:`~apache_beam.io.filebasedsource.FileBasedSource` for more
details.
compression_type (str): Used to handle compressed input files.
Typical value is :attr:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
underlying file_path's extension will be used to detect the compression.
strip_trailing_newlines (bool): Indicates whether this source should
remove the newline char in each line it reads before decoding that line.
validate (bool): flag to verify that the files exist during the pipeline
creation time.
skip_header_lines (int): Number of header lines to skip. Same number is
skipped from each source file. Must be 0 or higher. Large number of
skipped lines might impact performance.
coder (~apache_beam.coders.coders.Coder): Coder used to decode each line.
"""
super(ReadFromText, self).__init__(**kwargs)
self._source = self._source_class(
file_pattern,
min_bundle_size,
compression_type,
strip_trailing_newlines,
coder,
validate=validate,
skip_header_lines=skip_header_lines)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class ReadFromTextWithFilename(ReadFromText):
r"""A :class:`~apache_beam.io.textio.ReadFromText` for reading text
files returning the name of the file and the content of the file.
  This class extends ReadFromText by just setting a different
_source_class attribute.
"""
_source_class = _TextSourceWithFilename
class WriteToText(PTransform):
"""A :class:`~apache_beam.transforms.ptransform.PTransform` for writing to
text files."""
def __init__(
self,
file_path_prefix, # type: str
file_name_suffix='',
append_trailing_newlines=True,
num_shards=0,
shard_name_template=None, # type: Optional[str]
coder=coders.ToStringCoder(), # type: coders.Coder
compression_type=CompressionTypes.AUTO,
header=None):
r"""Initialize a :class:`WriteToText` transform.
Args:
file_path_prefix (str): The file path to write to. The files written will
begin with this prefix, followed by a shard identifier (see
**num_shards**), and end in a common extension, if given by
**file_name_suffix**. In most cases, only this argument is specified and
**num_shards**, **shard_name_template**, and **file_name_suffix** use
default values.
file_name_suffix (str): Suffix for the files written.
append_trailing_newlines (bool): indicate whether this sink should write
an additional newline char after writing each element.
num_shards (int): The number of files (shards) used for output.
If not set, the service will decide on the optimal number of shards.
Constraining the number of shards is likely to reduce
the performance of a pipeline. Setting this value is not recommended
unless you require a specific number of output files.
shard_name_template (str): A template string containing placeholders for
the shard number and shard count. Currently only ``''`` and
``'-SSSSS-of-NNNNN'`` are patterns accepted by the service.
When constructing a filename for a particular shard number, the
upper-case letters ``S`` and ``N`` are replaced with the ``0``-padded
shard number and shard count respectively. This argument can be ``''``
in which case it behaves as if num_shards was set to 1 and only one file
will be generated. The default pattern used is ``'-SSSSS-of-NNNNN'``.
coder (~apache_beam.coders.coders.Coder): Coder used to encode each line.
compression_type (str): Used to handle compressed output files.
Typical value is :class:`CompressionTypes.AUTO
<apache_beam.io.filesystem.CompressionTypes.AUTO>`, in which case the
final file path's extension (as determined by **file_path_prefix**,
**file_name_suffix**, **num_shards** and **shard_name_template**) will
be used to detect the compression.
header (str): String to write at beginning of file as a header.
If not :data:`None` and **append_trailing_newlines** is set, ``\n`` will
be added.
"""
self._sink = _TextSink(
file_path_prefix,
file_name_suffix,
append_trailing_newlines,
num_shards,
shard_name_template,
coder,
compression_type,
header)
def expand(self, pcoll):
return pcoll | Write(self._sink)
| 39.605304 | 97 | 0.697483 | ["Apache-2.0"] | AhnLab-OSS/beam | sdks/python/apache_beam/io/textio.py | 25,387 | Python
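ReadFromText and WriteToText are used as ordinary transforms in a pipeline; a short usage sketch (the file patterns and labels below are placeholders):
import apache_beam as beam
from apache_beam.io import ReadFromText, WriteToText

with beam.Pipeline() as pipeline:
    lines = pipeline | 'Read' >> ReadFromText('input*.txt', skip_header_lines=1)
    shouted = lines | 'Upper' >> beam.Map(str.upper)
    shouted | 'Write' >> WriteToText(
        'output', file_name_suffix='.txt', header='SHOUTED LINES')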
import os
import click
from flask import Flask, render_template
from flask_wtf.csrf import CSRFError
from telechat.extensions import db, login_manager, csrf, moment
from telechat.blueprints.auth import auth_bp
from telechat.blueprints.chat import chat_bp
from telechat.blueprints.admin import admin_bp
from telechat.blueprints.oauth import oauth_bp
from telechat.settings import config
from telechat.models import User, Message
def register_extensions(app: Flask):
    """Register the required extensions on the Flask app instance"""
    db.init_app(app)  # database ORM
    login_manager.init_app(app)  # login state management
    csrf.init_app(app)  # CSRF token management
    moment.init_app(app)  # datetime formatting
def register_blueprints(app: Flask):
    """Register the required blueprints on the Flask app instance"""
app.register_blueprint(auth_bp)
app.register_blueprint(oauth_bp)
app.register_blueprint(chat_bp)
app.register_blueprint(admin_bp)
def register_errors(app: Flask):
    """Register the required error handlers on the Flask app instance"""
    @app.errorhandler(400)  # Bad Request: the client request has a syntax error the server cannot understand
    def bad_request(e):
        return render_template('error.html', description=e.description, code=e.code), 400
    @app.errorhandler(404)  # Not Found: the server cannot find the resource (page) requested by the client
    def page_not_found(e):
        return render_template('error.html', description=e.description, code=e.code), 404
    @app.errorhandler(500)  # Internal Server Error: the server hit an internal error and cannot fulfill the request
    def internal_server_error(e):
        return render_template('error.html', description="Internal server error, the request could not be completed!", code="500"), 500
    @app.errorhandler(CSRFError)  # CSRF validation failed
    def csrf_error_handle(e):
        return render_template('error.html', description=e.description, code=e.code), 400
def register_commands(app: Flask):
    """Register the required CLI commands on the Flask app instance"""
    @app.cli.command()
    @click.option('--drop', is_flag=True, help="Drop the database before creating it")
    def initdb(drop: bool):
        """Initialize the database schema"""
        if drop:
            # confirm the drop with the user first
            pass
        pass
    @app.cli.command()
    @click.option('--num', default=300, help="Number of messages, defaults to 300")
    def forge(num: int):
        """Generate fake data"""
pass
def create_app(config_name=None):
    """Application factory: create the Flask app, load its configuration, and register extensions, blueprints and other packages"""
    # Load the configuration name from the environment variable
    if config_name is None:
        config_name = os.getenv('FLASK_CONFIG', 'development')
    # Create the Flask app instance, named telechat
    app = Flask('telechat')
    # Load the matching configuration
    app.config.from_object(config[config_name])
    # Register the packages
    register_extensions(app)  # extensions
    register_blueprints(app)  # blueprints
    register_errors(app)  # error handlers
    register_commands(app)  # CLI commands
    # Return the configured Flask app instance
return app
| 28.336957 | 92 | 0.693901 | ["MIT"] | Sefank/telechat | telechat/__init__.py | 3,161 | Python
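The factory above is normally driven through the flask command (FLASK_APP/FLASK_CONFIG environment variables), but it can also be called directly; a minimal usage sketch (the configuration name shown is just the default used in the factory):
from telechat import create_app

app = create_app('development')
if __name__ == '__main__':
    app.run(debug=True)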
from django.urls import path
from . import views
app_name = 'partners'
urlpatterns = [
path('registerCampusPartner/', views.registerCampusPartner, name='registerCampusPartner'),
path('registerCommunityPartner/', views.registerCommunityPartner, name='registerCommunityPartner'),
path('profile/userprofile/', views.userProfile, name='userprofile'),
path('profile/userprofileupdate/', views.userProfileUpdate,name='userprofileupdate'),
path('profile/orgprofile/', views.orgProfile, name='orgprofile'),
path('profile/orgprofileupdate/', views.orgProfileUpdate, name='orgprofileupdate'),
]
| 47.384615 | 104 | 0.761364 | ["MIT"] | Goutham2591/mav-cpi | partners/urls.py | 616 | Python
"""Configuration format loaders"""
import locale
import os
from abc import ABC, abstractmethod
import yaml
from pydantic import create_model
def load_configuration(configuration_file_path, parameters_file_path, bundles):
    """Combine the configuration and parameters and build the configuration object"""
mappings = {}
for bundle in bundles:
if hasattr(bundle, "config_mapping"):
mappings.update(bundle.config_mapping)
loader = YmlLoader()
return loader.build_config(mappings, config_source=configuration_file_path, parameters_source=parameters_file_path)
def is_string(value):
"""Check if the value is actually a string or not"""
try:
float(value)
return False
except ValueError:
if value.lower() in ["true", "false"]:
return False
return True
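# Quick illustration (editor's addition): is_string() decides whether a raw parameter
# value gets wrapped in quotes before being substituted into the YAML template below,
# so numbers and booleans keep their native YAML types. Minimal self-checks:
assert is_string("hello") is True    # plain text -> will be quoted
assert is_string("3.14") is False    # numeric string -> left unquoted
assert is_string("TRUE") is False    # boolean-like -> left unquoted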
class ConfigurationLoader(ABC):
"""Base configuration loader"""
@abstractmethod
def load_parameters(self, source):
"""Convert the source into a dictionary"""
@abstractmethod
def load_config(self, config_source, parameters_source):
"""Prase the config file and build a dictionary"""
def build_config(self, config_mappings, config_source, parameters_source):
"""By using the loaded parameters and loaded config, build the final configuration object"""
configuration_class = create_model('Configuration', **{k: (v, ...) for k, v in config_mappings.items()})
return configuration_class(**self.load_config(config_source, parameters_source))
class YmlLoader(ConfigurationLoader):
"""YML Format parser and config loader"""
def load_parameters(self, source):
"""For YML, the source it the file path"""
with open(source, encoding=locale.getpreferredencoding(False)) as parameters_source:
loaded = yaml.safe_load(parameters_source.read())
if loaded:
for key, value in loaded.items():
if isinstance(value, str):
loaded[key] = "'" + value + "'"
return loaded
return {}
def load_config(self, config_source, parameters_source):
"""For YML, the source it the file path"""
with open(config_source, encoding=locale.getpreferredencoding(False)) as config_source_file:
config_raw = config_source_file.read()
parameters = {}
# Parameters from file
if os.path.isfile(parameters_source):
params = self.load_parameters(parameters_source)
if params is not None:
parameters.update(params)
# Overwrite parameters with the environment variables
env_params = {}
env_params.update(os.environ)
for key, value in env_params.items():
if is_string(value):
env_params[key] = "'" + value + "'"
parameters.update(env_params)
# Replace the parameters
final_configuration = config_raw.format(**parameters)
final_configuration = yaml.safe_load(final_configuration)
return final_configuration if final_configuration is not None else {}
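# --- Usage sketch (editor's addition, not part of the original module) ---
# How load_configuration() is meant to be driven. The bundle class, file names and
# keys below are hypothetical; a bundle only needs a `config_mapping` of
# field name -> type, which build_config() turns into a pydantic model.
class DummyBundle:
    config_mapping = {"port": int, "debug": bool}

# config.yml might contain:          parameters.yml might contain:
#   port: {PORT}                       PORT: 8080
#   debug: false
#
# cfg = load_configuration("config.yml", "parameters.yml", [DummyBundle()])
# print(cfg.port, cfg.debug)   # -> 8080 False, validated by the generated model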
| 38.674699 | 119 | 0.65109 | [
"Apache-2.0"
] | applauncher-team/applauncher | applauncher/configuration.py | 3,210 | Python |
from sqlalchemy import and_
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Sequence
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import VARCHAR
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class InsertExecTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.requires.multivalues_inserts
def test_multivalues_insert(self):
users = self.tables.users
users.insert(
values=[
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
]
).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[0], (7, "jack"))
eq_(rows[1], (8, "ed"))
users.insert(values=[(9, "jack"), (10, "ed")]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[2], (9, "jack"))
eq_(rows[3], (10, "ed"))
def test_insert_heterogeneous_params(self):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2\n"
r"\[SQL: u?INSERT INTO users",
users.insert().execute,
{"user_id": 7, "user_name": "jack"},
{"user_id": 8, "user_name": "ed"},
{"user_id": 9},
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
users.insert().execute(
{"user_id": 7}, {"user_id": 8, "user_name": "ed"}, {"user_id": 9}
)
def _test_lastrow_accessor(self, table_, values, assertvalues):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(engine, table_, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key
):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
):
ret[col.key] = id_
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id_
for col, id_ in zip(
table_.primary_key, result.inserted_primary_key
)
]
)
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against("firebird", "postgresql", "oracle", "mssql"):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={"implicit_returning": False}),
engines.testing_engine(options={"implicit_returning": True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine)
@testing.skip_if("sqlite")
def test_lastrow_accessor_one(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t1",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
),
{"foo": "hi"},
{"id": 1, "foo": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_two(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t2",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi"},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_three(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t3",
metadata,
Column("id", String(40), primary_key=True),
Column("foo", String(30), primary_key=True),
Column("bar", String(30)),
),
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
{"id": "hi", "foo": "thisisfoo", "bar": "thisisbar"},
)
@testing.requires.sequences
def test_lastrow_accessor_four(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4",
metadata,
Column(
"id",
Integer,
Sequence("t4_id_seq", optional=True),
primary_key=True,
),
Column("foo", String(30), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"foo": "hi", "id": 1},
{"id": 1, "foo": "hi", "bar": "hi"},
)
def test_lastrow_accessor_five(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t5",
metadata,
Column("id", String(10), primary_key=True),
Column("bar", String(30), server_default="hi"),
),
{"id": "id1"},
{"id": "id1", "bar": "hi"},
)
@testing.skip_if("sqlite")
def test_lastrow_accessor_six(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t6",
metadata,
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("bar", Integer, primary_key=True),
),
{"bar": 0},
{"id": 1, "bar": 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on("sqlite+pysqlite")
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
"t",
self.metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer),
)
t.create(eng)
r = eng.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, [0])
@testing.fails_on(
"sqlite", "sqlite autoincremnt doesn't work with composite pks"
)
@testing.provide_metadata
def test_misordered_lastrow(self):
metadata = self.metadata
related = Table(
"related",
metadata,
Column("id", Integer, primary_key=True),
mysql_engine="MyISAM",
)
t6 = Table(
"t6",
metadata,
Column(
"manual_id",
Integer,
ForeignKey("related.id"),
primary_key=True,
),
Column(
"auto_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
mysql_engine="MyISAM",
)
metadata.create_all()
r = related.insert().values(id=12).execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = t6.insert().values(manual_id=id_).execute()
eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20),
)
testing.db.execute(stmt)
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self):
users = self.tables.users
result = testing.db.execute(
users.insert().returning(users.c.user_id, users.c.user_name)
)
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr,
result,
"inserted_primary_key",
)
class TableInsertTest(fixtures.TablesTest):
"""test for consistent insert behavior across dialects
regarding the inline=True flag, lower-case 't' tables.
"""
run_create_tables = "each"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, Sequence("t_id_seq"), primary_key=True),
Column("data", String(50)),
Column("x", Integer),
)
def _fixture(self, types=True):
if types:
t = sql.table(
"foo",
sql.column("id", Integer),
sql.column("data", String),
sql.column("x", Integer),
)
else:
t = sql.table(
"foo", sql.column("id"), sql.column("data"), sql.column("x")
)
return t
def _test(self, stmt, row, returning=None, inserted_primary_key=False):
r = testing.db.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
eq_(testing.db.execute(self.tables.foo.select()).first(), row)
def _test_multi(self, stmt, rows, data):
testing.db.execute(stmt, rows)
eq_(
testing.db.execute(
self.tables.foo.select().order_by(self.tables.foo.c.id)
).fetchall(),
data,
)
@testing.requires.sequences
def test_explicit_sequence(self):
t = self._fixture()
self._test(
t.insert().values(
id=func.next_value(Sequence("t_id_seq")), data="data", x=5
),
(1, "data", 5),
)
def test_uppercase(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_inline(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue",
)
def test_uppercase_inline_implicit(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[None],
)
def test_uppercase_implicit(self):
t = self.tables.foo
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
def test_uppercase_direct_params(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[1],
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
def test_direct_params(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.fails_on(
"mssql", "lowercase table doesn't support identity insert disable"
)
@testing.requires.returning
def test_direct_params_returning(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data="data", x=5).returning(t.c.id, t.c.x),
(1, "data", 5),
returning=(1, 5),
)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self):
t = self._fixture()
self._test(
t.insert().values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self):
t = self._fixture()
self._test_multi(
t.insert(),
[
{"data": "d1", "x": 5},
{"data": "d2", "x": 6},
{"data": "d3", "x": 7},
],
[(1, "d1", 5), (2, "d2", 6), (3, "d3", 7)],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self):
t = self._fixture()
self._test(
t.insert(inline=True).values(data="data", x=5),
(1, "data", 5),
inserted_primary_key=[],
)
| 30.968504 | 79 | 0.515446 | [
"MIT"
] | AngelLiang/hacking-sqlalchemy | test/sql/test_insert_exec.py | 15,732 | Python |
import re
import connexion
import logging
import auslib
from os import path
from flask import request
from flask_compress import Compress
from auslib.web.admin.views.problem import problem
from auslib.web.admin.views.validators import BalrogRequestBodyValidator
from raven.contrib.flask import Sentry
from specsynthase.specbuilder import SpecBuilder
try:
from urllib import unquote
except ImportError: # pragma: no cover
from urllib.parse import unquote
log = logging.getLogger(__name__)
current_dir = path.dirname(__file__)
web_dir = path.dirname(auslib.web.__file__)
spec = SpecBuilder().add_spec(path.join(current_dir, 'swagger/api.yaml'))\
.add_spec(path.join(web_dir, 'common/swagger/definitions.yml'))\
.add_spec(path.join(web_dir, 'common/swagger/parameters.yml'))\
.add_spec(path.join(web_dir, 'common/swagger/responses.yml'))
validator_map = {
'body': BalrogRequestBodyValidator
}
connexion_app = connexion.App(__name__, validator_map=validator_map, debug=False)
connexion_app.add_api(spec, strict_validation=True)
app = connexion_app.app
sentry = Sentry()
from auslib.dockerflow import create_dockerflow_endpoints
create_dockerflow_endpoints(app)
# When running under uwsgi, paths will not get decoded before hitting the app.
# We need to handle this ourselves in certain fields, and adding converters
# for them is the best way to do this.
class UnquotingMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
environ["PATH_INFO"] = unquote(environ["PATH_INFO"])
return self.app(environ, start_response)
app.wsgi_app = UnquotingMiddleware(app.wsgi_app)
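# Small illustration (editor's addition): the transformation the middleware above
# applies. Under uwsgi, PATH_INFO can arrive still percent-encoded, so a path segment
# containing a space would otherwise never match its route. The example path is made up.
_example_path = unquote('/api/rules/Firefox%2063.0')
assert _example_path == '/api/rules/Firefox 63.0'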
@app.errorhandler(500)
def ise(error):
log.error("Caught ISE 500 error.")
log.debug("Request path is: %s", request.path)
log.debug("Request environment is: %s", request.environ)
log.debug("Request headers are: %s", request.headers)
return error
# Connexion's error handling sometimes breaks when parameters contain
# unicode characters (https://github.com/zalando/connexion/issues/604).
# To work around, we catch them and return a 400 (which is what Connexion
# would do if it didn't hit this error).
@app.errorhandler(UnicodeEncodeError)
def unicode(error):
return problem(400, "Unicode Error", "Connexion was unable to parse some unicode data correctly.")
@app.after_request
def add_security_headers(response):
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers["Strict-Transport-Security"] = app.config.get("STRICT_TRANSPORT_SECURITY", "max-age=31536000;")
if re.match("^/ui/", request.path):
# This enables swagger-ui to dynamically fetch and
# load the swagger specification JSON file containing API definition and examples.
response.headers['X-Frame-Options'] = 'SAMEORIGIN'
else:
response.headers["Content-Security-Policy"] = \
app.config.get("CONTENT_SECURITY_POLICY", "default-src 'none'; frame-ancestors 'none'")
return response
Compress(app)
| 33.913978 | 116 | 0.737793 | [
"MPL-2.0"
] | catlee/balrog | auslib/web/admin/base.py | 3,154 | Python |
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
""" Test end to end flow with:
Notification service with
Threshold in-built rule plugin
notify-python35 delivery channel plugin
"""
import os
import time
import subprocess
import http.client
import json
from threading import Event
import urllib.parse
import pytest
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
SERVICE = "notification"
SERVICE_NAME = "NotificationServer #1"
NOTIFY_PLUGIN = "python35"
NOTIFY_INBUILT_RULES = ["Threshold"]
def _configure_and_start_service(service_branch, fledge_url, remove_directories):
try:
subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_service {} {}"
.format(service_branch, SERVICE)], shell=True, check=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
assert False, "{} installation failed".format(SERVICE)
finally:
remove_directories("/tmp/fledge-service-{}".format(SERVICE))
# Start service
conn = http.client.HTTPConnection(fledge_url)
data = {"name": SERVICE_NAME,
"type": "notification",
"enabled": "true"
}
conn.request("POST", '/fledge/service', json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert 2 == len(jdoc)
assert SERVICE_NAME == jdoc['name']
def _install_notify_plugin(notify_branch, plugin_name, remove_directories):
try:
subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_plugin {} notify {}".format(
notify_branch, plugin_name)], shell=True, check=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
assert False, "{} installation failed".format(plugin_name)
finally:
remove_directories("/tmp/fledge-notify-{}".format(plugin_name))
def _get_result(fledge_url, path):
conn = http.client.HTTPConnection(fledge_url)
conn.request("GET", path)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
return jdoc
def _verify_service(fledge_url, status):
jdoc = _get_result(fledge_url, '/fledge/service')
srvc = [s for s in jdoc['services'] if s['name'] == SERVICE_NAME]
assert 1 == len(srvc)
svc = srvc[0]
assert SERVICE.capitalize() == svc['type']
assert status == svc['status']
def _verify_audit_log_entry(fledge_url, path, name, severity='INFORMATION', count=1):
jdoc = _get_result(fledge_url, path)
assert len(jdoc['audit'])
assert count == jdoc['totalCount']
audit_detail = jdoc['audit'][0]
assert severity == audit_detail['severity']
assert name == audit_detail['details']['name']
def _add_notification_instance(fledge_url, payload):
conn = http.client.HTTPConnection(fledge_url)
conn.request("POST", '/fledge/notification', json.dumps(payload))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} created successfully".format(payload['name']) == jdoc['result']
def pause_for_x_seconds(x=1):
wait_e = Event()
wait_e.clear()
wait_e.wait(timeout=x)
class TestNotificationService:
def test_service(self, reset_and_start_fledge, service_branch, fledge_url, wait_time, retries, remove_directories):
_configure_and_start_service(service_branch, fledge_url, remove_directories)
retry_count = 0
# only 2 services are up by default, i.e. core and storage
default_registry_count = 2
service_registry = default_registry_count
while service_registry != 3 and retry_count < retries:
svc = _get_result(fledge_url, '/fledge/service')
service_registry = svc['services']
retry_count += 1
pause_for_x_seconds(x=wait_time * 2)
if len(service_registry) == default_registry_count:
assert False, "Failed to start the {} service".format(SERVICE)
_verify_service(fledge_url, status='running')
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME)
def test_get_default_notification_plugins(self, fledge_url, remove_directories):
remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationDelivery')
remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationRule')
remove_directories(os.environ['FLEDGE_ROOT'] + 'cmake_build/C/plugins/notificationDelivery')
remove_directories(os.environ['FLEDGE_ROOT'] + 'cmake_build/C/plugins/notificationRule')
jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
assert [] == jdoc['delivery']
assert 1 == len(jdoc['rules'])
assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
class TestNotificationCRUD:
@pytest.mark.parametrize("data", [
{"name": "Test 1", "description": "Test 1 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "retriggered"},
{"name": "Test2", "description": "Test 2 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "toggled"},
{"name": "Test #3", "description": "Test 3 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "one shot"}
])
def test_create_notification_instances_with_default_rule_and_channel_python35(self, fledge_url, notify_branch,
data,
remove_directories):
if data['name'] == 'Test 1':
_install_notify_plugin(notify_branch, NOTIFY_PLUGIN, remove_directories)
_add_notification_instance(fledge_url, data)
def test_inbuilt_rule_plugin_and_notify_python35_delivery(self, fledge_url):
jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
assert 1 == len(jdoc['delivery'])
assert NOTIFY_PLUGIN == jdoc['delivery'][0]['name']
assert 1 == len(jdoc['rules'])
assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
def test_get_notifications_and_audit_entry(self, fledge_url):
jdoc = _get_result(fledge_url, '/fledge/notification')
assert 3 == len(jdoc['notifications'])
# Test 1, Test2 and Test #3
jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFAD')
assert 3 == jdoc['totalCount']
def test_update_notification(self, fledge_url, name="Test 1"):
conn = http.client.HTTPConnection(fledge_url)
data = {"notification_type": "toggled"}
conn.request("PUT", '/fledge/notification/{}'.format(urllib.parse.quote(name))
, json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} updated successfully".format(name) == jdoc["result"]
# Verify updated notification info
jdoc = _get_result(fledge_url, '/fledge/notification/{}'.format(urllib.parse.quote(name)))
assert "toggled" == jdoc['notification']['notificationType']
def test_delete_notification(self, fledge_url, name="Test #3"):
conn = http.client.HTTPConnection(fledge_url)
conn.request("DELETE", '/fledge/notification/{}'.format(urllib.parse.quote(name)))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} deleted successfully.".format(name) == jdoc["result"]
# Verify only two notifications should exist NOT 3
jdoc = _get_result(fledge_url, '/fledge/notification')
notifications = jdoc['notifications']
assert 2 == len(notifications)
assert "Test 1" == notifications[0]['name']
assert "Test2" == notifications[1]['name']
jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFDL')
assert 1 == jdoc['totalCount']
class TestSentAndReceiveNotification:
FOGBENCH_TEMPLATE = "fogbench-template.json"
SENSOR_VALUE = 20
SOUTH_PLUGIN_NAME = "coap"
ASSET_NAME = "{}".format(SOUTH_PLUGIN_NAME)
@pytest.fixture
def start_south(self, add_south, remove_data_file, remove_directories, south_branch, fledge_url):
""" This fixture clone a south repo and starts south instance
add_south: Fixture that starts any south service with given configuration
remove_data_file: Fixture that remove data file created during the tests
remove_directories: Fixture that remove directories created during the tests """
fogbench_template_path = self.prepare_template_reading_from_fogbench()
add_south(self.SOUTH_PLUGIN_NAME, south_branch, fledge_url, service_name=self.SOUTH_PLUGIN_NAME)
yield self.start_south
# Cleanup code that runs after the test is over
remove_data_file(fogbench_template_path)
remove_directories("/tmp/fledge-south-{}".format(self.SOUTH_PLUGIN_NAME))
def prepare_template_reading_from_fogbench(self):
""" Define the template file for fogbench readings """
fogbench_template_path = os.path.join(
os.path.expandvars('${FLEDGE_ROOT}'), 'data/{}'.format(self.FOGBENCH_TEMPLATE))
with open(fogbench_template_path, "w") as f:
f.write(
'[{"name": "%s", "sensor_values": '
'[{"name": "sensor", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (
self.ASSET_NAME, self.SENSOR_VALUE, self.SENSOR_VALUE))
return fogbench_template_path
def ingest_readings_from_fogbench(self, fledge_url, wait_time):
pause_for_x_seconds(x=wait_time*3)
conn = http.client.HTTPConnection(fledge_url)
subprocess.run(["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -"
.format(self.FOGBENCH_TEMPLATE)], shell=True, check=True, stdout=subprocess.DEVNULL)
pause_for_x_seconds(x=wait_time)
conn.request("GET", '/fledge/asset')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
val = json.loads(r)
assert 1 == len(val)
assert self.ASSET_NAME == val[0]["assetCode"]
assert 1 == val[0]["count"]
conn.request("GET", '/fledge/asset/{}'.format(self.ASSET_NAME))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
val = json.loads(r)
assert 1 == len(val)
assert {'sensor': self.SENSOR_VALUE} == val[0]["reading"]
def configure_rule_with_single_item_eval_type(self, fledge_url, cat_name):
conn = http.client.HTTPConnection(fledge_url)
data = {"asset": self.ASSET_NAME,
"datapoint": "sensor",
"evaluation_data": "Single Item",
"condition": ">",
"trigger_value": str(self.SENSOR_VALUE - 10),
}
conn.request("PUT", '/fledge/category/rule{}'.format(cat_name), json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
def enable_notification(self, fledge_url, cat_name, is_enabled=True):
_enabled = "true" if is_enabled else "false"
data = {"value": _enabled}
conn = http.client.HTTPConnection(fledge_url)
conn.request("PUT", '/fledge/category/{}/enable'.format(cat_name), json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
def test_sent_and_receive_notification(self, fledge_url, start_south, wait_time):
data = {"name": "Test4",
"description": "Test4_Notification",
"rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN,
"enabled": True,
"notification_type": "one shot"
}
name = data['name']
_add_notification_instance(fledge_url, data)
self.configure_rule_with_single_item_eval_type(fledge_url, name)
# upload script NotifyPython35::configure() -> lowercase(categoryName) + _script_ + method_name + ".py"
cat_name = "delivery{}".format(name)
script_path = '$FLEDGE_ROOT/tests/system/python/data/notify35.py'
url = 'http://' + fledge_url + '/fledge/category/' + cat_name + '/script/upload'
upload_script = 'curl -F "script=@{}" {}'.format(script_path, url)
subprocess.run(upload_script, shell=True, check=True, stdout=subprocess.DEVNULL)
# enable notification delivery (it was getting disabled, as no script file was available)
self.enable_notification(fledge_url, "delivery" + name)
self.ingest_readings_from_fogbench(fledge_url, wait_time)
time.sleep(wait_time)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSN', name=name)
class TestStartStopNotificationService:
def test_shutdown_service_with_schedule_disable(self, fledge_url, disable_schedule, wait_time):
disable_schedule(fledge_url, SERVICE_NAME)
_verify_service(fledge_url, status='shutdown')
pause_for_x_seconds(x=wait_time)
# After shutdown there should be 1 entry for NTFSD (shutdown)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSD', name=SERVICE_NAME, count=1)
def test_restart_notification_service(self, fledge_url, enable_schedule, wait_time):
enable_schedule(fledge_url, SERVICE_NAME)
pause_for_x_seconds(x=wait_time)
_verify_service(fledge_url, status='running')
# After restart there should be 2 entries for NTFST (start)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME, count=2)
| 41.973214 | 119 | 0.657307 | [
"Apache-2.0"
] | YashTatkondawar/fledge | tests/system/python/e2e/test_e2e_notification_service_with_plugins.py | 14,103 | Python |
# coding: utf-8
# ... import symbolic tools
weak_formulation = load('pyccel.symbolic.gelato', 'weak_formulation', True, 2)
glt_function = load('pyccel.symbolic.gelato', 'glt_function', True, 3)
Grad = load('pyccel.symbolic.gelato', 'Grad', False, 1)
Curl = load('pyccel.symbolic.gelato', 'Curl', False, 1)
Div = load('pyccel.symbolic.gelato', 'Div', False, 1)
Rot = load('pyccel.symbolic.gelato', 'Rot', False, 1)
Cross = load('pyccel.symbolic.gelato', 'Cross', False, 2)
Dot = load('pyccel.symbolic.gelato', 'Dot', False, 2)
# ...
# ... Laplace
a1 = lambda x,y,v,u: Dot(Grad(u), Grad(v))
ga1 = glt_function(a1, [4, 4], [2, 2])
wa1 = weak_formulation(a1, 2)
print(' a1 := ', a1)
print(' glt symbol a1 := ', ga1)
print('wa1 := ', wa1)
print('')
# ...
# ...
a2 = lambda x,y,v,u: Rot(u) * Rot(v) + Div(u) * Div(v) + 0.2 * Dot(u, v)
ga2 = glt_function(a2, [4, 4], [2, 2])
wa2 = weak_formulation(a2, 2)
print(' a2 := ', a2)
print(' glt symbol a2 := ', ga2)
print('wa2 := ', wa2)
print('')
# ...
# ...
a3 = lambda x,y,v,u: Cross(Curl(u), Curl(v)) + 0.2 * u * v
ga3 = glt_function(a3, [4, 4], [2, 2])
wa3 = weak_formulation(a3, 2)
print(' a3 := ', a3)
print(' glt symbol a3 := ', ga3)
print('wa3 := ', wa3)
print('')
# ...
| 25.901961 | 78 | 0.549584 | [
"MIT"
] | toddrme2178/pyccel | src_old/tests/scripts/lambda/pdes/2d/ex10.py | 1,321 | Python |
from py12306.log.base import BaseLog
from py12306.helpers.func import *
@singleton
class OrderLog(BaseLog):
# If these are not redeclared here, duplicate printing occurs; the cause is currently unknown
logs = []
thread_logs = {}
quick_log = []
MESSAGE_REQUEST_INIT_DC_PAGE_FAIL = '请求初始化订单页面失败'
MESSAGE_SUBMIT_ORDER_REQUEST_FAIL = '提交订单失败,错误原因 {} \n'
MESSAGE_SUBMIT_ORDER_REQUEST_SUCCESS = '提交订单成功'
MESSAGE_CHECK_ORDER_INFO_FAIL = '检查订单失败,错误原因 {} \n'
MESSAGE_CHECK_ORDER_INFO_SUCCESS = '检查订单成功'
MESSAGE_GET_QUEUE_INFO_SUCCESS = '获取排队信息成功,目前排队人数 {}, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_INFO_NO_SEAT = '接口返回实际为无票,跳过本次排队'
MESSAGE_GET_QUEUE_COUNT_SUCCESS = '排队成功,你当前排在第 {} 位, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_LESS_TICKET = '排队失败,目前排队人数已经超过余票张数'
MESSAGE_GET_QUEUE_COUNT_FAIL = '排队失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_SUCCESS = '# 提交订单成功!#'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_ERROR = '出票失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_FAIL = '提交订单失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_WAITING = '排队等待中,排队人数 {},预计还需要 {} 秒'
MESSAGE_QUERY_ORDER_WAIT_TIME_FAIL = '排队失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_INFO = '第 {} 次排队,请耐心等待'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_TITLE = '车票购买成功!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_CONTENT = '请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO = '\t\t车次信息: {} {}[{}] -> {}[{}],乘车日期 {},席位:{},乘车人:{}'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_START_SEND = '正在发送语音通知...'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_CONTENT = '你的车票 {} 到 {} 购买成功,请登录 12306 进行支付'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_EMAIL_CONTENT = '订单号 {},请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_JOB_CLOSED = '当前任务已结束'
@classmethod
def print_passenger_did_deleted(cls, passengers):
self = cls()
result = [passenger.get('name') + '(' + passenger.get('type_text') + ')' for passenger in passengers]
self.add_quick_log('# 删减后的乘客列表 {} #'.format(', '.join(result)))
self.flush()
return self
@classmethod
def print_ticket_did_ordered(cls, order_id):
self = cls()
self.add_quick_log('# 车票购买成功,订单号 {} #'.format(order_id))
self.flush()
return self
@classmethod
def get_order_success_notification_info(cls, query):
from py12306.query.job import Job
assert isinstance(query, Job)
passengers = [passenger.get(
'name') + '(' + passenger.get('type_text') + ')' for passenger in query.passengers]
return cls.MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO.format(query.get_info_of_train_number(),
query.get_info_of_left_station(),
query.get_info_of_train_left_time(),
query.get_info_of_arrive_station(),
query.get_info_of_train_arrive_time(),
query.get_info_of_left_date(),
query.current_seat_name,
','.join(passengers))
| 45.191781 | 109 | 0.614429 | [
"Apache-2.0"
] | 1182836912/py12306 | py12306/log/order_log.py | 3,989 | Python |
from setuptools import setup, find_packages
def read_requirements():
with open('requirements.txt') as req:
content = req.read()
requirements = content.split('\n')
return requirements
classifiers = [
'Topic :: Security',
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Intended Audience :: End Users/Desktop',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7'
]
def Longdesc():
with open('README.md') as rm:
RM_desc = rm.read()
RM_desc = str(RM_desc)
with open('ChangeLog.md') as cl:
CL_desc = cl.read()  # call read() so the file contents are stored, not the bound method
CL_desc = str(CL_desc)
return f"{CL_desc}\n\n{RM_desc}"
setup(
name='skyhawk',
version='0.0.7',
description='Skyhawk is a CLI tool that can run on any device with a camera to recognize faces. It is built with open-cv & python',
long_description=Longdesc(),
long_description_content_type="text/markdown",
url='https://github.com/devqueue/Skyhawk-cli.git',
author='Haziq Sayyed',
author_email='[email protected]',
license='Mozilla Public License 2.0',
classifiers=classifiers,
packages=find_packages(),
include_package_data=True,
install_requires=read_requirements(),
python_requires='>=3.8',
entry_points={
'console_scripts': [
'skyhawk = skyhawk.cli:cli'
]
})
| 28.803922 | 132 | 0.641933 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | devqueue/Skyhawk-cli | setup.py | 1,469 | Python |
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results on the training data
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
Notes: It takes in the predicted and true positions, and then loops over each possible configuration and finds the optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
true_halo_indexes: vector containing indexes of the input true halos which match the predicted halo indexes (1 to 3 elements)
measured_halo_indexes: vector containing indexes of the predicted halo position with reference to the true halo position.
e.g. if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position match the second input of the predicted x,y coordinates.
"""
num_halos=len(x_true) #Only works for number of halos > 1
num_configurations=mt.factorial(num_halos) #The number of possible different combinations
configurations=np.zeros([num_halos,num_configurations],int) #The array of combinations
#I will pass back
distances = np.zeros([num_configurations],float) #THe array of the distances
#for all possible combinations
radial_distance=[] #The vector of distances
#I will pass back
#Pick a combination of true and predicted
a=['01','012'] #Input for the permutations: '01' for two halos, '012' for three
count=0 #For the index of the distances array
true_halo_indexes=[] #The tuples which will show the order of halos picked
predicted_halo_indexes=[]
distances_perm=np.zeros([num_configurations,num_halos],float) #The distance between each
#true and predicted
#halo for every combination
true_halo_indexes_perm=[] #log of all the permutations of true halos used
predicted_halo_indexes_perm=[] #log of all the predicted permutations
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
for j in xrange(num_halos): #loop through all the true halos with the
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
#This array logs the distance between true and
#predicted halo for ALL configruations
which_true_halos.append(j) #log the order in which I try each true halo
which_predicted_halos.append(int(perm[j])) #log the order in which I try
#each predicted halo
true_halo_indexes_perm.append(which_true_halos) #this is a tuple of tuples of
#all of the different config
#true halo indexes
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::]) #Find what the total distances
#are for each configuration
count=count+1
config = np.where(distances == min(distances))[0][0] #The configuration used is the one
#which has the smallest distance
radial_distance.append(distances_perm[config,0::]) #Find the tuple of distances that
#correspond to this smallest distance
true_halo_indexes=true_halo_indexes_perm[config] #Find the tuple of the index which refers
#to the smallest distance
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
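# Worked mini-example (editor's addition): the brute-force matching that calc_delta_r()
# performs. With two true halos and two predictions listed in swapped order, the
# permutation with the smallest *total* distance pairs them correctly. This standalone
# sketch mirrors the logic above without calling the function itself; the numbers are made up.
def _matching_example():
    import itertools
    x_true, y_true = [100.0, 900.0], [100.0, 900.0]
    x_pred, y_pred = [890.0, 110.0], [910.0, 105.0]   # predictions given in swapped order
    best = None
    for perm in itertools.permutations(range(2)):      # configurations (0,1) and (1,0)
        total = sum(mt.sqrt((x_true[j] - x_pred[perm[j]])**2 +
                            (y_true[j] - y_pred[perm[j]])**2) for j in range(2))
        if best is None or total < best[0]:
            best = (total, perm)
    return best                                        # -> picks perm (1, 0)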
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
# Angle at which the halo lies
#with respect to the reference point
phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted])) # Angle of the estimate
#wrt true halo centre
#Before finding the angle with the zero line as the line joining the halo and the reference
#point I need to convert the angle produced by Python to an angle between 0 and 2pi
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
theta = phi-psi #The angle with the baseline as the line joining the ref and the halo
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the angle of the true pos wrt the ref is
#greater than the angle of predicted pos
#and the true pos then add 2pi
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
angle: the angle(s) returned by arctan, to be converted
x_in, y_in: the x and y coordinates used to determine the quadrant
the coordinate lies in, so as to add pi or 2pi
Returns:
theta: the angle in the range 0:2pi
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
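# Quick check (editor's addition): convert_to_360() lifts numpy's arctan output, which
# lives in (-pi/2, pi/2), into the full 0..2*pi range using the signs of the offsets.
# The helper below is illustrative only and relies on the same Python 2 environment
# this script already assumes (convert_to_360 uses xrange).
def _quadrant_example():
    ang = np.arctan(np.array([-1.0]) / np.array([-1.0]))            # arctan(1) = pi/4
    return convert_to_360(ang, np.array([-1.0]), np.array([-1.0]))  # -> [5*pi/4], third quadrant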
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
x_halo, y_halo: Vectors of length num_halos referring to the coordinates of the halos
weight: the weight which will be assigned to the position of the halo
num_halos: number of halos in the system
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
"""
#Find the weighted average of the x and y coordinates
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
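# Worked example (editor's addition): the reference point is just the weighted mean of
# the halo coordinates, so two halos of equal weight give their midpoint. Values are made up.
def _reference_point_example():
    x_ref, y_ref = get_ref(np.array([0.0, 10.0]), np.array([0.0, 20.0]),
                           np.array([1.0, 1.0]))
    return x_ref, y_ref   # -> (5.0, 10.0)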
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
num_halos_total=0 #Keep track of how many halos are input into the metric
for selectskyinsolutions, sky in enumerate(sky_prediction): #Loop through each line in result.csv and analyse each one
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[0])) #get the predictd values
y_predicted=np.append(y_predicted,float(sky[1]))
#The solution file for the test data provides masses
#to calculate the centre of mass whereas the Training_halo.csv
#directly provides x_ref y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#Get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index, contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. It also contains indexes of the halos used which are used to
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2=(1/N Sum^Ncos(theta))^2+(1/N Sum^Nsin(theta))**2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight the av_r such that < 1 is a good score and > 1 isn't so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
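# Numeric illustration (editor's addition) of the final score computed above:
#   metric = W1*av_r + W2*R_bar,  R_bar = sqrt(mean(cos(theta))^2 + mean(sin(theta))^2)
# An average radial miss of 500 px with angles clustered near zero (R_bar ~ 1) scores
# about 1.5; randomly scattered angles drive R_bar toward 0, leaving only the distance term.
def _metric_example():
    av_r = 500.0
    angles = np.array([0.1, -0.2, 0.05])                       # tightly clustered angles
    r_bar = np.sqrt(np.mean(np.cos(angles))**2 + np.mean(np.sin(angles))**2)
    return (1.0 / 1000.0) * av_r + 1.0 * r_bar                 # ~= 0.5 + 1.0 = ~1.5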
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb')) #Load in the sky_ids from the solution file
for row in sky_loader:
true_sky_id.append(row[0])
#Load in the true values from the solution file
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
for row in sky_loader:
true_sky_id.append(row[1])
num_halos_total=0 #Keep track of how many halos are input into the metric
sky_prediction = c.reader(open(user_fname, 'rb')) #Open the result.csv
try: #See if the input file from user has a header on it
#with open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:
with open(user_fname, 'r') as f:
header = float((f.readline()).split(',')[1]) #try to cast what the
#first input would be
#to a float; if it succeeds
#it's not a header
print 'THE INPUT FILE DOESNT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction: #Loop through each line in result.csv and analyse each one
sky_id = str(sky[0]) #Get the sky_id of the input
does_it_exist=true_sky_id.count(sky_id) #Is the input sky_id
#from user a real one?
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
selectskyinsolutions=true_sky_id.index(sky_id)-1
else: #Otherwise exit
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predictd values
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
#The solution file for the test data provides masses
#to calculate the centre of mass whereas the Training_halo.csv
#directly provides x_ref y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#Get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index, contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. It also contains indexes of the halos used which are used to
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find what the average distance the estimate is from the halo position
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2=(1/N Sum^Ncos(theta))^2+(1/N Sum^Nsin(theta))**2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight the av_r such that < 1 is a good score and > 1 isn't so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
#For help just typed 'python DarkWorldsMetric.py -h'
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,haloy1,halox_2,halo_y2,halox3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
| 46.896313 | 188 | 0.604284 | [
"MIT"
] | 19shanu91/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter5_LossFunctions/DarkWorldsMetric.py | 20,353 | Python |
#!/usr/bin/env python3
# methodological_experiment.py
import sys, os, csv
import numpy as np
import pandas as pd
import versatiletrainer2
import metaselector
import matplotlib.pyplot as plt
from scipy import stats
def first_experiment():
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
vocabpath = '../modeloutput/experimentalvocab.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'sf_loc', 'sf_oclc'}
sizecap = 200
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap)
c_range = [.004, .012, 0.3, 0.8, 2]
featurestart = 3000
featureend = 4400
featurestep = 100
modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, 'first_experiment', '../modeloutput/first_experiment.csv')
plt.rcParams["figure.figsize"] = [9.0, 6.0]
plt.matshow(matrix, origin = 'lower', cmap = plt.cm.YlOrRd)
plt.show()
def get_ratio_data(vocabpath, sizecap, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000):
''' Loads metadata, selects instances for the positive
and negative classes (using a ratio to dilute the positive
class with negative instances), creates a lexicon if one doesn't
already exist, and creates a pandas dataframe storing
texts as rows and words/features as columns. A refactored
and simplified version of get_data_for_model().
'''
holdout_authors = True
freqs_already_normalized = True
verbose = False
datecols = ['firstpub']
indexcol = ['docid']
extension = '.tsv'
genrecol = 'tags'
numfeatures = 8000
sourcefolder = '../data/'
metadatapath = '../metadata/mastermetadata.csv'
# Get a list of files.
allthefiles = os.listdir(sourcefolder)
volumeIDsinfolder = list()
volumepaths = list()
numchars2trim = len(extension)
for filename in allthefiles:
if filename.endswith(extension):
volID = filename[0 : -numchars2trim]
# The volume ID is basically the filename minus its extension.
volumeIDsinfolder.append(volID)
metadata = metaselector.load_metadata(metadatapath, volumeIDsinfolder, excludebelow, excludeabove, indexcol = indexcol, datecols = datecols, genrecol = genrecol)
# That function returns a pandas dataframe which is guaranteed to be indexed by indexcol,
# and to contain a numeric column 'std_date' as well as a column 'tagset' which contains
# sets of genre tags for each row. It has also been filtered so it only contains volumes
# in the folder, and none whose date is below excludebelow or above excludeabove.
orderedIDs, classdictionary = metaselector.dilute_positive_class(metadata, sizecap, tags4positive, tags4negative, ratio)
metadata = metadata.loc[orderedIDs]
# Limits the metadata data frame to rows we are actually using
# (those selected in select_instances).
# We now create an ordered list of id-path tuples.
volspresent = [(x, sourcefolder + x + extension) for x in orderedIDs]
print(len(volspresent))
print('Building vocabulary.')
vocablist = versatiletrainer2.get_vocablist(vocabpath, volspresent, n = numfeatures)
numfeatures = len(vocablist)
print()
print("Number of features: " + str(numfeatures))
# For each volume, we're going to create a list of volumes that should be
# excluded from the training set when it is to be predicted. More precisely,
# we're going to create a list of their *indexes*, so that we can easily
# remove rows from the training matrix.
authormatches = [ [] for x in orderedIDs]
# Now we proceed to enlarge that list by identifying, for each volume,
# a set of indexes that have the same author. Obvs, there will always be at least one.
# We exclude a vol from its own training set.
if holdout_authors:
for idx1, anid in enumerate(orderedIDs):
thisauthor = metadata.loc[anid, 'author']
authormatches[idx1] = list(np.flatnonzero(metadata['author'] == thisauthor))
for alist in authormatches:
alist.sort(reverse = True)
print()
print('Authors matched.')
print()
# I am reversing the order of indexes so that I can delete them from
# back to front, without changing indexes yet to be deleted.
# This will become important in the modelingprocess module.
masterdata, classvector = versatiletrainer2.get_dataframe(volspresent, classdictionary, vocablist, freqs_already_normalized)
return metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist
def vary_sf_ratio_against_random():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [5, 6, 7]:
ceiling = 105
if iteration == 7:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'sf_loc', 'sf_oclc'}
tags4negative = {'random'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8]
featurestart = 1000
featureend = 6000
featurestep = 300
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# It's important not to write fullmodel if you want the csvs
# to accurately reflect terrible accuracy on diluted datasets.
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def vary_fantasy_ratio_against_sf():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [8, 9, 10]:
ceiling = 105
if iteration == 10:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'sf_loc', 'sf_oclc'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
featurestart = 2000
featureend = 7500
featurestep = 400
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def vary_fantasy_ratio_against_random():
if not os.path.isfile('../measuredivergence/modeldata.tsv'):
with open('../measuredivergence/modeldata.tsv', mode = 'w', encoding = 'utf-8') as f:
outline = 'name\tsize\tratio\taccuracy\tfeatures\tregularization\n'
f.write(outline)
size = 80
for iteration in [11, 12, 13]:
ceiling = 105
if iteration == 13:
ceiling = 5
for pct in range(0, ceiling, 5):
ratio = pct / 100
name = 'iter' + str(iteration) + '_size' + str(size) + '_ratio' + str(pct)
vocabpath = '../measuredivergence/vocabularies/' + name + '.txt'
tags4positive = {'fantasy_loc', 'fantasy_oclc'}
tags4negative = {'random'}
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = get_ratio_data(vocabpath, size, ratio, tags4positive, tags4negative, excludebelow = 0, excludeabove = 3000)
c_range = [.00005, .0003, .001, .004, .012, 0.2, 0.8, 3]
featurestart = 1600
featureend = 6400
featurestep = 400
modelparams = 'logistic', 16, featurestart, featureend, featurestep, c_range
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/modeloutput/' + name + '.csv', write_fullmodel = False)
# write_fullmodel = False forces crossvalidation.
with open('../measuredivergence/modeldata.tsv', mode = 'a', encoding = 'utf-8') as f:
outline = name + '\t' + str(size) + '\t' + str(ratio) + '\t' + str(maxaccuracy) + '\t' + str(features4max) + '\t' + str(best_regularization_coef) + '\n'
f.write(outline)
def accuracy(df, column):
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df[column] > 0.5))
tn = sum((df.realclass <= 0.5) & (df[column] <= 0.5))
fp = sum((df.realclass <= 0.5) & (df[column] > 0.5))
fn = sum((df.realclass > 0.5) & (df[column] <= 0.5))
assert totalcount == (tp + fp + tn + fn)
return (tp + tn) / totalcount
def accuracy_loss(df):
return accuracy(df, 'probability') - accuracy(df, 'alien_model')
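# Editor's sketch (not part of the original module): a tiny check of the
# accuracy() helper above. With df.realclass = [1, 0, 1, 0] and
# df['probability'] = [0.9, 0.2, 0.4, 0.6], the counts are tp=1, tn=1,
# fp=1, fn=1, so accuracy(df, 'probability') returns (1 + 1) / 4 = 0.5.
# accuracy_loss() then reports how much of that accuracy disappears when the
# same rows are scored with the 'alien_model' column instead.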
def kldivergence(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
    # plain float (np.float was removed in recent NumPy releases)
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
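# Editor's sketch (not part of the original module): for two coin-flip
# distributions p = [0.5, 0.5] and q = [0.9, 0.1],
#     kldivergence(p, q) = 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1) ≈ 0.511
# while kldivergence(q, p) ≈ 0.368 -- the measure is asymmetric, which is
# why get_divergences() below averages the two directions.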
def averagecorr(r1, r2):
z1 = np.arctanh(r1)
z2 = np.arctanh(r2)
themean = (z1 + z2) / 2
return np.tanh(themean)
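# Editor's sketch (not part of the original module): averaging correlations
# through Fisher's z (arctanh) weights strong correlations more heavily than
# a plain mean would, e.g. averagecorr(0.9, 0.3) ≈ 0.712 rather than 0.6.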
def get_divergences(gold, testname, itera, size, pct):
'''
This function gets several possible measures of divergence
between two models.
'''
# We start by constructing the paths to the gold
# standard model criteria (.pkl) and
# model output (.csv) on the examples
# originally used to train it.
# We're going to try applying the gold standard
# criteria to another model's output, and vice-
# versa.
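    # (Editor's note, inferred from the usage below: in each returned
    # dataframe the 'probability' column holds the model's own predictions
    # and 'alien_model' holds the predictions made by applying the other
    # model's criteria.)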
model1 = '../measuredivergence/modeloutput/' + gold + '.pkl'
meta1 = '../measuredivergence/modeloutput/' + gold + '.csv'
# Now we construct paths to the test model
# criteria (.pkl) and output (.csv).
testpath = '../measuredivergence/modeloutput/' + testname
model2 = testpath + '.pkl'
meta2 = testpath + '.csv'
model1on2 = versatiletrainer2.apply_pickled_model(model1, '../data/', '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, '../data/', '.tsv', meta1)
pearson1on2 = stats.pearsonr(model1on2.probability, model1on2.alien_model)[0]
pearson2on1 = stats.pearsonr(model2on1.probability, model2on1.alien_model)[0]
pearson = averagecorr(pearson1on2, pearson2on1)
spearman1on2 = stats.spearmanr(model1on2.probability, model1on2.alien_model)[0]
spearman2on1 = stats.spearmanr(model2on1.probability, model2on1.alien_model)[0]
spearman = averagecorr(spearman1on2, spearman2on1)
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = (loss1on2 + loss2on1) / 2
kl1on2 = kldivergence(model1on2.probability, model1on2.alien_model)
kl2on1 = kldivergence(model2on1.probability, model2on1.alien_model)
kl = (kl1on2 + kl2on1) / 2
return pearson, spearman, loss, kl, spearman1on2, spearman2on1, loss1on2, loss2on1
def measure_sf_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/sf_divergences.tsv'):
with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter5_size80_ratio0', 'iter6_size80_ratio0', 'iter7_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [5, 6]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/sf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def measure_fsf_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/fsf_divergences.tsv'):
with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter8_size80_ratio0', 'iter9_size80_ratio0', 'iter10_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [8, 9]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/fsf_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def measure_fantasy_divergences():
columns = ['name1', 'name2', 'size', 'acc1', 'acc2', 'ratiodiff', 'pearson', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'kl']
if not os.path.isfile('../measuredivergence/fantasy_divergences.tsv'):
with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
goldstandards = ['iter11_size80_ratio0', 'iter12_size80_ratio0', 'iter13_size80_ratio0']
size = 80
modeldata = pd.read_csv('../measuredivergence/modeldata.tsv', sep = '\t', index_col = 'name')
for gold in goldstandards:
for itera in [11, 12]:
for pct in range(0, 105, 5):
ratio = pct / 100
testname = 'iter' + str(itera) + '_size' + str(size) + '_ratio' + str(pct)
if testname == gold:
continue
# we don't test a model against itself
else:
row = dict()
row['pearson'], row['spearman'], row['loss'], row['kl'], row['spear1on2'], row['spear2on1'], row['loss1on2'], row['loss2on1'] = get_divergences(gold, testname, itera, size, pct)
row['name1'] = gold
row['name2'] = testname
row['size'] = size
row['acc1'] = modeldata.loc[gold, 'accuracy']
row['acc2'] = modeldata.loc[testname, 'accuracy']
row['ratiodiff'] = ratio
with open('../measuredivergence/fantasy_divergences.tsv', mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writerow(row)
def new_experiment():
# The first time I ran this, I used partition 2 to build the
# mixed data, and partition 1 as a gold standard. Now reversing.
outmodelpath = '../measuredivergence/results/newexperimentmodels.csv'
columns = ['name', 'size', 'ratio', 'iteration', 'meandate', 'maxaccuracy', 'features', 'regularization']
if not os.path.isfile(outmodelpath):
with open(outmodelpath, mode = 'w', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns)
scribe.writeheader()
c_range = [.00001, .0001, .001, .01, 0.1, 1, 10, 100]
featurestart = 1500
featureend = 6000
featurestep = 300
modelparams = 'logistic', 10, featurestart, featureend, featurestep, c_range
sizecap = 75
for i in range(3, 6):
for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
sourcefolder = '../measuredivergence/mix/' + str(ratio) + '/'
metadatapath = '../measuredivergence/partitionmeta/meta' + str(ratio) + '.csv'
name = 'mixeddata_' + str(i) + '_' + str(ratio)
vocabpath = '../lexica/' + name + '.txt'
tags4positive = {'fantasy', 'detective'}
tags4negative = {'random'}
floor = 1800
ceiling = 1930
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')
meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))
row = dict()
row['name'] = name
row['size'] = sizecap
row['ratio'] = ratio
row['iteration'] = i
row['meandate'] = meandate
row['maxaccuracy'] = maxaccuracy
row['features'] = features4max
row['regularization'] = best_regularization_coef
with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns)
scribe.writerow(row)
os.remove(vocabpath)
sourcefolder = '../data/'
metadatapath = '../measuredivergence/partitionmeta/part2.csv'
# note that this is changed if you create mix data with
# partition 2
name = 'goldfantasy_' + str(i)
vocabpath = '../lexica/' + name + '.txt'
tags4positive = {'fantasy'}
tags4negative = {'random', 'randomB'}
floor = 1800
ceiling = 1930
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')
meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))
row = dict()
row['name'] = name
row['size'] = sizecap
row['ratio'] = ratio
row['iteration'] = i
row['meandate'] = meandate
row['maxaccuracy'] = maxaccuracy
row['features'] = features4max
row['regularization'] = best_regularization_coef
with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns)
scribe.writerow(row)
os.remove(vocabpath)
sourcefolder = '../data/'
metadatapath = '../measuredivergence/partitionmeta/part2.csv'
# depending on which partition you used to create mix data;
# this will be the other one
name = 'golddetective_' + str(i)
vocabpath = '../lexica/' + name + '.txt'
tags4positive = {'detective'}
tags4negative = {'random', 'randomB'}
floor = 1800
ceiling = 1930
metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist = versatiletrainer2.get_simple_data(sourcefolder, metadatapath, vocabpath, tags4positive, tags4negative, sizecap, excludebelow = floor, excludeabove = ceiling, force_even_distribution = False, numfeatures = 6000)
matrix, maxaccuracy, metadata, coefficientuples, features4max, best_regularization_coef = versatiletrainer2.tune_a_model(metadata, masterdata, classvector, classdictionary, orderedIDs, authormatches, vocablist, tags4positive, tags4negative, modelparams, name, '../measuredivergence/newmodeloutput/' + name + '.csv')
meandate = int(round(np.sum(metadata.firstpub) / len(metadata.firstpub)))
row = dict()
row['name'] = name
row['size'] = sizecap
row['ratio'] = ratio
row['iteration'] = i
row['meandate'] = meandate
row['maxaccuracy'] = maxaccuracy
row['features'] = features4max
row['regularization'] = best_regularization_coef
with open(outmodelpath, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns)
scribe.writerow(row)
os.remove(vocabpath)
def accuracy(df, column):
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df[column] > 0.5))
tn = sum((df.realclass <= 0.5) & (df[column] <= 0.5))
fp = sum((df.realclass <= 0.5) & (df[column] > 0.5))
fn = sum((df.realclass > 0.5) & (df[column] <= 0.5))
assert totalcount == (tp + fp + tn + fn)
return (tp + tn) / totalcount
def accuracy_loss(df):
return accuracy(df, 'probability') - accuracy(df, 'alien_model')
def get_divergence(sampleA, sampleB, twodatafolder = '../data/', onedatafolder = '../data/'):
'''
    This function applies model A to B, and vice versa, and returns
    a couple of measures of divergence: notably lost accuracy and
    z-transformed Spearman correlation.
'''
# We start by constructing the paths to the sampleA
# standard model criteria (.pkl) and
# model output (.csv) on the examples
# originally used to train it.
# We're going to try applying the sampleA standard
# criteria to another model's output, and vice-
# versa.
model1 = '../measuredivergence/newmodeloutput/' + sampleA + '.pkl'
meta1 = '../measuredivergence/newmodeloutput/' + sampleA + '.csv'
# Now we construct paths to the test model
# criteria (.pkl) and output (.csv).
model2 = '../measuredivergence/newmodeloutput/' + sampleB + '.pkl'
meta2 = '../measuredivergence/newmodeloutput/' + sampleB + '.csv'
model1on2 = versatiletrainer2.apply_pickled_model(model1, twodatafolder, '.tsv', meta2)
model2on1 = versatiletrainer2.apply_pickled_model(model2, onedatafolder, '.tsv', meta1)
spearman1on2 = np.arctanh(stats.spearmanr(model1on2.probability, model1on2.alien_model)[0])
spearman2on1 = np.arctanh(stats.spearmanr(model2on1.probability, model2on1.alien_model)[0])
spearman = (spearman1on2 + spearman2on1) / 2
loss1on2 = accuracy_loss(model1on2)
loss2on1 = accuracy_loss(model2on1)
loss = (loss1on2 + loss2on1) / 2
alienacc2 = accuracy(model1on2, 'alien_model')
alienacc1 = accuracy(model2on1, 'alien_model')
acc2 = accuracy(model1on2, 'probability')
acc1 = accuracy(model2on1, 'probability')
meandate2 = np.mean(model1on2.std_date)
meandate1 = np.mean(model2on1.std_date)
return spearman, loss, spearman1on2, spearman2on1, loss1on2, loss2on1, acc1, acc2, alienacc1, alienacc2, meandate1, meandate2
def write_a_row(r, outfile, columns):
with open(outfile, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, fieldnames = columns, delimiter = '\t')
scribe.writerow(r)
def new_divergences():
outcomparisons = '../measuredivergence/results/new_comparisons.tsv'
columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']
if not os.path.isfile(outcomparisons):
with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
# I originally ran this with i and j
# iterating through range(3). Now trying
# on models generated with the partitions
# reversed.
for i in range(3, 6):
for j in range(3, 6):
for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
r = dict()
r['testype'] = 'fantasy2mixed'
r['name1'] = 'goldfantasy_' + str(i)
r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
r['ratio'] = ratio
write_a_row(r, outcomparisons, columns)
r = dict()
r['testype'] = 'detective2mixed'
r['name1'] = 'golddetective_' + str(i)
r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/')
r['ratio'] = 100 - ratio
# note that distance from detective is the complement
# of distance from fantasy
write_a_row(r, outcomparisons, columns)
def new_self_comparisons ():
outcomparisons = '../measuredivergence/results/self_comparisons.tsv'
columns = ['testype', 'name1', 'name2', 'ratio', 'spearman', 'spear1on2', 'spear2on1', 'loss', 'loss1on2', 'loss2on1', 'acc1', 'acc2', 'alienacc1', 'alienacc2', 'meandate1', 'meandate2']
if not os.path.isfile(outcomparisons):
with open(outcomparisons, mode = 'a', encoding = 'utf-8') as f:
scribe = csv.DictWriter(f, delimiter = '\t', fieldnames = columns)
scribe.writeheader()
for i in range(0, 3):
for j in range(3, 6):
for ratio in [0, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 100]:
r = dict()
r['testype'] = 'selfmixed'
r['name1'] = 'mixeddata_' + str(i) + '_' + str(ratio)
r['name2'] = 'mixeddata_' + str(j) + '_' + str(ratio)
r['spearman'], r['loss'], r['spear1on2'], r['spear2on1'], r['loss1on2'], r['loss2on1'], r['acc1'], r['acc2'], r['alienacc1'], r['alienacc2'], r['meandate1'], r['meandate2'] = get_divergence(r['name1'], r['name2'], twodatafolder = '../measuredivergence/mix/' + str(ratio) + '/', onedatafolder = '../measuredivergence/altmix/' + str(ratio) + '/')
r['ratio'] = ratio
write_a_row(r, outcomparisons, columns)
new_self_comparisons()
| 44.895954 | 360 | 0.629168 | [
"MIT"
] | tedunderwood/fiction | variation/methodological_experiment.py | 31,068 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import test_utils
from lingvo.core.ops import py_x_ops
from six.moves import range
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class RandomOpsTest(test_utils.TestCase):
def testRandomPermutationSequenceRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=True)
remaining = list(range(20))
for _ in range(10):
# Each epoch takes exactly 3 steps.
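        # (batch=7, so three runs return 21 values: the full 20-element
        # permutation for this epoch plus one value spilling over into the
        # next epoch -- editor's note.)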
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(len(vals), 21)
# Contains all the remaining values from previous epoch.
for x in remaining:
vals.remove(x) # Raises exception if x is not in vals.
# Remaining items have no duplicates.
self.assertEqual(len(vals), len(set(vals)))
remaining = list(set(range(20)) - set(vals))
def testRandomPermutationSequenceNoRepeat(self):
with self.session() as sess:
out = py_x_ops.random_permutation_sequence(num=20, batch=7, repeat=False)
# Each epoch takes exactly 3 steps.
vals = sess.run(out).tolist() + sess.run(out).tolist() + sess.run(
out).tolist()
self.assertEqual(list(range(20)), sorted(vals))
# repeat=False. We should see OutOfRange error.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(out)
if __name__ == '__main__':
tf.test.main()
| 34.530303 | 80 | 0.678806 | [
"Apache-2.0"
] | CelineQiQi/lingvo | lingvo/core/ops/random_ops_test.py | 2,279 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyFastjsonschema(PythonPackage):
"""Fast JSON schema validator for Python."""
homepage = "https://github.com/horejsek/python-fastjsonschema"
pypi = "fastjsonschema/fastjsonschema-2.15.1.tar.gz"
version('2.15.1', sha256='671f36d225b3493629b5e789428660109528f373cf4b8a22bac6fa2f8191c2d2')
depends_on('py-setuptools', type='build')
| 34.8125 | 96 | 0.759425 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | Bambi/spack | var/spack/repos/builtin/packages/py-fastjsonschema/package.py | 557 | Python |
import pytest
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth.models import User
from records.models import (
Category, Record, Budget, OUTCOME, INCOME, SAVINGS, tmz)
from records.month_control import MonthControl, MonthControlWithBudget
@pytest.fixture
def current_date():
today = date.today()
today_datetime = datetime(
day=today.day, month=today.month, year=today.year)
return tmz(today_datetime)
@pytest.fixture
def future_date(current_date):
date = current_date+relativedelta(days=1)
return date
@pytest.fixture
def day_of_month(future_date):
return future_date.day
@pytest.fixture
def start_of_recurrence(future_date):
"""
Date object representing the first day of a record with recurrence
"""
return future_date
@pytest.fixture
def end_of_recurrence(future_date):
"""
    Return a date used to determine the final month in which the
    recurrence should occur
"""
date = future_date+relativedelta(months=6)
return date
@pytest.fixture
def next_month(current_date):
date = current_date+relativedelta(months=1)
return date
@pytest.fixture
def next_month_future(future_date):
date = future_date+relativedelta(months=1)
return date
@pytest.fixture
def infinite_future_date(current_date):
date = current_date+relativedelta(years=360)
return date
@pytest.fixture
def month_control(user, current_date):
"""
Return a MonthControl object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControl(
user, current_date.month, current_date.year, cache={})
return month_control
@pytest.fixture
def month_control_with_budget(user, current_date):
"""
Return a MonthControlWithBudget object for the current date.
Important: currently any Record fixture should come before month_control
"""
month_control = MonthControlWithBudget(
user, current_date.month, current_date.year, cache={})
return month_control
def _user(username='test_user'):
raw_password = "fake"
new_user = User.objects.create_user(
username=username, email="[email protected]", password=raw_password)
setattr(new_user, "raw_password", raw_password)
return new_user
@pytest.fixture
def user():
return _user()
@pytest.fixture
def another_user():
return _user('another_user')
@pytest.fixture
def outcome(user):
"""
Main category of outcome type
"""
category = Category.objects.create(
name="outcome", type_category=OUTCOME, user=user)
return category
@pytest.fixture
def income(user):
"""
Main category of income type
"""
category = Category.objects.create(
name="income", type_category=INCOME, user=user)
return category
@pytest.fixture
def savings(user):
"""
Category of Savings
"""
category = Category.objects.create(
name="savings", type_category=SAVINGS, user=user)
return category
@pytest.fixture
def outcome_current(user, outcome, current_date):
"""
Record of type Outcome set to today (current date)
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def outcome_future(user, outcome, future_date):
"""
Record of type Outcome set in the future
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=future_date, user=user)
return record
@pytest.fixture
def outcome_recurrent(user, outcome, start_of_recurrence):
"""
Record of type Outcome set in the future with a day of the month set
to create a recurring record
This fixture should not be used with outcome_recurrent_limited and
outcome_with_parent since they change the instance of this own record
"""
record = Record.objects.create(
category=outcome, amount=1, start_date=start_of_recurrence, user=user,
day_of_month=start_of_recurrence.day)
return record
@pytest.fixture
def outcome_recurrent_limited(user, outcome_recurrent, end_of_recurrence):
"""
Record of type Outcome set in the future with a recurring day of the month
set and limited to a certain time
"""
outcome_recurrent.end_date = end_of_recurrence
outcome_recurrent.save()
return outcome_recurrent
@pytest.fixture
def outcome_with_parent(
outcome_future, outcome_recurrent, next_month_future):
outcome_future.parent = outcome_recurrent
outcome_future.start_date = next_month_future
outcome_future.save()
return outcome_future
@pytest.fixture
def savings_current(request, user, savings, current_date):
"""
    Record of type Savings set to today (current date)
"""
record = Record.objects.create(
category=savings, amount=1, start_date=current_date, user=user)
return record
@pytest.fixture
def budget(user):
budget = Budget.objects.create(user=user, amount=1)
return budget
| 24.389423 | 78 | 0.723044 | [
"Unlicense"
] | curaloucura/money-forecast | moneyforecast/tests/records/fixtures.py | 5,073 | Python |
with open("/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/GMSB_SHLA/GMSB_Lambda350TeV_CTau400cm.slha") as f:
SLHA_TABLE = f.read()
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.PSweightsPythia.PythiaPSweightsSettings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
pythia8PSweightsSettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| 42.911765 | 138 | 0.632625 | [
"Apache-2.0"
] | zhangzc11/cms-gmsb-sps8-configs | Configuration/GenProduction/python/ThirteenTeV/GMSB_noSLHA/GMSB_L350TeV_Ctau400cm_Pythia8_13TeV_cff.py | 1,459 | Python |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object('setting')
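# (Editor's note: the 'setting' module loaded above is expected to supply the
# Flask / Flask-SQLAlchemy configuration, e.g. SQLALCHEMY_DATABASE_URI; it is
# not part of this file.)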
db = SQLAlchemy(app) | 17.875 | 39 | 0.797203 | [
"MIT"
] | vemodalen-x/IRS_imental | Systemcode/imental-Flask/init.py | 143 | Python |
'''Ask for a student's two grades and report the result based on the average:
average:
>= 7 = Approved
>= 5 and < 7 = Recovery
< 5 = Failed
'''
g1 = float(input("Inform the student's first grade: "))
g2 = float(input("Inform the student's second grade: "))
average = (g1 + g2)/2  # the average of the two grades
if average >= 7:
    print(f"Student with average \033[35m{average}\033[m: \033[32mAPPROVED\033[m")
elif average >= 5 and average < 7:
    print(f"Student with average \033[35m{average}\033[m: \033[33mRECOVERY\033[m")
else:
    print(f"Student with average \033[35m{average}\033[m: \033[31mFAILED\033[m")
"MIT"
] | engcristian/Python | Python-codes-CeV/40-Average.py | 615 | Python |
from helpers.language import estr
ACTION_TEXT = "text"
ACTION_TEXT_QUESTION = "text_question"
ACTION_LIST_QUESTION = "list_question"
ACTION_YES_NO_QUESTION = "yesno_question"
ACTION_CHOICES_QUESTION = "choices_question"
ID = "id"
NO = "no"
YES = "yes"
TEXT = "text"
ON_NO = "on_no"
ON_YES = "on_yes"
ACTION = "action"
CHOICES = "choices"
ON_CHOICES = "on_choices"
MAX_ANSWERS = "max_answers"
STOP_COMMAND = "stop_command"
START_NUMBER = "start_number"
CONVERSATION = "conversation"
ON_INVALID_CHOICE = "on_invalid_choice"
class Result:
def __init__(self, text, skip=False):
self.text = text
self.skip = skip
class StoppableDict:
def __init__(self, data={}, stopped=False):
self.data = data
self.stopped = stopped
def get(self, key):
return self.data.get(key)
def set(self, key, value):
self.data[key] = value
def toggle_stop(self, value=None):
if value is None:
value = not self.stopped
self.stopped = value
class Level:
def __init__(self, questions, index=-1):
self.questions = questions
self.index = index
def incr(self):
self.index += 1
def should_reset(self):
return len(self.questions) == self.index + 1
def get_next_question(self):
self.incr()
if len(self.questions) > self.index:
return self.questions[self.index]
class Levels:
def __init__(self, initial=[]):
self.levels = initial
@property
def level(self):
last_index = len(self.levels) - 1
return self.levels[last_index]
def reset_last(self):
if len(self.levels) > 1:
return self.levels.pop()
def change_level(self, level):
self.levels.append(level)
def get_next_question(self):
question = self.level.get_next_question()
if question is not None:
return question
if self.reset_last() is None:
return None
return self.get_next_question()
class Conversation:
    def __init__(self, manifest, default_answers=None):
        # Use a fresh dict per conversation; a mutable default argument
        # would be shared across instances.
        if default_answers is None:
            default_answers = {}
self._manifest = manifest[CONVERSATION]
self._stop_command = manifest.get(STOP_COMMAND)
self._answers = StoppableDict(default_answers)
keys = list(default_answers.keys())
if len(keys) == 0:
self._current_question = None
self._levels = Levels([Level(self._manifest)])
else:
qid = keys[len(keys) - 1]
result = self._get_question_by_id(self._manifest, qid)
self._levels = result["levels"]
self._current_question = result["item"]
@property
def answers(self):
return self._answers
def _must_stop(self, prev_answer):
return estr(prev_answer, self._stop_command)
def _get_question_by_id(self, level_list, qid, prev_levels=None):
level = Level(level_list)
if prev_levels is not None:
prev_levels.change_level(level)
else:
prev_levels = Levels([level])
for item in level_list:
prev_levels.level.incr()
if type(item) == dict:
if item.get(ID) == qid and item.get(ACTION):
return {"levels": prev_levels, "item": item}
else:
for key in item:
if key == ON_NO or key == ON_YES:
result = self._get_question_by_id(item[key], qid, prev_levels)
if result is not None:
return result
elif key == ON_CHOICES:
for choice in item[key]:
result = self._get_question_by_id(item[key][choice], qid, prev_levels)
if result is not None:
return result
def get_next_question(self, prev_answer=None):
prev_question = self._current_question
if self._stop_command and (self._must_stop(prev_answer) or self._answers.stopped):
self._answers.toggle_stop(True)
return None
if prev_question:
if prev_question[ACTION] == ACTION_TEXT_QUESTION:
self._answers.set(prev_question[ID], prev_answer)
elif prev_question[ACTION] == ACTION_YES_NO_QUESTION:
yes = estr(prev_answer, prev_question[YES])
no = estr(prev_answer, prev_question[NO])
if not (yes or no):
return Result(prev_question[ON_INVALID_CHOICE])
self._answers.set(prev_question[ID], yes)
level = prev_question[ON_YES] if yes else prev_question[ON_NO]
self._levels.change_level(level)
elif prev_question[ACTION] == ACTION_CHOICES_QUESTION:
choice_id = prev_question[CHOICES].get(prev_answer)
if choice_id is None:
return Result(prev_question[ON_INVALID_CHOICE])
self._answers.set(prev_question[ID], choice_id)
level = Level(prev_question[ON_CHOICES][choice_id])
self._levels.change_level(level)
elif prev_question[ACTION] == ACTION_LIST_QUESTION:
if not estr(prev_answer, prev_question[STOP_COMMAND]):
answers = self._answers.get(prev_question[ID])
if answers is None:
answers = []
self._answers.set(prev_question[ID], [])
if prev_answer:
self._answers.set(prev_question[ID], [*answers, prev_answer])
answers.append(prev_answer)
count = len(answers)
max_answers = prev_question[MAX_ANSWERS]
if count < max_answers:
text = "{}{}".format(self._current_question[TEXT], self._current_question[START_NUMBER] + count)
return Result(text)
elif prev_question[ACTION] == ACTION_TEXT:
self._answers.set(prev_question[ID], True)
self._current_question = self._levels.get_next_question()
if self._current_question is not None:
text = None
if self._current_question[ACTION] != ACTION_LIST_QUESTION:
text = self._current_question[TEXT]
else:
text = "{}{}".format(self._current_question[TEXT], self._current_question[START_NUMBER])
self._answers.set(self._current_question[ID], None)
return Result(text, self._current_question[ACTION] == ACTION_TEXT)
class ConversationsStorage:
def __init__(self):
self.conversations = {}
def add(self, cid, *args, **kwargs):
conversation = Conversation(*args, **kwargs)
self.conversations[cid] = conversation
return conversation
def get(self, cid):
return self.conversations.get(cid)
def set(self, cid, conversation):
self.conversations[cid] = conversation
def remove(self, cid):
return self.conversations.pop(cid, None)
def exists(self, cid):
conversation = self.get(cid)
return bool(conversation)
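# Editor's sketch (not part of the original module), showing the expected
# manifest shape and call pattern; the question ids and texts below are
# invented for illustration:
#
#   manifest = {
#       "conversation": [
#           {"id": "name", "action": "text_question", "text": "Your name?"},
#           {"id": "bye", "action": "text", "text": "Thanks!"},
#       ]
#   }
#   conv = Conversation(manifest)
#   conv.get_next_question()          # -> Result("Your name?")
#   conv.get_next_question("Alice")   # stores the answer -> Result("Thanks!", skip=True)
#   conv.get_next_question()          # -> None; conv.answers.data == {"name": "Alice", "bye": True}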
| 32.342222 | 120 | 0.588292 | [
"MIT"
] | R-Mielamud/Telegram_BooksDelivery | src/helpers/conversation.py | 7,277 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File:
import numpy as np
import unittest
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, RotatedBoxes, BoxMode
from detectron2.utils.visualizer import Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
        mask = np.zeros_like(img[:, :, 0], dtype=bool)
mask[:10, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {'annotations': [{'bbox': [368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685],
'bbox_mode': BoxMode.XYWH_ABS,
'category_id': 0,
'iscrowd': 1,
'segmentation': {'counts': '_jh52m?2N2N2N2O100O10O001N1O2MceP2',
'size': [512, 512]}}],
'height': 512,
'image_id': 1,
'width': 512}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
# Test 2x scaling
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
# Test overlay masks
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
| 40.402985 | 96 | 0.602143 | [
"Apache-2.0"
] | HJ0116/detectron2 | tests/test_visualizer.py | 5,414 | Python |
from selenium.webdriver.common.by import By
# for maintainability we can separate web objects by page name
class MainPageLocators(object):
LOGO = (By.ID, 'nav-logo')
ACCOUNT = (By.ID, 'nav-link-accountList')
SIGNUP = (By.CSS_SELECTOR, '#nav-signin-tooltip > div > a')
LOGIN = (By.CSS_SELECTOR, '#nav-signin-tooltip > a')
SEARCH = (By.ID, 'twotabsearchtextbox')
SEARCH_LIST = (By.CSS_SELECTOR, 'div[data-component-type="s-search-result"]')
class LoginPageLocators(object):
EMAIL = (By.ID, 'ap_email')
PASSWORD = (By.ID, 'ap_password')
SUBMIT = (By.ID, 'signInSubmit-input')
ERROR_MESSAGE = (By.ID, 'message_error')
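# Editor's sketch (not part of the original module): a page object would
# typically unpack these locator tuples into Selenium's find_element call,
# e.g.
#   element = driver.find_element(*MainPageLocators.SEARCH)
# where 'driver' is an already-created selenium WebDriver instance.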
| 32.95 | 81 | 0.6783 | [
"MIT"
] | gunesmes/page-object-python-selenium | utils/locators.py | 659 | Python |
#
# builder.py - PJSIP test scenarios builder
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import ccdash
import os
import platform
import re
import subprocess
import sys
import time
class Operation:
"""\
The Operation class describes the individual ccdash operation to be
performed.
"""
# Types:
UPDATE = "update" # Update operation
CONFIGURE = "configure" # Configure operation
BUILD = "build" # Build operation
TEST = "test" # Unit test operation
def __init__(self, type, cmdline, name="", wdir=""):
self.type = type
self.cmdline = cmdline
self.name = name
self.wdir = wdir
if self.type==self.TEST and not self.name:
            raise ValueError("name required for tests")
def encode(self, base_dir):
s = [self.type]
if self.type == self.TEST:
s.append(self.name)
if self.type != self.UPDATE:
s.append(self.cmdline)
s.append("-w")
if self.wdir:
s.append(base_dir + "/" + self.wdir)
else:
s.append(base_dir)
return s
#
# Update operation
#
update_ops = [Operation(Operation.UPDATE, "")]
#
# The standard library tests (e.g. pjlib-test, pjsip-test, etc.)
#
std_test_ops= [
Operation(Operation.TEST, "./pjlib-test$SUFFIX", name="pjlib test",
wdir="pjlib/bin"),
Operation(Operation.TEST, "./pjlib-util-test$SUFFIX",
name="pjlib-util test", wdir="pjlib-util/bin"),
Operation(Operation.TEST, "./pjnath-test$SUFFIX", name="pjnath test",
wdir="pjnath/bin"),
Operation(Operation.TEST, "./pjmedia-test$SUFFIX", name="pjmedia test",
wdir="pjmedia/bin"),
Operation(Operation.TEST, "./pjsip-test$SUFFIX", name="pjsip test",
wdir="pjsip/bin")
]
#
# These are pjsua Python based unit test operations
#
def build_pjsua_test_ops(pjsua_exe=""):
ops = []
if pjsua_exe:
exe = " -e ../../pjsip-apps/bin/" + pjsua_exe
else:
exe = ""
cwd = os.getcwd()
os.chdir("../pjsua")
os.system("python runall.py --list > list")
f = open("list", "r")
for e in f:
e = e.rstrip("\r\n ")
(mod,param) = e.split(None,2)
name = mod[4:mod.find(".py")] + "_" + \
param[param.find("/")+1:param.find(".py")]
ops.append(Operation(Operation.TEST, "python run.py" + exe + " " + \
e, name=name, wdir="tests/pjsua"))
f.close()
os.remove("list")
os.chdir(cwd)
return ops
#
# Get gcc version
#
def gcc_version(gcc):
proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
ver = ""
while True:
s = proc.stdout.readline()
if not s:
break
if s.find("gcc version") >= 0:
ver = s.split(None, 3)[2]
break
proc.wait()
return "gcc-" + ver
#
# Get Visual Studio version
#
def vs_get_version():
proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
s = proc.stdout.readline()
if s=="":
break
pos = s.find("Version")
if pos > 0:
proc.wait()
s = s[pos+8:]
ver = s.split(None, 1)[0]
major = ver[0:2]
if major=="12":
return "vs6"
elif major=="13":
return "vs2003"
elif major=="14":
return "vs2005"
elif major=="15":
return "vs2008"
else:
return "vs-" + major
proc.wait()
return "vs-unknown"
#
# Test config
#
class BaseConfig:
def __init__(self, base_dir, url, site, group, options=None):
self.base_dir = base_dir
self.url = url
self.site = site
self.group = group
self.options = options
#
# Base class for test configurator
#
class TestBuilder:
def __init__(self, config, build_config_name="",
user_mak="", config_site="", exclude=[], not_exclude=[]):
self.config = config # BaseConfig instance
self.build_config_name = build_config_name # Optional build suffix
self.user_mak = user_mak # To be put in user.mak
self.config_site = config_site # To be put in config_s..
self.saved_user_mak = "" # To restore user.mak
self.saved_config_site = "" # To restore config_s..
self.exclude = exclude # List of exclude pattern
self.not_exclude = not_exclude # List of include pattern
self.ccdash_args = [] # ccdash cmd line
def stamp(self):
return time.strftime("%Y%m%d-%H%M", time.localtime())
def pre_action(self):
# Override user.mak
name = self.config.base_dir + "/user.mak"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_user_mak = f.read()
f.close()
if True:
f = open(name, "w")
f.write(self.user_mak)
f.close()
# Override config_site.h
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_config_site= f.read()
f.close()
if True:
f = open(name, "wt")
f.write(self.config_site)
f.close()
def post_action(self):
# Restore user.mak
name = self.config.base_dir + "/user.mak"
f = open(name, "wt")
f.write(self.saved_user_mak)
f.close()
# Restore config_site.h
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
f = open(name, "wt")
f.write(self.saved_config_site)
f.close()
def build_tests(self):
# This should be overridden by subclasses
pass
def execute(self):
if len(self.ccdash_args)==0:
self.build_tests()
self.pre_action()
mandatory_op = ["update", "configure", "build"]
counter = 0
for a in self.ccdash_args:
# Check if this test is in exclusion list
fullcmd = " ".join(a)
excluded = False
included = False
for pat in self.exclude:
if pat and re.search(pat, fullcmd) != None:
excluded = True
break
if excluded:
for pat in self.not_exclude:
if pat and re.search(pat, fullcmd) != None:
included = True
break
if excluded and not included:
if len(fullcmd)>60:
fullcmd = fullcmd[0:60] + ".."
print "Skipping '%s'" % (fullcmd)
continue
b = ["ccdash.py"]
b.extend(a)
a = b
#print a
try:
rc = ccdash.main(a)
except Exception, e:
errmsg = str(e)
print "**** Error: ccdash got exception %s ****" % errmsg
rc = -1
except:
print "**** Error: ccdash got unknown exception ****"
rc = -1
if rc!=0 and a[1] in mandatory_op:
print "Stopping because of error.."
break
counter = counter + 1
self.post_action()
#
# GNU test configurator
#
class GNUTestBuilder(TestBuilder):
"""\
This class creates list of tests suitable for GNU targets.
"""
def __init__(self, config, build_config_name="", user_mak="", \
config_site="", cross_compile="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
build_config_name - Optional name to be added as suffix to the build
name. Sample: "min-size", "O4", "TLS", etc.
user_mak - Contents to be put on user.mak
config_site - Contents to be put on config_site.h
cross_compile - Optional cross-compile prefix. Must include the
trailing dash, e.g. "arm-unknown-linux-"
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
user_mak=user_mak, config_site=config_site,
exclude=exclude, not_exclude=not_exclude)
self.cross_compile = cross_compile
if self.cross_compile and self.cross_compile[-1] != '-':
            self.cross_compile += "-"
def build_tests(self):
if self.cross_compile:
suffix = "-" + self.cross_compile[0:-1]
build_name = self.cross_compile + \
gcc_version(self.cross_compile + "gcc")
else:
proc = subprocess.Popen("sh "+self.config.base_dir+"/config.guess",
shell=True, stdout=subprocess.PIPE)
plat = proc.stdout.readline().rstrip(" \r\n")
build_name = plat + "-"+gcc_version(self.cross_compile + "gcc")
suffix = "-" + plat
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "sh ./configure"))
if sys.platform=="win32":
# Don't build python module on Mingw
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make'"))
else:
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make" + \
" && cd pjsip-apps/src/python && " + \
"python setup.py clean build'"))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops())
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
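# Editor's sketch (not part of the original script): a driver script would
# typically build a BaseConfig and a test builder and then call execute();
# the paths, URL, site and group names below are placeholders.
#
#   cfg = BaseConfig("/path/to/pjproject", "http://cdash.example.com/submit.php",
#                    "my-build-host", "Experimental", options=[])
#   builder = GNUTestBuilder(cfg, build_config_name="default",
#                            user_mak="# extra make variables here\n",
#                            config_site="/* extra config_site.h content */\n")
#   builder.execute()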
#
# MSVC test configurator
#
class MSVCTestBuilder(TestBuilder):
"""\
This class creates list of tests suitable for Visual Studio builds.
You need to set the MSVC environment variables (typically by calling
vcvars32.bat) prior to running this class.
"""
def __init__(self, config, target="Release|Win32", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Visual Studio build configuration to build.
Sample: "Debug|Win32", "Release|Win32".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "Debug", "Release", "IPv6", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
(vsbuild,sys) = self.target.split("|",2)
build_name = sys + "-" + vs_get_version() + "-" + vsbuild
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
vccmd = "vcbuild.exe /nologo /nohtmllog /nocolor /rebuild " + \
"pjproject-vs8.sln " + " \"" + self.target + "\""
suffix = "-i386-win32-vc8-" + vsbuild
pjsua = "pjsua_vc8"
if vsbuild=="debug":
pjsua = pjsua + "d"
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.append(Operation(Operation.BUILD, vccmd))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops(pjsua))
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
#
# Symbian test configurator
#
class SymbianTestBuilder(TestBuilder):
"""\
This class creates list of tests suitable for Symbian builds. You need to
set the command line build settings prior to running this class (typically
that involves setting the EPOCROOT variable and current device).
"""
def __init__(self, config, target="gcce urel", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Symbian target to build. Default is "gcce urel".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "APS", "VAS", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
# Check that EPOCROOT is set
if not "EPOCROOT" in os.environ:
print "Error: EPOCROOT environment variable is not set"
sys.exit(1)
epocroot = os.environ["EPOCROOT"]
# EPOCROOT must have trailing backslash
if epocroot[-1] != "\\":
epocroot = epocroot + "\\"
os.environ["EPOCROOT"] = epocroot
sdk1 = epocroot.split("\\")[-2]
# Check that correct device is set
proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
sdk2 = ""
while True:
line = proc.stdout.readline()
if line.find("- default") > 0:
sdk2 = line.split(":",1)[0]
break
proc.wait()
if sdk1 != sdk2:
print "Error: default SDK in device doesn't match EPOCROOT"
print "Default device SDK =", sdk2
print "EPOCROOT SDK =", sdk1
sys.exit(1)
build_name = sdk2.replace("_", "-") + "-" + \
self.target.replace(" ", "-")
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmdline = "cmd /C \"cd build.symbian && bldmake bldfiles && abld build %s\"" % (self.target)
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.extend([Operation(Operation.BUILD, cmdline)])
self.ccdash_args = []
suffix = ""
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
| 35.695219 | 100 | 0.53686 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | MathJud/qaul.net_legacy | pjproject-2.2.1/tests/cdash/builder.py | 17,919 | Python |
import copy
import gym
import numpy as np
import torch.nn as nn
import railrl.misc.hyperparameter as hyp
import railrl.torch.pytorch_util as ptu
from railrl.data_management.obs_dict_replay_buffer import \
ObsDictReplayBuffer
from railrl.launchers.launcher_util import run_experiment
# from railrl.samplers.data_collector import MdpPathCollector
# from railrl.samplers.data_collector.step_collector import MdpStepCollector
from railrl.samplers.data_collector.path_collector import ObsDictPathCollector
from railrl.samplers.data_collector.step_collector import ObsDictStepCollector
from railrl.visualization.video import VideoSaveFunctionBullet
from railrl.misc.buffer_save import BufferSaveFunction
from railrl.torch.networks import (
CNN,
MlpQfWithObsProcessor,
Split,
FlattenEach,
Concat,
Flatten,
)
from railrl.torch.sac.policies import (
MakeDeterministic, TanhGaussianPolicyAdapter,
)
from railrl.torch.sac.sac import SACTrainer
from railrl.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
import os.path as osp
from experiments.avi.env_wrappers import FlatEnv
PARENT_DIR = '/media/avi/data/Work/github/'
import sys
env_file = osp.join(PARENT_DIR, 'avisingh599/google-research/dql_grasping/')
sys.path.insert(1, env_file)
from grasping_env import KukaGraspingProceduralEnv
def experiment(variant):
env_params = dict(
block_random=0.3,
camera_random=0,
simple_observations=False,
continuous=True,
remove_height_hack=True,
render_mode="DIRECT",
# render_mode="GUI",
num_objects=5,
max_num_training_models=900,
target=False,
test=False,
)
expl_env = FlatEnv(KukaGraspingProceduralEnv(**env_params))
eval_env = expl_env
img_width, img_height = eval_env.image_shape
num_channels = 3
action_dim = int(np.prod(eval_env.action_space.shape))
cnn_params = variant['cnn_params']
cnn_params.update(
input_width=img_width,
input_height=img_height,
input_channels=num_channels,
added_fc_input_size=0,
output_conv_channels=True,
output_size=None,
)
qf_cnn = CNN(**cnn_params)
qf_obs_processor = nn.Sequential(
qf_cnn,
Flatten(),
)
qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
qf_kwargs['obs_processor'] = qf_obs_processor
qf_kwargs['output_size'] = 1
qf_kwargs['input_size'] = (
action_dim + qf_cnn.conv_output_flat_size
)
qf1 = MlpQfWithObsProcessor(**qf_kwargs)
qf2 = MlpQfWithObsProcessor(**qf_kwargs)
target_qf_cnn = CNN(**cnn_params)
target_qf_obs_processor = nn.Sequential(
target_qf_cnn,
Flatten(),
)
target_qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
target_qf_kwargs['obs_processor'] = target_qf_obs_processor
target_qf_kwargs['output_size'] = 1
target_qf_kwargs['input_size'] = (
action_dim + target_qf_cnn.conv_output_flat_size
)
target_qf1 = MlpQfWithObsProcessor(**target_qf_kwargs)
target_qf2 = MlpQfWithObsProcessor(**target_qf_kwargs)
action_dim = int(np.prod(eval_env.action_space.shape))
policy_cnn = CNN(**cnn_params)
policy_obs_processor = nn.Sequential(
policy_cnn,
Flatten(),
)
policy = TanhGaussianPolicyAdapter(
policy_obs_processor,
policy_cnn.conv_output_flat_size,
action_dim,
**variant['policy_kwargs']
)
observation_key = 'image'
eval_policy = MakeDeterministic(policy)
eval_path_collector = ObsDictPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
**variant['eval_path_collector_kwargs']
)
replay_buffer = ObsDictReplayBuffer(
variant['replay_buffer_size'],
expl_env,
observation_key=observation_key,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
if variant['collection_mode'] == 'batch':
expl_path_collector = ObsDictPathCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
elif variant['collection_mode'] == 'online':
expl_path_collector = ObsDictStepCollector(
expl_env,
policy,
observation_key=observation_key,
**variant['expl_path_collector_kwargs']
)
algorithm = TorchOnlineRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
else:
raise NotImplementedError
video_func = VideoSaveFunctionBullet(variant)
algorithm.post_train_funcs.append(video_func)
# dump_buffer_func = BufferSaveFunction(variant)
# algorithm.post_train_funcs.append(dump_buffer_func)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
trainer_kwargs=dict(
discount=0.99,
# soft_target_tau=5e-3,
# target_update_period=1,
soft_target_tau=1.0,
target_update_period=1000,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
algo_kwargs=dict(
batch_size=256,
max_path_length=15,
num_epochs=5000,
num_eval_steps_per_epoch=45,
num_expl_steps_per_train_loop=300,
num_trains_per_train_loop=300,
min_num_steps_before_training=10*300,
# max_path_length=10,
# num_epochs=100,
# num_eval_steps_per_epoch=100,
# num_expl_steps_per_train_loop=100,
# num_trains_per_train_loop=100,
# min_num_steps_before_training=100,
),
cnn_params=dict(
kernel_sizes=[3, 3],
n_channels=[4, 4],
strides=[1, 1],
hidden_sizes=[32, 32],
paddings=[1, 1],
pool_type='max2d',
pool_sizes=[2, 2],
pool_strides=[2, 2],
pool_paddings=[0, 0],
),
# replay_buffer_size=int(1E6),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
dump_video_kwargs=dict(
imsize=48,
save_video_period=1,
),
logger_config=dict(
snapshot_gap=10,
),
dump_buffer_kwargs=dict(
dump_buffer_period=50,
),
replay_buffer_size=int(5E5),
expl_path_collector_kwargs=dict(),
eval_path_collector_kwargs=dict(),
shared_qf_conv=False,
use_robot_state=False,
randomize_env=True,
)
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("--env", type=str, required=True,
# choices=('SawyerReach-v0', 'SawyerGraspOne-v0'))
# parser.add_argument("--obs", required=True, type=str, choices=('pixels', 'pixels_debug'))
parser.add_argument("--gpu", type=int, default=1)
args = parser.parse_args()
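    # Illustrative invocation (hypothetical, not in the original script):
    #   python eric_grasp_sac_pixel.py --gpu 0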
variant['env'] = 'KukaGraspingProceduralEnv'
variant['obs'] = 'pixels'
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
exp_prefix = 'railrl-bullet-{}-{}'.format(variant['env'], variant['obs'])
# n_seeds = 5
# mode = 'ec2'
# exp_prefix = 'railrl-bullet-sawyer-image-reach'
search_space = {
'shared_qf_conv': [
True,
# False,
],
'collection_mode': [
# 'batch',
'online',
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
experiment,
exp_name=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
gpu_id=args.gpu,
unpack_variant=False,
)
| 29.973333 | 95 | 0.632006 | [
"MIT"
] | Asap7772/rail-rl-franka-eval | experiments/avi/eric_grasp_sac_pixel.py | 8,992 | Python |
from __future__ import print_function
import datetime
import hashlib
import logging
from abc import ABCMeta
from halo_flask.classes import AbsBaseClass
from halo_flask.logs import log_json
from halo_flask.const import SYSTEMChoice,LOGChoice
from .settingsx import settingsx
settings = settingsx()
logger = logging.getLogger(__name__)
ver = settings.DB_VER
uri = settings.DB_URL
tbl = False
page_size = settings.PAGE_SIZE
class AbsDbMixin(AbsBaseClass):
__metaclass__ = ABCMeta
# intercept db calls
halo_context = None
def __init__(self, halo_context):
self.halo_context = halo_context
def __getattribute__(self, name):
attr = object.__getattribute__(self, name)
if hasattr(attr, '__call__'):
def newfunc(*args, **kwargs):
now = datetime.datetime.now()
result = attr(*args, **kwargs)
total = datetime.datetime.now() - now
logger.info(LOGChoice.performance_data.value, extra=log_json(self.halo_context,
{LOGChoice.type.value: SYSTEMChoice.dbaccess.value,
LOGChoice.milliseconds.value: int(total.total_seconds() * 1000),
LOGChoice.function.value: str(attr.__name__)}))
return result
return newfunc
else:
return attr
class AbsModel(AbsBaseClass):
pass | 28.584906 | 123 | 0.611881 | [
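# Hedged usage sketch (not part of the original module): AbsDbMixin.__getattribute__
# wraps every callable attribute of a subclass so each data-access call's duration is
# logged. ItemDbAccess and the empty halo context below are hypothetical, and this
# assumes the halo_flask settings and logging are configured.
if __name__ == "__main__":
    class ItemDbAccess(AbsDbMixin):
        def get_item(self, item_id):
            # stand-in for a real database read
            return {"id": item_id}
    dao = ItemDbAccess(halo_context={})
    print(dao.get_item(1))  # the wrapped call emits a performance log entry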
"MIT"
] | yoramk2/halo_flask | halo_flask/models.py | 1,515 | Python |
av1 = float(input('Nota 1° avaliação'))
av2 = float(input('Nota 2° avaliação'))
mp = (av1 + av2) / 2  # média parcial (average of the two grades)
tf = int(input('Total geral de faltas: '))
pf = float(input('Prova final: '))
final = (mp + pf) / 2  # average after the final exam
if tf > 20:
    print("Reprovado por falta")
elif mp >= 7:
    print("Aprovado")
elif 3 <= mp < 7:
    print("Em recuperação")
elif final >= 5:
    print("Aprovado")
else:
    print("Reprovado")
| 19.681818 | 41 | 0.558891 | [
"MIT"
] | sullyvan15/UVV | 4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Estrutura de seleção/Exercício 14.py | 441 | Python |
"""
.. module:: Katna.image_filters.text_detector
:platform: OS X
:synopsis: This module is implementation of text detector filter
"""
import os
import cv2
import numpy as np
import time
import requests
import random
from imutils.object_detection import non_max_suppression
from Katna.image_filters.filter import Filter
import Katna.config as config
class TextDetector(Filter):
"""TextDetector Class: Class for implementation of text detector filter, inherit from Filter class
"""
def __init__(self, weight=1.0):
"""Constructor for this class does following tasks, if not already downloaded\
, it first downloads text detector dnn weights file from public URL\
ands save it at USER_HOME/.katna directory, or /tmp/.katna directory.\
After this initializer code initializes internal parameter: \
min_confidence (for text detection)
"""
super().__init__(weight)
self.min_confidence = config.TextDetector.min_confidence
self.merge_threshold = config.TextDetector.merge_threshold
self.layerNames = config.TextDetector.layerNames
self.frozen_weights = config.TextDetector.frozen_weights
self.cache_subdir = config.TextDetector.cache_subdir
try:
self.network_folder_path = os.path.join(os.path.expanduser("~"), ".katna")
if not os.access(self.network_folder_path, os.W_OK):
self.network_folder_path = os.path.join("/tmp", ".katna")
self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
if not os.path.exists(self.datadir):
os.makedirs(self.datadir)
self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
if not os.path.exists(self.network_file_path):
self.download_data()
self.net = cv2.dnn.readNet(self.network_file_path)
except Exception:
raise FileNotFoundError(
self.frozen_weights
+ " seems to be missing.\
Download the file and specify the full path\
while initializing TextDetector class"
)
def download_data(self):
"""Public function for downloading the network weight from the URL link, to be used for
text detection functionality.
Troubleshooting tip: If you get FileNotFound error during text detector initialization,
initialize the text detector and call this function directly to download the model file from public URL link.
"""
# create response object
link = config.TextDetector.model_download_link
r = requests.get(link, stream=True)
# download started
print("Downloading model file...")
# if not os.path.isfile(self.network_file_path) or not os.path.exists(self.network_file_path):
with open(os.path.join(self.datadir, self.frozen_weights), "wb") as f:
for chunk in r.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
print("Model file downloaded.")
def __decode_predictions(self, scores, geometry):
"""Internal Function for getting bounding box and confidence values
from text detector dnn network output (scores, geometry)
function takes the number of rows and columns from the scores volume, then
initializes set of bounding box rectangles and corresponding confidence scores
"""
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the
# geometrical data used to derive potential bounding box
# coordinates that surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability,
# ignore it
if scoresData[x] < self.min_confidence:
continue
# compute the offset factor as our resulting feature
# maps will be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and
# then compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height
# of the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates
# for the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score
# to our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# return a tuple of the bounding boxes and associated confidences
return (rects, confidences)
def __merge_boxes(self, rects):
"""main function to detect text boxes from image
:param rects: list of
:type rects: numpy array
:param rectsUsed: image file in numpy array/opencv format
:type rectsUsed: numpy array
:return: output image with the list of text boxes
:rtype: file, list
"""
def grouper(iterable, interval=2):
prev = None
group = []
for item in iterable:
if not prev or abs(item[1] - prev[1]) <= interval:
group.append(item)
else:
yield group
group = [item]
prev = item
if group:
yield group
rects_used = []
heights = list()
for bbox in rects:
heights.append(bbox[3] - bbox[1])
heights = sorted(heights) # Sort heights
median_height = heights[len(heights) // 2] / 2 # Find half of the median height
bboxes_list = sorted(
rects, key=lambda k: k[1]
) # Sort the bounding boxes based on y1 coordinate ( y of the left-top coordinate )
combined_bboxes = grouper(
bboxes_list, median_height
) # Group the bounding boxes
for group in combined_bboxes:
x_min = min(group, key=lambda k: k[0])[0] # Find min of x1
x_max = max(group, key=lambda k: k[2])[2] # Find max of x2
y_min = min(group, key=lambda k: k[1])[1] # Find min of y1
y_max = max(group, key=lambda k: k[3])[3] # Find max of y2
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used
def __detect_text(self):
"""Internal function to detect text bounding boxes from input image.
Returns list of bounding boxes of each detected text field in input image.
:param image: image file in numpy array/opencv format
:type image: numpy array
:param output_image: image file in numpy array/opencv format
:type output_image: numpy array
:return: output image with the list of text boxes
:rtype: file, list
"""
(H, W) = self.image.shape[:2]
rW = W / 320
rH = H / 320
image = cv2.resize(self.image, (320, 320))
(H, W) = image.shape[:2]
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(
self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False
)
self.net.setInput(blob)
(scores, geometry) = self.net.forward(self.layerNames)
rects, confidences = self.__decode_predictions(scores, geometry)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
text_rects = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective
# ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
text_rects.append([startX, startY, endX, endY])
text_rects = sorted(text_rects, key=lambda item: item[0])
final_rects = text_rects
if len(text_rects) > 0:
final_rects = self.__merge_boxes(text_rects)
return final_rects
def set_image(self, image):
"""Public set_image function, This will detect all text boxes in input image and
will saves them as internal list of text_rect to be used in get_filter_result
:param image: input image from which needs to be cropped
:type image: numpy array(opencv)
"""
if image is None:
return None
self.image = image
self.text_rects = self.__detect_text()
def get_filter_result(self, crop):
"""Main public function of TextDetector filter class,
this filter Returns false if crop contains no text, additionally
checks for overlap between input crop rectangle and the detected
text bounding box, returns True if No overlap (Filter will not discard input crop)
otherwise returns False (signal for discarding input crop).
:param crop: input crop rectangle to test
:type crop: crop_rect
:return: True if No overlap (Filter will not discard input crop) otherwise returns False
:rtype: bool
"""
# rect: xs,ys,xe,ye
# crop: x,y,w,h
if self.text_rects is None or len(self.text_rects) == 0:
return True
for rect in self.text_rects:
if not (
(rect[2]) <= (crop.x + crop.w)
and (rect[0]) >= (crop.x)
and (rect[1]) >= (crop.y)
and (rect[3]) <= (crop.y + crop.h)
):
return False
else:
return True
return True
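# Hedged usage sketch (not part of the original module): detect text boxes in a
# standalone image. "sample.jpg" is a hypothetical path; in Katna the crop objects
# passed to get_filter_result come from the crop extractor, so only the detected
# text rectangles are printed here.
if __name__ == "__main__":
    detector = TextDetector()
    img = cv2.imread("sample.jpg")
    if img is not None:
        detector.set_image(img)
        print(detector.text_rects)  # merged text boxes as [x1, y1, x2, y2]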
| 40.058608 | 117 | 0.59263 | [
"MIT"
] | jibinmathew69/katna | Katna/image_filters/text_detector.py | 10,936 | Python |
from setuptools import setup
extra_requires = {
'celery': ["celery[redis]"],
'flower': ["flower"]
}
setup(name="terra",
packages=["terra"],
description="Terra",
      extras_require=extra_requires,
install_requires=[
"pyyaml",
"jstyleson",
# I use signal and task from celery, no matter what
"celery",
"filelock"
]
)
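# Illustrative note (not part of the original file): with setuptools, the optional
# dependency groups above are exposed as extras, e.g.
#   pip install .[celery]
#   pip install .[celery,flower]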
| 19.55 | 59 | 0.578005 | [
"MIT"
] | VisionSystemsInc/terra | setup.py | 391 | Python |
from lightning_conceptnet.uri import concept_uri
from wordfreq import simple_tokenize
from wordfreq.preprocess import preprocess_text
STOPWORDS = [
'the', 'a', 'an'
]
DROP_FIRST = ['to']
def english_filter(tokens):
"""
Given a list of tokens, remove a small list of English stopwords.
"""
non_stopwords = [token for token in tokens if token not in STOPWORDS]
while non_stopwords and non_stopwords[0] in DROP_FIRST:
non_stopwords = non_stopwords[1:]
if non_stopwords:
return non_stopwords
else:
return tokens
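# Illustration (not part of the original module) of english_filter's behaviour:
#   english_filter(['the', 'big', 'dog'])  -> ['big', 'dog']
#   english_filter(['to', 'run'])          -> ['run']
#   english_filter(['the'])                -> ['the']   (falls back to the input)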
def standardized_concept_uri(lang, text, *more):
"""
Make the appropriate URI for a concept in a particular language, including
removing English stopwords, normalizing the text in a way appropriate
to that language (using the text normalization from wordfreq), and joining
its tokens with underscores in a concept URI.
This text normalization can smooth over some writing differences: for
example, it removes vowel points from Arabic words, and it transliterates
Serbian written in the Cyrillic alphabet to the Latin alphabet so that it
can match other words written in Latin letters.
'more' contains information to distinguish word senses, such as a part
of speech or a WordNet domain. The items in 'more' get lowercased and
joined with underscores, but skip many of the other steps -- for example,
they won't have stopwords removed.
>>> standardized_concept_uri('en', 'this is a test')
'/c/en/this_is_test'
>>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')
'/c/en/this_is_test/n/example_phrase'
>>> standardized_concept_uri('sh', 'симетрија')
'/c/sh/simetrija'
"""
lang = lang.lower()
if lang == 'en':
token_filter = english_filter
else:
token_filter = None
text = preprocess_text(text.replace('_', ' '), lang)
tokens = simple_tokenize(text)
if token_filter is not None:
tokens = token_filter(tokens)
norm_text = '_'.join(tokens)
more_text = []
for item in more:
if item is not None:
tokens = simple_tokenize(item.replace('_', ' '))
if token_filter is not None:
tokens = token_filter(tokens)
more_text.append('_'.join(tokens))
return concept_uri(lang, norm_text, *more_text)
| 34.042857 | 79 | 0.684431 | [
"Apache-2.0"
] | ldtoolkit/lightning-conceptnet | lightning_conceptnet/nodes.py | 2,392 | Python |
# Copyright 2014 DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DreamHost Neutron Extensions
# @author: Murali Raju, New Dream Network, LLC (DreamHost)
# @author: Rosario Disomma, New Dream Network, LLC (DreamHost)
import logging
from openstack_dashboard.api import nova
from openstack_dashboard.api import neutron
from openstack_dashboard.api.neutron import neutronclient
from neutronclient.common.exceptions import PortNotFoundClient
from akanda.horizon.common import (
NEW_PROTOCOL_CHOICES_DICT, POLICY_CHOICES_DICT)
LOG = logging.getLogger(__name__)
def get_protocol(value):
return NEW_PROTOCOL_CHOICES_DICT[value]
class Port(object):
def __init__(self, alias_name, protocol, port, id=None):
self.alias_name = alias_name
self.protocol = protocol
self.port = port
self.id = id
def display_protocol(self):
return get_protocol(self.protocol)
class AddressGroup(object):
def __init__(self, name, id=None):
self.name = name
self.id = id
class Network(object):
def __init__(self, alias_name, cidr, id=None):
self.alias_name = alias_name
self.cidr = cidr
self.id = id
class FilterRule(object):
def __init__(self, source, source_public_port,
destination, destination_public_port,
protocol, policy, request, id=None):
self.policy = policy
self.source = source
self.source_public_port = source_public_port
self.destination = destination
self.destination_public_port = destination_public_port
self.protocol = protocol
self.request = request
self.id = id
def display_policy(self):
return POLICY_CHOICES_DICT[self.policy]
def display_source_group(self):
if self.source:
return self.source['name']
return ''
def display_destination_group(self):
if self.destination:
return self.destination['name']
return ''
def display_source_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.source_public_port)
def display_destination_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.destination_public_port)
class PortForwardingRule(object):
def __init__(self, rule_name, public_port,
protocol, private_port, port,
request, id=None):
self.rule_name = rule_name
self.public_port = public_port
self.protocol = protocol
self.private_port = private_port
self.port = port
self.request = request
self.id = id
def display_public_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.public_port)
def display_private_port(self):
return "%s %s" % (get_protocol(self.protocol),
self.private_port)
def display_instance(self):
try:
instance = nova.server_get(self.request, self.port['device_id'])
return instance.name
except:
return '--'
def _mk_url(*args):
path = '/'.join(args).lstrip('/')
if not path.startswith('/'):
path = '/' + path
return path
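# Illustration (not part of the original module): _mk_url joins its arguments into
# a path with a single leading slash, e.g.
#   _mk_url('dhportalias')          -> '/dhportalias'
#   _mk_url('dhportalias', '123')   -> '/dhportalias/123'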
def _list(request, path):
return neutronclient(request).get(_mk_url(path))
def _get(request, path, obj_id):
return neutronclient(request).get(_mk_url(path, obj_id))
def _create(request, path, body):
return neutronclient(request).post(_mk_url(path), body=body)
def _put(request, path, obj_id, body):
return neutronclient(request).put(_mk_url(path, obj_id), body=body)
def _delete(request, path, obj_id):
return neutronclient(request).delete(_mk_url(path, obj_id))
def portalias_list(request):
r = _list(request, 'dhportalias')
return [Port(item['name'], item['protocol'], item['port'], item['id'])
for item in r.get('portaliases', {})]
def portalias_get(request, obj_id):
r = _get(request, 'dhportalias', obj_id)
return r.get('portalias', {})
def portalias_create(request, body):
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_create(): body = %s" % body)
return _create(request, 'dhportalias', portalias)
def portalias_update(request, body):
obj_id = body.pop('id', '')
portalias = {'portalias': {
'name': body['alias_name'],
'protocol': body['protocol'],
'port': body['port'],
}}
LOG.debug("portalias_update(): body = %s" % body)
return _put(request, 'dhportalias', obj_id, portalias)
def portalias_delete(request, obj_id):
return _delete(request, 'dhportalias', obj_id)
def addressgroup_list(request):
r = _list(request, 'dhaddressgroup')
return [AddressGroup(item['name'], item['id'])
for item in r.get('addressgroups', {})]
def addressgroup_get(request, obj_id):
r = _get(request, 'dhaddressgroup', obj_id)
return r.get('addressgroup', {})
def addressgroup_create(request, body):
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_create(): body = %s" % body)
return _create(request, 'dhaddressgroup', addressgroup)
def addressgroup_update(request, body):
obj_id = body.pop('id', '')
addressgroup = {'addressgroup': {
'name': body['name'],
}}
LOG.debug("addressgroup_update(): body = %s" % body)
return _put(request, 'dhaddressgroup', obj_id, addressgroup)
def addressgroup_delete(request, obj_id):
return _delete(request, 'dhaddressgroup', obj_id)
def networkalias_list(request):
r = _list(request, 'dhaddressentry')
return [Network(item['name'], item['cidr'], item['id'])
for item in r.get('addressentries', {})]
def networkalias_get(request, obj_id):
r = _get(request, 'dhaddressentry', obj_id)
return r.get('addressentry', {})
def networkalias_create(request, body):
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
'group_id': body['group']
}}
LOG.debug("networkalias_create(): body = %s" % body)
return _create(request, 'dhaddressentry', networkalias)
def networkalias_update(request, body):
obj_id = body.pop('id', '')
networkalias = {'addressentry': {
'name': body['name'],
'cidr': body['cidr'],
}}
LOG.debug("networkalias_update(): body = %s" % body)
return _put(request, 'dhaddressentry', obj_id, networkalias)
def networkalias_delete(request, obj_id):
return _delete(request, 'dhaddressentry', obj_id)
def filterrule_list(request):
r = _list(request, 'dhfilterrule')
return [FilterRule(item.get('source'), item['source_port'],
item.get('destination'), item['destination_port'],
item['protocol'], item['action'], request, item['id'])
for item in r.get('filterrules', {})]
def filterrule_get(request, obj_id):
r = _get(request, 'dhfilterrule', obj_id)
return r.get('filterrule', {})
def filterrule_create(request, body):
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_create(): body = %s" % body)
return _create(request, 'dhfilterrule', filterrule)
def filterrule_update(request, body):
obj_id = body.pop('id', '')
filterrule = {'filterrule': {
'source_id': body['source_id'],
'destination_id': body['destination_id'],
'source_port': body['source_public_port'],
'destination_port': body['destination_public_port'],
'protocol': body['source_protocol'],
'action': body['policy'],
}}
LOG.debug("filterrule_update(): body = %s" % body)
return _put(request, 'dhfilterrule', obj_id, filterrule)
def filterrule_delete(request, obj_id):
return _delete(request, 'dhfilterrule', obj_id)
def portforward_list(request):
r = _list(request, 'dhportforward')
return [PortForwardingRule(item['name'], item['public_port'],
item['protocol'], item['private_port'],
item['port'], request, item['id'])
for item in r.get('portforwards', {})]
def portforward_get(request, obj_id):
r = _get(request, 'dhportforward', obj_id)
return r.get('portforward', {})
def portforward_create(request, body):
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_create(): body = %s" % body)
return _create(request, 'dhportforward', portforward)
def portforward_update(request, body):
obj_id = body.pop('id', '')
port_list = neutron.port_list(request, device_id=body['instance'])
try:
port = port_list[0]
except IndexError:
raise PortNotFoundClient
portforward = {'portforward': {
'name': body['rule_name'],
'instance_id': body['instance'],
'protocol': body['public_protocol'],
'public_port': body['public_port'],
'private_port': body['private_port'],
'port_id': port.id
}}
LOG.debug("portforward_update(): body = %s" % body)
return _put(request, 'dhportforward', obj_id, portforward)
def portforward_delete(request, obj_id):
return _delete(request, 'dhportforward', obj_id)
| 29.790368 | 77 | 0.641308 | [
"Apache-2.0"
] | dreamhost/akanda-horizon | akanda/horizon/api/neutron_extensions_client.py | 10,516 | Python |
"""Django models utilities."""
#Django
from django.db import models
class CRideModel(models.Model):
"""
    CRideModel acts as an abstract base class from
    which every other model in the project will inherit.
    This class provides every table with the following
    attributes:
+ created (Datetime): store the datetime the object was created
+ modified (Datetime): store the last datetime the object was modified
"""
created = models.DateTimeField(
'created at',
auto_now_add=True, # set the date auto when the model is created
help_text='Date time on which the object was created.'
)
modified = models.DateTimeField(
'modified at',
auto_now=True, # set the date when the model is called
help_text='Date time on which the object was last modified.'
)
class Meta:
"""Meta options."""
abstract = True
# Set class config when it is called
get_latest_by = 'created'
ordering = ['-created', '-modified']
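# Hedged example (not part of the original module): a concrete model inheriting the
# audit fields; "Circle" is hypothetical and would normally live in its own app.
#
#     class Circle(CRideModel):
#         name = models.CharField(max_length=140)
#         slug_name = models.SlugField(unique=True, max_length=40)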
| 29.771429 | 78 | 0.649712 | [
"MIT"
] | Jonulo/dejango-adv | cride/utils/models.py | 1,042 | Python |
#!/usr/bin/env python3
# Copyright 2019, Alex Wiens <[email protected]>, Achim Lösch <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import os.path
import subprocess
import test as schedtest
import plot
def hostname():
return subprocess.getoutput("hostname")
if __name__ == "__main__":
cwd = os.getcwd()
testname = os.path.basename(cwd)
	host = os.environ["SCHED_HOST"] if "SCHED_HOST" in os.environ else hostname()
for testtype in ["sim","exp"]:
test = schedtest.SchedTest.loadTest(testtype, testname=testname, resultdir=cwd, host=host)
if test != None and test.loadTestLog():
test.generate_report()
else:
print("log for",testtype,"not found")
| 23.448276 | 92 | 0.726471 | [
"BSD-2-Clause"
] | aw32/sched | scripts/report_gen.py | 681 | Python |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
pch_kwargs = set(['c_pch', 'cpp_pch'])
lang_arg_kwargs = set([
'c_args',
'cpp_args',
'd_args',
'd_import_dirs',
'd_unittest',
'd_module_versions',
'd_debug',
'fortran_args',
'java_args',
'objc_args',
'objcpp_args',
'rust_args',
'vala_args',
'cs_args',
])
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])
buildtarget_kwargs = set([
'build_by_default',
'build_rpath',
'dependencies',
'extra_files',
'gui_app',
'link_with',
'link_whole',
'link_args',
'link_depends',
'implicit_include_directories',
'include_directories',
'install',
'install_rpath',
'install_dir',
'install_mode',
'name_prefix',
'name_suffix',
'native',
'objects',
'override_options',
'sources',
'gnu_symbol_visibility',
])
known_build_target_kwargs = (
buildtarget_kwargs |
lang_arg_kwargs |
pch_kwargs |
vala_kwargs |
rust_kwargs |
cs_kwargs)
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
pass
class Build:
"""A class that holds the status of one build including
all dependencies and so on.
"""
def __init__(self, environment):
self.project_name = 'name of master project'
self.project_version = None
self.environment = environment
self.projects = {}
self.targets = OrderedDict()
# Coredata holds the state. This is just here for convenience.
self.compilers = environment.coredata.compilers
self.cross_compilers = environment.coredata.cross_compilers
self.global_args = {}
self.projects_args = {}
self.global_link_args = {}
self.projects_link_args = {}
self.cross_global_args = {}
self.cross_projects_args = {}
self.cross_global_link_args = {}
self.cross_projects_link_args = {}
self.tests = []
self.benchmarks = []
self.headers = []
self.man = []
self.data = []
self.static_linker = None
self.static_cross_linker = None
self.subprojects = {}
self.subproject_dir = ''
self.install_scripts = []
self.postconf_scripts = []
self.dist_scripts = []
self.install_dirs = []
self.dep_manifest_name = None
self.dep_manifest = {}
self.cross_stdlibs = {}
self.test_setups = {}
self.test_setup_default_name = None
self.find_overrides = {}
self.searched_programs = set() # The list of all programs that have been searched for.
def copy(self):
other = Build(self.environment)
for k, v in self.__dict__.items():
if k in ['compilers', 'cross_compilers']:
# These alias coredata's fields of the same name, and must not
# become copies.
continue
if isinstance(v, (list, dict, set, OrderedDict)):
other.__dict__[k] = v.copy()
else:
other.__dict__[k] = v
return other
def merge(self, other):
for k, v in other.__dict__.items():
self.__dict__[k] = v
def ensure_static_linker(self, compiler):
if self.static_linker is None and compiler.needs_static_linker():
self.static_linker = self.environment.detect_static_linker(compiler)
def ensure_static_cross_linker(self, compiler):
if self.static_cross_linker is None and compiler.needs_static_linker():
self.static_cross_linker = self.environment.detect_static_linker(compiler)
def get_project(self):
return self.projects['']
def get_subproject_dir(self):
return self.subproject_dir
def get_targets(self):
return self.targets
def get_tests(self):
return self.tests
def get_benchmarks(self):
return self.benchmarks
def get_headers(self):
return self.headers
def get_man(self):
return self.man
def get_data(self):
return self.data
def get_install_subdirs(self):
return self.install_dirs
def get_global_args(self, compiler, for_cross):
d = self.cross_global_args if for_cross else self.global_args
return d.get(compiler.get_language(), [])
def get_project_args(self, compiler, project, for_cross):
d = self.cross_projects_args if for_cross else self.projects_args
args = d.get(project)
if not args:
return []
return args.get(compiler.get_language(), [])
def get_global_link_args(self, compiler, for_cross):
d = self.cross_global_link_args if for_cross else self.global_link_args
return d.get(compiler.get_language(), [])
def get_project_link_args(self, compiler, project, for_cross):
d = self.cross_projects_link_args if for_cross else self.projects_link_args
link_args = d.get(project)
if not link_args:
return []
return link_args.get(compiler.get_language(), [])
class IncludeDirs:
def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
self.curdir = curdir
self.incdirs = dirs
self.is_system = is_system
# Interpreter has validated that all given directories
# actually exist.
if extra_build_dirs is None:
self.extra_build_dirs = []
else:
self.extra_build_dirs = extra_build_dirs
def __repr__(self):
r = '<{} {}/{}>'
return r.format(self.__class__.__name__, self.curdir, self.incdirs)
def get_curdir(self):
return self.curdir
def get_incdirs(self):
return self.incdirs
def get_extra_build_dirs(self):
return self.extra_build_dirs
class ExtractedObjects:
'''
Holds a list of sources for which the objects must be extracted
'''
def __init__(self, target, srclist=[], genlist=[], objlist=[], recursive=True):
self.target = target
self.recursive = recursive
self.srclist = srclist
self.genlist = genlist
self.objlist = objlist
if self.target.is_unity:
self.check_unity_compatible()
def __repr__(self):
r = '<{0} {1!r}: {2}>'
return r.format(self.__class__.__name__, self.target.name, self.srclist)
def classify_all_sources(self, sources, generated_sources):
# Merge sources and generated sources
sources = list(sources)
for gensrc in generated_sources:
for s in gensrc.get_outputs():
# We cannot know the path where this source will be generated,
# but all we need here is the file extension to determine the
# compiler.
sources.append(s)
# Filter out headers and all non-source files
sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
return classify_unity_sources(self.target.compilers.values(), sources)
def check_unity_compatible(self):
# Figure out if the extracted object list is compatible with a Unity
# build. When we're doing a Unified build, we go through the sources,
# and create a single source file from each subset of the sources that
# can be compiled with a specific compiler. Then we create one object
# from each unified source file. So for each compiler we can either
        # extract all its sources or none.
cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
for comp, srcs in extracted_cmpsrcs.items():
if set(srcs) != set(cmpsrcs[comp]):
raise MesonException('Single object files can not be extracted '
'in Unity builds. You can only extract all '
'the object files for each compiler at once.')
class EnvironmentVariables:
def __init__(self):
self.envvars = []
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.envvars)
def get_value(self, values, kwargs):
separator = kwargs.get('separator', os.pathsep)
value = ''
for var in values:
value += separator + var
return separator, value.strip(separator)
def set(self, env, name, values, kwargs):
return self.get_value(values, kwargs)[1]
def append(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return env[name] + sep + value
return value
def prepend(self, env, name, values, kwargs):
sep, value = self.get_value(values, kwargs)
if name in env:
return value + sep + env[name]
return value
def get_env(self, full_env):
env = full_env.copy()
for method, name, values, kwargs in self.envvars:
env[name] = method(full_env, name, values, kwargs)
return env
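# Illustration (added, not in the original source): get_env() applies the collected
# (method, name, values, kwargs) tuples to a copy of the given environment. In Meson
# the interpreter populates envvars; the manual append below is only for demonstration.
#   ev = EnvironmentVariables()
#   ev.envvars.append((ev.prepend, 'PATH', ['/opt/tools/bin'], {}))
#   ev.get_env({'PATH': '/usr/bin'})  # -> {'PATH': '/opt/tools/bin' + os.pathsep + '/usr/bin'}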
class Target:
def __init__(self, name, subdir, subproject, build_by_default):
if has_path_sep(name):
# Fix failing test 53 when this becomes an error.
mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
self.name = name
self.subdir = subdir
self.subproject = subproject
self.build_by_default = build_by_default
self.install = False
self.build_always_stale = False
self.option_overrides = {}
if not hasattr(self, 'typename'):
raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
def get_install_dir(self, environment):
# Find the installation directory.
default_install_dir = self.get_default_install_dir(environment)
outdirs = self.get_custom_install_dir()
if outdirs[0] is not None and outdirs[0] != default_install_dir and outdirs[0] is not True:
# Either the value is set to a non-default value, or is set to
# False (which means we want this specific output out of many
# outputs to not be installed).
custom_install_dir = True
else:
custom_install_dir = False
outdirs[0] = default_install_dir
return outdirs, custom_install_dir
def get_basename(self):
return self.name
def get_subdir(self):
return self.subdir
def get_typename(self):
return self.typename
@staticmethod
def _get_id_hash(target_id):
# We don't really need cryptographic security here.
# Small-digest hash function with unlikely collision is good enough.
h = hashlib.sha256()
h.update(target_id.encode(encoding='utf-8', errors='replace'))
# This ID should be case-insensitive and should work in Visual Studio,
# e.g. it should not start with leading '-'.
return h.hexdigest()[:7]
@staticmethod
def construct_id_from_path(subdir, name, type_suffix):
"""Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests."""
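        # Example (added for illustration, not in the original source):
        #   construct_id_from_path('', 'prog', '@exe')        -> 'prog@exe'
        #   construct_id_from_path('sub/dir', 'prog', '@exe') -> '<7-hex-hash>@@prog@exe'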
# This ID must also be a valid file name on all OSs.
# It should also avoid shell metacharacters for obvious
# reasons. '@' is not used as often as '_' in source code names.
# In case of collisions consider using checksums.
# FIXME replace with assert when slash in names is prohibited
name_part = name.replace('/', '@').replace('\\', '@')
assert not has_path_sep(type_suffix)
my_id = name_part + type_suffix
if subdir:
subdir_part = Target._get_id_hash(subdir)
# preserve myid for better debuggability
return subdir_part + '@@' + my_id
return my_id
def get_id(self):
return self.construct_id_from_path(
self.subdir, self.name, self.type_suffix())
def process_kwargs(self, kwargs):
if 'build_by_default' in kwargs:
self.build_by_default = kwargs['build_by_default']
if not isinstance(self.build_by_default, bool):
raise InvalidArguments('build_by_default must be a boolean value.')
elif kwargs.get('install', False):
# For backward compatibility, if build_by_default is not explicitly
# set, use the value of 'install' if it's enabled.
self.build_by_default = True
self.option_overrides = self.parse_overrides(kwargs)
def parse_overrides(self, kwargs):
result = {}
overrides = stringlistify(kwargs.get('override_options', []))
for o in overrides:
if '=' not in o:
raise InvalidArguments('Overrides must be of form "key=value"')
k, v = o.split('=', 1)
k = k.strip()
v = v.strip()
result[k] = v
return result
def is_linkable_target(self):
return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
        if not any([self.sources, self.generated, self.objects, self.link_whole_targets]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
def validate_cross_install(self, environment):
if environment.is_cross_build() and not self.is_cross and self.need_install:
raise InvalidArguments('Tried to install a natively built target in a cross build.')
def check_unknown_kwargs(self, kwargs):
# Override this method in derived classes that have more
# keywords.
self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
def check_unknown_kwargs_int(self, kwargs, known_kwargs):
unknowns = []
for k in kwargs:
if k not in known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
(self.name, ', '.join(unknowns)))
def process_objectlist(self, objects):
assert(isinstance(objects, list))
for s in objects:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, (str, File, ExtractedObjects)):
self.objects.append(s)
elif isinstance(s, (GeneratedList, CustomTarget)):
msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
'for target {!r}.\nIt is meant only for '.format(self.name) + \
'pre-built object files that are shipped with the\nsource ' + \
'tree. Try adding it in the list of sources.'
raise InvalidArguments(msg)
else:
msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
def process_sourcelist(self, sources):
sources = listify(sources)
added_sources = {} # If the same source is defined multiple times, use it only once.
for s in sources:
# Holder unpacking. Ugly.
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
if s not in added_sources:
self.sources.append(s)
added_sources[s] = True
elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
self.generated.append(s)
else:
msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
def process_compilers_late(self):
"""Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
"""
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# If this library is linked against another library we need to consider
# the languages of those libraries as well.
if self.link_targets or self.link_whole_targets:
extra = set()
for t in itertools.chain(self.link_targets, self.link_whole_targets):
for name, compiler in t.compilers.items():
if name in clink_langs:
extra.add((name, compiler))
for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
self.compilers[name] = compiler
if not self.compilers:
# No source files or parent targets, target consists of only object
# files of unknown origin. Just add the first clink compiler
# that we have and hope that it can link these objects
for lang in clink_langs:
if lang in compilers:
self.compilers[lang] = compilers[lang]
break
def process_compilers(self):
'''
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
'''
if not self.sources and not self.generated and not self.objects:
return
# Populate list of compilers
if self.is_cross:
compilers = self.environment.coredata.cross_compilers
else:
compilers = self.environment.coredata.compilers
# Pre-existing sources
sources = list(self.sources)
# All generated sources
for gensrc in self.generated:
for s in gensrc.get_outputs():
# Generated objects can't be compiled, so don't use them for
# compiler detection. If our target only has generated objects,
# we will fall back to using the first c-like compiler we find,
# which is what we need.
if not is_object(s):
sources.append(s)
for d in self.external_deps:
if hasattr(d, 'held_object'):
d = d.held_object
for s in d.sources:
if isinstance(s, (str, File)):
sources.append(s)
# Sources that were used to create our extracted objects
for o in self.objects:
if not isinstance(o, ExtractedObjects):
continue
for s in o.srclist:
# Don't add Vala sources since that will pull in the Vala
# compiler even though we will never use it since we are
# dealing with compiled C code.
if not s.endswith(lang_suffixes['vala']):
sources.append(s)
if sources:
# For each source, try to add one compiler that can compile it.
# It's ok if no compilers can do so, because users are expected to
# be able to add arbitrary non-source files to the sources list.
for s in sources:
for lang, compiler in compilers.items():
if compiler.can_compile(s):
if lang not in self.compilers:
self.compilers[lang] = compiler
break
# Re-sort according to clink_langs
self.compilers = OrderedDict(sorted(self.compilers.items(),
key=lambda t: sort_clink(t[0])))
# If all our sources are Vala, our target also needs the C compiler but
# it won't get added above.
if 'vala' in self.compilers and 'c' not in self.compilers:
self.compilers['c'] = compilers['c']
def validate_sources(self):
if not self.sources:
return
for lang in ('cs', 'java'):
if lang in self.compilers:
check_sources = list(self.sources)
compiler = self.compilers[lang]
if not self.can_compile_remove_sources(compiler, check_sources):
m = 'No {} sources found in target {!r}'.format(lang, self.name)
raise InvalidArguments(m)
if check_sources:
m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
m += '\n'.join([repr(c) for c in check_sources])
raise InvalidArguments(m)
# CSharp and Java targets can't contain any other file types
assert(len(self.compilers) == 1)
return
def process_link_depends(self, sources, environment):
"""Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
"""
sources = listify(sources)
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, File):
self.link_depends.append(s)
elif isinstance(s, str):
self.link_depends.append(
File.from_source_file(environment.source_dir, self.subdir, s))
elif hasattr(s, 'get_outputs'):
self.link_depends.extend(
[File.from_built_file(s.subdir, p) for p in s.get_outputs()])
else:
raise InvalidArguments(
'Link_depends arguments must be strings, Files, '
'or a Custom Target, or lists thereof.')
def get_original_kwargs(self):
return self.kwargs
def unpack_holder(self, d):
d = listify(d)
newd = []
for i in d:
if isinstance(i, list):
i = self.unpack_holder(i)
elif hasattr(i, 'held_object'):
i = i.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if hasattr(i, t):
setattr(i, t, self.unpack_holder(getattr(i, t)))
newd.append(i)
return newd
def copy_kwargs(self, kwargs):
self.kwargs = copy.copy(kwargs)
# This sucks quite badly. Arguments
# are holders but they can't be pickled
# so unpack those known.
for k, v in self.kwargs.items():
if isinstance(v, list):
self.kwargs[k] = self.unpack_holder(v)
if hasattr(v, 'held_object'):
self.kwargs[k] = v.held_object
for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
if t in self.kwargs:
self.kwargs[t] = self.unpack_holder(self.kwargs[t])
def extract_objects(self, srclist):
obj_src = []
for src in srclist:
if not isinstance(src, str):
raise MesonException('Object extraction arguments must be strings.')
src = File(False, self.subdir, src)
# FIXME: It could be a generated source
if src not in self.sources:
raise MesonException('Tried to extract unknown source %s.' % src)
obj_src.append(src)
return ExtractedObjects(self, obj_src)
def extract_all_objects(self, recursive=True):
return ExtractedObjects(self, self.sources, self.generated, self.objects,
recursive)
def get_all_link_deps(self):
return self.get_transitive_link_deps()
@lru_cache(maxsize=None)
def get_transitive_link_deps(self):
result = []
for i in self.link_targets:
result += i.get_all_link_deps()
return result
def get_link_deps_mapping(self, prefix, environment):
return self.get_transitive_link_deps_mapping(prefix, environment)
@lru_cache(maxsize=None)
def get_transitive_link_deps_mapping(self, prefix, environment):
result = {}
for i in self.link_targets:
mapping = i.get_link_deps_mapping(prefix, environment)
#we are merging two dictionaries, while keeping the earlier one dominant
result_tmp = mapping.copy()
result_tmp.update(result)
result = result_tmp
return result
@lru_cache(maxsize=None)
def get_link_dep_subdirs(self):
result = OrderedSet()
for i in self.link_targets:
result.add(i.get_subdir())
result.update(i.get_link_dep_subdirs())
return result
def get_default_install_dir(self, environment):
return environment.get_libdir()
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s' %
(self.gnu_symbol_visibility, ', '.join(permitted)))
def _extract_pic_pie(self, kwargs, arg):
# Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
all_flags = self.extra_args['c'] + self.extra_args['cpp']
if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
return True
val = kwargs.get(arg, False)
if not isinstance(val, bool):
raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
return val
def get_filename(self):
return self.filename
def get_outputs(self):
return self.outputs
def get_extra_args(self, language):
return self.extra_args.get(language, [])
def get_dependencies(self, exclude=None, internal=True):
transitive_deps = []
if exclude is None:
exclude = []
if internal:
link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
else:
# We don't want the 'internal' libraries when generating the
# `Libs:` and `Libs.private:` lists in pkg-config files.
link_targets = self.link_targets
for t in link_targets:
if t in transitive_deps or t in exclude:
continue
transitive_deps.append(t)
if isinstance(t, StaticLibrary):
transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
return transitive_deps
def get_source_subdir(self):
return self.subdir
def get_sources(self):
return self.sources
def get_objects(self):
return self.objects
def get_generated_sources(self):
return self.generated
def should_install(self):
return self.need_install
def has_pch(self):
return len(self.pch) > 0
def get_pch(self, language):
try:
return self.pch[language]
except KeyError:
return []
def get_include_dirs(self):
return self.include_dirs
def add_deps(self, deps):
deps = listify(deps)
for dep in deps:
if hasattr(dep, 'held_object'):
dep = dep.held_object
if isinstance(dep, dependencies.InternalDependency):
# Those parts that are internal.
self.process_sourcelist(dep.sources)
self.add_include_dirs(dep.include_directories)
for l in dep.libraries:
self.link(l)
for l in dep.whole_libraries:
self.link_whole(l)
if dep.compile_args or dep.link_args:
# Those parts that are external.
extpart = dependencies.InternalDependency('undefined',
[],
dep.compile_args,
dep.link_args,
[], [], [], [])
self.external_deps.append(extpart)
# Deps of deps.
self.add_deps(dep.ext_deps)
elif isinstance(dep, dependencies.Dependency):
self.external_deps.append(dep)
self.process_sourcelist(dep.get_sources())
elif isinstance(dep, BuildTarget):
raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
else:
# This is a bit of a hack. We do not want Build to know anything
# about the interpreter so we can't import it and use isinstance.
# This should be reliable enough.
if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
raise InvalidArguments('Tried to use subproject object as a dependency.\n'
'You probably wanted to use a dependency declared in it instead.\n'
'Access it by calling get_variable() on the subproject object.')
raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
'either an external dependency (returned by find_library() or '
'dependency()) or an internal dependency (returned by '
'declare_dependency()).'.format(type(dep).__name__))
def get_external_deps(self):
return self.external_deps
def link(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, Target):
raise InvalidArguments('{!r} is not a target.'.format(t))
if not t.is_linkable_target():
raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_targets.append(t)
def link_whole(self, target):
for t in listify(target, unholder=True):
if not isinstance(t, StaticLibrary):
raise InvalidArguments('{!r} is not a static library.'.format(t))
if isinstance(self, SharedLibrary) and not t.pic:
msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
msg += "Use the 'pic' option to static_library to build with PIC."
raise InvalidArguments(msg)
if self.is_cross != t.is_cross:
raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
self.link_whole_targets.append(t)
def add_pch(self, language, pchlist):
if not pchlist:
return
elif len(pchlist) == 1:
if not environment.is_header(pchlist[0]):
raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
elif len(pchlist) == 2:
if environment.is_header(pchlist[0]):
if not environment.is_source(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
elif environment.is_source(pchlist[0]):
if not environment.is_header(pchlist[1]):
raise InvalidArguments('PCH definition must contain one header and at most one source.')
pchlist = [pchlist[1], pchlist[0]]
else:
raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
raise InvalidArguments('PCH files must be stored in the same folder.')
elif len(pchlist) > 2:
raise InvalidArguments('PCH definition may have a maximum of 2 files.')
for f in pchlist:
if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
raise MesonException('File %s does not exist.' % f)
self.pch[language] = pchlist
def add_include_dirs(self, args):
ids = []
for a in args:
# FIXME same hack, forcibly unpack from holder.
if hasattr(a, 'held_object'):
a = a.held_object
if not isinstance(a, IncludeDirs):
raise InvalidArguments('Include directory to be added is not an include directory object.')
ids.append(a)
self.include_dirs += ids
def add_compiler_args(self, language, args):
args = listify(args)
for a in args:
if not isinstance(a, (str, File)):
raise InvalidArguments('Compiler arguments must be strings or File objects.')
if language in self.extra_args:
self.extra_args[language] += args
else:
self.extra_args[language] = args
def get_aliases(self):
return {}
def get_langs_used_by_deps(self):
'''
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
'''
langs = []
# Check if any of the external libraries were written in this language
for dep in self.external_deps:
if dep.language is None:
continue
if dep.language not in langs:
langs.append(dep.language)
# Check if any of the internal libraries this target links to were
# written in this language
for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
for language in link_target.compilers:
if language not in langs:
langs.append(language)
return langs
def get_clink_dynamic_linker_and_stdlibs(self):
'''
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
'''
# Populate list of all compilers, not just those being used to compile
# sources in this target
if self.is_cross:
all_compilers = self.environment.coredata.cross_compilers
else:
all_compilers = self.environment.coredata.compilers
# Languages used by dependencies
dep_langs = self.get_langs_used_by_deps()
# Pick a compiler based on the language priority-order
for l in clink_langs:
if l in self.compilers or l in dep_langs:
try:
linker = all_compilers[l]
except KeyError:
raise MesonException(
'Could not get a dynamic linker for build target {!r}. '
'Requires a linker for language "{}", but that is not '
'a project language.'.format(self.name, l))
stdlib_args = []
added_languages = set()
for dl in itertools.chain(self.compilers, dep_langs):
if dl != linker.language:
stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
added_languages.add(dl)
return linker, stdlib_args
m = 'Could not get a dynamic linker for build target {!r}'
raise AssertionError(m.format(self.name))
def get_using_msvc(self):
'''
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
'''
linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
# Mixing many languages with MSVC is not supported yet so ignore stdlibs.
if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
return True
return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
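# Illustrative sketch (not part of the original module): the linker-selection logic in
# BuildTarget.get_clink_dynamic_linker_and_stdlibs() walks a priority-ordered language list
# and picks the first language that appears either in the target's own compilers or in the
# languages used by its dependencies. The priority tuple and language names below are
# hypothetical stand-ins, not Meson's real clink_langs.
def _example_pick_linker_language(target_langs, dep_langs, priority=('d', 'cpp', 'objcpp', 'c')):
    # Return the highest-priority language that the target or its deps actually use.
    used = set(target_langs) | set(dep_langs)
    for lang in priority:
        if lang in used:
            return lang
    return None

# e.g. _example_pick_linker_language(['c'], ['cpp']) -> 'cpp', so the C++ compiler would drive the link.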
class Generator:
def __init__(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Generator requires exactly one positional argument: the executable')
exe = args[0]
if hasattr(exe, 'held_object'):
exe = exe.held_object
if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
raise InvalidArguments('First generator argument must be an executable.')
self.exe = exe
self.depfile = None
self.capture = False
self.process_kwargs(kwargs)
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.exe)
def get_exe(self):
return self.exe
def process_kwargs(self, kwargs):
if 'arguments' not in kwargs:
raise InvalidArguments('Generator must have "arguments" keyword argument.')
args = kwargs['arguments']
if isinstance(args, str):
args = [args]
if not isinstance(args, list):
raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
for a in args:
if not isinstance(a, str):
raise InvalidArguments('A non-string object in "arguments" keyword argument.')
self.arglist = args
if 'output' not in kwargs:
raise InvalidArguments('Generator must have "output" keyword argument.')
outputs = listify(kwargs['output'])
for rule in outputs:
if not isinstance(rule, str):
raise InvalidArguments('"output" may only contain strings.')
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"outputs" must not contain a directory separator.')
if len(outputs) > 1:
for o in outputs:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
self.outputs = outputs
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
if 'capture' in kwargs:
capture = kwargs['capture']
if not isinstance(capture, bool):
raise InvalidArguments('Capture must be boolean.')
self.capture = capture
def get_base_outnames(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
return bases
def get_dep_outname(self, inname):
if self.depfile is None:
raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
def get_arglist(self, inname):
plainname = os.path.basename(inname)
basename = os.path.splitext(plainname)[0]
return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]
def is_parent_path(self, parent, trial):
relpath = pathlib.PurePath(trial).relative_to(parent)
return relpath.parts[0] != '..' # For subdirs we can only go "down".
def process_files(self, name, files, state, preserve_path_from=None, extra_args=[]):
output = GeneratedList(self, state.subdir, preserve_path_from, extra_args=extra_args)
for f in files:
if isinstance(f, str):
f = File.from_source_file(state.environment.source_dir, state.subdir, f)
elif not isinstance(f, File):
raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
if preserve_path_from:
abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
if not self.is_parent_path(preserve_path_from, abs_f):
raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
output.add_file(f, state)
return output
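# Illustrative sketch (not part of the original module): how Generator.get_base_outnames()
# expands the @BASENAME@/@PLAINNAME@ placeholders for one input file. The sample output
# templates are made up for demonstration.
def _example_expand_output_templates(inname, outputs=('@BASENAME@.c', '@BASENAME@.h')):
    import os
    plainname = os.path.basename(inname)          # e.g. 'foo.idl'
    basename = os.path.splitext(plainname)[0]     # e.g. 'foo'
    return [o.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for o in outputs]

# _example_expand_output_templates('src/foo.idl') == ['foo.c', 'foo.h']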
class GeneratedList:
def __init__(self, generator, subdir, preserve_path_from=None, extra_args=[]):
if hasattr(generator, 'held_object'):
generator = generator.held_object
self.generator = generator
self.name = self.generator.exe
self.subdir = subdir
self.infilelist = []
self.outfilelist = []
self.outmap = {}
self.extra_depends = []
self.preserve_path_from = preserve_path_from
self.extra_args = extra_args
def add_preserved_path_segment(self, infile, outfiles, state):
result = []
in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
assert(os.path.isabs(self.preserve_path_from))
rel = os.path.relpath(in_abs, self.preserve_path_from)
path_segment = os.path.dirname(rel)
for of in outfiles:
result.append(os.path.join(path_segment, of))
return result
def add_file(self, newfile, state):
self.infilelist.append(newfile)
outfiles = self.generator.get_base_outnames(newfile.fname)
if self.preserve_path_from:
outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
self.outfilelist += outfiles
self.outmap[newfile] = outfiles
def get_inputs(self):
return self.infilelist
def get_outputs(self):
return self.outfilelist
def get_outputs_for(self, filename):
return self.outmap[filename]
def get_generator(self):
return self.generator
def get_extra_args(self):
return self.extra_args
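# Illustrative sketch (not part of the original module): how GeneratedList.add_preserved_path_segment()
# keeps an input file's directory layout relative to preserve_path_from. The paths are hypothetical.
def _example_preserve_path_segment(in_abs, preserve_path_from, outfiles):
    import os
    rel = os.path.relpath(in_abs, preserve_path_from)   # e.g. 'sub/dir/foo.idl'
    path_segment = os.path.dirname(rel)                 # e.g. 'sub/dir'
    return [os.path.join(path_segment, of) for of in outfiles]

# _example_preserve_path_segment('/src/proto/sub/dir/foo.idl', '/src/proto', ['foo.c'])
# == ['sub/dir/foo.c'] (on a platform where os.sep is '/')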
class Executable(BuildTarget):
known_kwargs = known_exe_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'executable'
if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
kwargs['pie'] = environment.coredata.base_options['b_pie'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
# Unless overridden, executables have no suffix or prefix. Except on
# Windows and with C#/Mono executables where the suffix is 'exe'
if not hasattr(self, 'prefix'):
self.prefix = ''
if not hasattr(self, 'suffix'):
# Executable for Windows or C#/Mono
if (for_windows(is_cross, environment) or
for_cygwin(is_cross, environment) or 'cs' in self.compilers):
self.suffix = 'exe'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
self.suffix = 'axf'
elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
self.suffix = 'abs'
else:
self.suffix = ''
self.filename = self.name
if self.suffix:
self.filename += '.' + self.suffix
self.outputs = [self.filename]
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
# Check for export_dynamic
self.export_dynamic = False
if kwargs.get('export_dynamic'):
if not isinstance(kwargs['export_dynamic'], bool):
raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
self.export_dynamic = True
if kwargs.get('implib'):
self.export_dynamic = True
if self.export_dynamic and kwargs.get('implib') is False:
raise InvalidArguments('"implib" keyword argument must not be false for if "export_dynamic" is true')
# If using export_dynamic, set the import library name
if self.export_dynamic:
implib_basename = self.name + '.exe'
if not isinstance(kwargs.get('implib', False), bool):
implib_basename = kwargs['implib']
if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
self.vs_import_filename = '{0}.lib'.format(implib_basename)
self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
if self.get_using_msvc():
self.import_filename = self.vs_import_filename
else:
self.import_filename = self.gcc_import_filename
# Only linkwithable if using export_dynamic
self.is_linkwithable = self.export_dynamic
def get_default_install_dir(self, environment):
return environment.get_bindir()
def description(self):
'''Human friendly description of the executable'''
return self.name
def type_suffix(self):
return "@exe"
def get_import_filename(self):
"""
The name of the import library that will be generated by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def is_linkable_target(self):
return self.is_linkwithable
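# Illustrative sketch (not part of the original module): the import-library names an Executable
# with export_dynamic would produce on Windows, following the naming scheme used above. The
# values passed in are hypothetical.
def _example_executable_implib_names(name, using_msvc):
    implib_basename = name + '.exe'
    vs_import = '{0}.lib'.format(implib_basename)    # e.g. 'myprog.exe.lib'
    gcc_import = 'lib{0}.a'.format(implib_basename)  # e.g. 'libmyprog.exe.a'
    return vs_import if using_msvc else gcc_import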
class StaticLibrary(BuildTarget):
known_kwargs = known_stlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'static library'
if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'cs' in self.compilers:
raise InvalidArguments('Static libraries not supported for C#.')
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use rlib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust static library target crate type to rlib')
self.rust_crate_type = 'rlib'
# Don't let configuration proceed with a non-static crate type
elif self.rust_crate_type not in ['rlib', 'staticlib']:
raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
# By default a static library is named libfoo.a even on Windows because
# MSVC does not have a consistent convention for what static libraries
# are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
# it and GCC only looks for static libraries called foo.lib and
# libfoo.a. However, we cannot use foo.lib because that's the same as
# the import library. Using libfoo.a is ok because people using MSVC
# always pass the library filename while linking anyway.
if not hasattr(self, 'prefix'):
self.prefix = 'lib'
if not hasattr(self, 'suffix'):
if 'rust' in self.compilers:
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'rlib':
# default Rust static library suffix
self.suffix = 'rlib'
elif self.rust_crate_type == 'staticlib':
self.suffix = 'a'
else:
self.suffix = 'a'
self.filename = self.prefix + self.name + '.' + self.suffix
self.outputs = [self.filename]
def get_link_deps_mapping(self, prefix, environment):
return {}
def get_default_install_dir(self, environment):
return environment.get_static_lib_dir()
def type_suffix(self):
return "@sta"
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def is_linkable_target(self):
return True
class SharedLibrary(BuildTarget):
known_kwargs = known_shlib_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'shared library'
self.soversion = None
self.ltversion = None
# Max length 2, first element is compatibility_version, second is current_version
self.darwin_versions = []
self.vs_module_defs = None
# The import library this target will generate
self.import_filename = None
# The import library that Visual Studio would generate (and accept)
self.vs_import_filename = None
# The import library that GCC would generate (and prefer)
self.gcc_import_filename = None
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
if 'rust' in self.compilers:
# If no crate type is specified, or it's the generic lib type, use dylib
if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
self.rust_crate_type = 'dylib'
# Don't let configuration proceed with a non-dynamic crate type
elif self.rust_crate_type not in ['dylib', 'cdylib']:
raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
if not hasattr(self, 'prefix'):
self.prefix = None
if not hasattr(self, 'suffix'):
self.suffix = None
self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
self.determine_filenames(is_cross, environment)
def get_link_deps_mapping(self, prefix, environment):
result = {}
mappings = self.get_transitive_link_deps_mapping(prefix, environment)
old = get_target_macos_dylib_install_name(self)
if old not in mappings:
fname = self.get_filename()
outdirs, _ = self.get_install_dir(self.environment)
new = os.path.join(prefix, outdirs[0], fname)
result.update({old: new})
mappings.update(result)
return mappings
def get_default_install_dir(self, environment):
return environment.get_shared_lib_dir()
def determine_filenames(self, is_cross, env):
"""
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
"""
prefix = ''
suffix = ''
self.filename_tpl = self.basic_filename_tpl
# NOTE: manual prefix/suffix override is currently only tested for C/C++
# C# and Mono
if 'cs' in self.compilers:
prefix = ''
suffix = 'dll'
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
# C, C++, Swift, Vala
# Only Windows uses a separate import library for linking
# For all other targets/platforms import_filename stays None
elif for_windows(is_cross, env):
suffix = 'dll'
self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
if self.get_using_msvc():
# Shared library is of the form foo.dll
prefix = ''
# Import library is called foo.lib
self.import_filename = self.vs_import_filename
# Assume GCC-compatible naming
else:
# Shared library is of the form libfoo.dll
prefix = 'lib'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
# Shared library has the soversion if it is defined
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_cygwin(is_cross, env):
suffix = 'dll'
self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
# Shared library is of the form cygfoo.dll
# (ld --dll-search-prefix=cyg is the default)
prefix = 'cyg'
# Import library is called libfoo.dll.a
self.import_filename = self.gcc_import_filename
if self.soversion:
self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
else:
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_darwin(is_cross, env):
prefix = 'lib'
suffix = 'dylib'
# On macOS, the filename can only contain the major version
if self.soversion:
# libfoo.X.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
else:
# libfoo.dylib
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
elif for_android(is_cross, env):
prefix = 'lib'
suffix = 'so'
# Android doesn't support shared_library versioning
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
else:
prefix = 'lib'
suffix = 'so'
if self.ltversion:
# libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
elif self.soversion:
# libfoo.so.X
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
else:
# No versioning, libfoo.so
self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
if self.prefix is None:
self.prefix = prefix
if self.suffix is None:
self.suffix = suffix
self.filename = self.filename_tpl.format(self)
self.outputs = [self.filename]
@staticmethod
def _validate_darwin_versions(darwin_versions):
try:
if isinstance(darwin_versions, int):
darwin_versions = str(darwin_versions)
if isinstance(darwin_versions, str):
darwin_versions = 2 * [darwin_versions]
if not isinstance(darwin_versions, list):
raise InvalidArguments('Shared library darwin_versions: must be a string, integer, '
'or a list, not {!r}'.format(darwin_versions))
if len(darwin_versions) > 2:
raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
if len(darwin_versions) == 1:
darwin_versions = 2 * darwin_versions
for i, v in enumerate(darwin_versions[:]):
if isinstance(v, int):
v = str(v)
if not isinstance(v, str):
raise InvalidArguments('Shared library darwin_versions: list elements '
'must be strings or integers, not {!r}'.format(v))
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
'X, Y, Z are numbers, and Y and Z are optional')
parts = v.split('.')
if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where X is [0, 65535] and Y, Z are optional')
if len(parts) in (2, 3) and int(parts[1]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Y is [0, 255] and Y, Z are optional')
if len(parts) == 3 and int(parts[2]) > 255:
raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
'where Z is [0, 255] and Y, Z are optional')
darwin_versions[i] = v
except ValueError:
raise InvalidArguments('Shared library darwin_versions: value is invalid')
return darwin_versions
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs, environment)
if not for_android(self.is_cross, self.environment):
supports_versioning = True
else:
supports_versioning = False
if supports_versioning:
# Shared library version
if 'version' in kwargs:
self.ltversion = kwargs['version']
if not isinstance(self.ltversion, str):
raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
# Try to extract/deduce the soversion
if 'soversion' in kwargs:
self.soversion = kwargs['soversion']
if isinstance(self.soversion, int):
self.soversion = str(self.soversion)
if not isinstance(self.soversion, str):
raise InvalidArguments('Shared library soversion is not a string or integer.')
elif self.ltversion:
# library version is defined, get the soversion from that
# We replicate what Autotools does here and take the first
# number of the version by default.
self.soversion = self.ltversion.split('.')[0]
# macOS and iOS dylib compatibility_version and current_version
if 'darwin_versions' in kwargs:
self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
elif self.soversion:
# If unspecified, pick the soversion
self.darwin_versions = 2 * [self.soversion]
# Visual Studio module-definitions file
if 'vs_module_defs' in kwargs:
path = kwargs['vs_module_defs']
if hasattr(path, 'held_object'):
path = path.held_object
if isinstance(path, str):
if os.path.isabs(path):
self.vs_module_defs = File.from_absolute_file(path)
else:
self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
self.link_depends.append(self.vs_module_defs)
elif isinstance(path, File):
# When passing a generated file.
self.vs_module_defs = path
self.link_depends.append(path)
elif hasattr(path, 'get_filename'):
# When passing output of a Custom Target
path = File.from_built_file(path.subdir, path.get_filename())
self.vs_module_defs = path
self.link_depends.append(path)
else:
raise InvalidArguments(
'Shared library vs_module_defs must be either a string, '
'a file object or a Custom Target')
if 'rust_crate_type' in kwargs:
rust_crate_type = kwargs['rust_crate_type']
if isinstance(rust_crate_type, str):
self.rust_crate_type = rust_crate_type
else:
raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
def get_import_filename(self):
"""
The name of the import library that will be generated by the compiler.
Returns None if there is no import library required for this platform.
"""
return self.import_filename
def get_import_filenameslist(self):
if self.import_filename:
return [self.vs_import_filename, self.gcc_import_filename]
return []
def get_all_link_deps(self):
return [self] + self.get_transitive_link_deps()
def get_aliases(self):
"""
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
"""
aliases = {}
# Aliases are only useful with .so and .dylib libraries. Also if
# there's no self.soversion (no versioning), we don't need aliases.
if self.suffix not in ('so', 'dylib') or not self.soversion:
return {}
# With .so libraries, the minor and micro versions are also in the
# filename. If ltversion != soversion we create an soversion alias:
# libfoo.so.0 -> libfoo.so.0.100.0
# Where libfoo.so.0.100.0 is the actual library
if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
ltversion_filename = alias_tpl.format(self)
aliases[ltversion_filename] = self.filename
# libfoo.so.0/libfoo.0.dylib is the actual library
else:
ltversion_filename = self.filename
# Unversioned alias:
# libfoo.so -> libfoo.so.0
# libfoo.dylib -> libfoo.0.dylib
aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
return aliases
def type_suffix(self):
return "@sha"
def is_linkable_target(self):
return True
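# Illustrative sketch (not part of the original module): how the attribute-based filename
# templates used by SharedLibrary.determine_filenames() and get_aliases() expand for a
# versioned Linux shared library. The namespace object below is a stand-in for a real target.
def _example_so_filenames_and_aliases():
    import types
    lib = types.SimpleNamespace(prefix='lib', name='foo', suffix='so',
                                soversion='0', ltversion='0.100.0')
    filename = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'.format(lib)          # 'libfoo.so.0.100.0'
    soversion_alias = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'.format(lib)   # 'libfoo.so.0'
    unversioned_alias = '{0.prefix}{0.name}.{0.suffix}'.format(lib)               # 'libfoo.so'
    # get_aliases() returns alias -> real-file mappings along these lines:
    return {soversion_alias: filename, unversioned_alias: soversion_alias}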
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
known_kwargs = known_shmod_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
if 'version' in kwargs:
raise MesonException('Shared modules must not specify the version kwarg.')
if 'soversion' in kwargs:
raise MesonException('Shared modules must not specify the soversion kwarg.')
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
self.typename = 'shared module'
def get_default_install_dir(self, environment):
return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not isinstance(i, str):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library), got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
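# Illustrative sketch (not part of the original module): the recursion pattern used by
# CustomTarget.get_transitive_build_target_deps(), reduced to a plain dictionary graph.
# 'custom' nodes are recursed into, 'build' nodes are collected; the sample graph is made up.
def _example_transitive_build_deps(node, graph):
    # graph maps a node name to (kind, [dependency names])
    found = set()
    _, deps = graph[node]
    for d in deps:
        d_kind, _ = graph[d]
        if d_kind == 'build':
            found.add(d)
        elif d_kind == 'custom':
            found.update(_example_transitive_build_deps(d, graph))
    return found

# _example_transitive_build_deps('gen', {'gen': ('custom', ['helper', 'lib']),
#                                        'helper': ('custom', ['tool']),
#                                        'lib': ('build', []),
#                                        'tool': ('build', [])}) == {'lib', 'tool'}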
class RunTarget(Target):
def __init__(self, name, command, args, dependencies, subdir, subproject):
self.typename = 'run'
super().__init__(name, subdir, subproject, False)
self.command = command
self.args = args
self.dependencies = dependencies
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_dependencies(self):
return self.dependencies
def get_generated_sources(self):
return []
def get_sources(self):
return []
def should_install(self):
return False
def get_filename(self):
return self.name
def get_outputs(self):
if isinstance(self.name, str):
return [self.name]
elif isinstance(self.name, list):
return self.name
else:
raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')
def type_suffix(self):
return "@run"
class Jar(BuildTarget):
known_kwargs = known_jar_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
self.typename = 'jar'
super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
for s in self.sources:
if not s.endswith('.java'):
raise InvalidArguments('Jar source %s is not a java file.' % s)
for t in self.link_targets:
if not isinstance(t, Jar):
raise InvalidArguments('Link target %s is not a jar target.' % t)
self.filename = self.name + '.jar'
self.outputs = [self.filename]
self.java_args = kwargs.get('java_args', [])
def get_main_class(self):
return self.main_class
def type_suffix(self):
return "@jar"
def get_java_args(self):
return self.java_args
def validate_cross_install(self, environment):
# All jar targets are installable.
pass
def is_linkable_target(self):
return True
def get_classpath_args(self):
cp_paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
cp_string = os.pathsep.join(cp_paths)
if cp_string:
return ['-cp', cp_string]
return []
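# Illustrative sketch (not part of the original module): how Jar.get_classpath_args() turns
# linked jar targets into a single -cp argument for the JVM tools. The jar paths are hypothetical.
def _example_classpath_args(jar_paths):
    import os
    cp_string = os.pathsep.join(jar_paths)
    return ['-cp', cp_string] if cp_string else []

# On Linux: _example_classpath_args(['sub/a.jar', 'sub/b.jar']) == ['-cp', 'sub/a.jar:sub/b.jar']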
class CustomTargetIndex:
"""A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
"""
def __init__(self, target, output):
self.typename = 'custom'
self.target = target
self.output = output
def __repr__(self):
return '<CustomTargetIndex: {!r}[{}]>'.format(
self.target, self.target.get_outputs().index(self.output))
def get_outputs(self):
return [self.output]
def get_subdir(self):
return self.target.get_subdir()
class ConfigureFile:
def __init__(self, subdir, sourcename, targetname, configuration_data):
self.subdir = subdir
self.sourcename = sourcename
self.targetname = targetname
self.configuration_data = configuration_data
def __repr__(self):
repr_str = "<{0}: {1} -> {2}>"
src = os.path.join(self.subdir, self.sourcename)
dst = os.path.join(self.subdir, self.targetname)
return repr_str.format(self.__class__.__name__, src, dst)
def get_configuration_data(self):
return self.configuration_data
def get_subdir(self):
return self.subdir
def get_source_name(self):
return self.sourcename
def get_target_name(self):
return self.targetname
class ConfigurationData:
def __init__(self):
super().__init__()
self.values = {}
def __repr__(self):
return repr(self.values)
def __contains__(self, value):
return value in self.values
def get(self, name):
return self.values[name] # (val, desc)
def keys(self):
return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
def __init__(self, sources, install_dir, install_mode=None, rename=None):
self.sources = sources
self.install_dir = install_dir
self.install_mode = install_mode
self.sources = listify(self.sources)
for s in self.sources:
assert(isinstance(s, File))
if rename is None:
self.rename = [os.path.basename(f.fname) for f in self.sources]
else:
self.rename = stringlistify(rename)
if len(self.rename) != len(self.sources):
raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
def __init__(self, script, args):
super().__init__()
assert(isinstance(script, list))
assert(isinstance(args, list))
self['exe'] = script
self['args'] = args
class TestSetup:
def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
self.exe_wrapper = exe_wrapper
self.gdb = gdb
self.timeout_multiplier = timeout_multiplier
self.env = env
def get_sources_string_names(sources):
'''
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
'''
names = []
for s in sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, str):
names.append(s)
elif isinstance(s, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
names += s.get_outputs()
elif isinstance(s, File):
names.append(s.fname)
else:
raise AssertionError('Unknown source type: {!r}'.format(s))
return names
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'build.dat')
load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except FileNotFoundError:
raise MesonException(nonexisting_fail_msg)
except pickle.UnpicklingError:
raise MesonException(load_fail_msg)
if not isinstance(obj, Build):
raise MesonException(load_fail_msg)
return obj
def save(obj, filename):
with open(filename, 'wb') as f:
pickle.dump(obj, f)
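# Illustrative sketch (not part of the original module): the save()/load() pair above is a
# plain pickle round trip, with load() additionally validating that the file exists, unpickles
# cleanly, and has the expected type. A minimal stand-alone version of the same pattern:
def _example_pickle_roundtrip(obj, filename):
    import pickle
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)
    with open(filename, 'rb') as f:
        loaded = pickle.load(f)
    if not isinstance(loaded, type(obj)):
        raise RuntimeError('Unexpected object type in {!r}.'.format(filename))
    return loaded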
# ---- end of source file: mesonbuild/build.py (repository: jmesmon/meson, license: Apache-2.0, 99,028 bytes, Python) ----
# ---- next source file follows ----
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
__all__ = (
"oauth_url",
"snowflake_time",
"time_snowflake",
"find",
"get",
"sleep_until",
"utcnow",
"remove_markdown",
"escape_markdown",
"escape_mentions",
"as_chunks",
"format_dt",
)
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
def __eq__(self, other):
return False
def __bool__(self):
return False
def __repr__(self):
return "..."
MISSING: Any = _MissingSentinel()
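# Illustrative sketch (not part of the original library): MISSING is a sentinel default that
# lets a function distinguish "argument not passed" from an explicit None, which is how the
# optional parameters of later helpers in this module are handled. The function below is a
# made-up demonstration.
def _example_edit(*, topic: Any = MISSING) -> str:
    if topic is MISSING:
        return 'topic left unchanged'
    if topic is None:
        return 'topic cleared'
    return f'topic set to {topic!r}'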
class _cached_property:
def __init__(self, function):
self.function = function
self.__doc__ = getattr(function, "__doc__")
def __get__(self, instance, owner):
if instance is None:
return self
value = self.function(instance)
setattr(instance, self.function.__name__, value)
return value
if TYPE_CHECKING:
from functools import cached_property as cached_property
from typing_extensions import ParamSpec
from .permissions import Permissions
from .abc import Snowflake
from .invite import Invite
from .template import Template
class _RequestLike(Protocol):
headers: Mapping[str, Any]
P = ParamSpec("P")
else:
cached_property = _cached_property
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
self.name = name
self.function = function
self.__doc__ = getattr(function, "__doc__")
@overload
def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
...
@overload
def __get__(self, instance: T, owner: Type[T]) -> T_co:
...
def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
if instance is None:
return self
try:
return getattr(instance, self.name)
except AttributeError:
value = self.function(instance)
setattr(instance, self.name, value)
return value
class classproperty(Generic[T_co]):
def __init__(self, fget: Callable[[Any], T_co]) -> None:
self.fget = fget
def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
return self.fget(owner)
def __set__(self, instance, value) -> None:
raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
return CachedSlotProperty(name, func)
return decorator
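# Illustrative sketch (not part of the original library): cached_slot_property caches an
# expensive computation in a pre-declared slot, so it also works on classes that use __slots__
# and have no instance __dict__. The class below is a made-up demonstration.
class _ExampleCircle:
    __slots__ = ('radius', '_cs_area')

    def __init__(self, radius: float) -> None:
        self.radius = radius

    @cached_slot_property('_cs_area')
    def area(self) -> float:
        # Computed on first access, then stored in the '_cs_area' slot for later lookups.
        return 3.141592653589793 * self.radius ** 2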
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
"""Read-only proxy of a Sequence."""
def __init__(self, proxied: Sequence[T_co]):
self.__proxied = proxied
def __getitem__(self, idx: int) -> T_co:
return self.__proxied[idx]
def __len__(self) -> int:
return len(self.__proxied)
def __contains__(self, item: Any) -> bool:
return item in self.__proxied
def __iter__(self) -> Iterator[T_co]:
return iter(self.__proxied)
def __reversed__(self) -> Iterator[T_co]:
return reversed(self.__proxied)
def index(self, value: Any, *args, **kwargs) -> int:
return self.__proxied.index(value, *args, **kwargs)
def count(self, value: Any) -> int:
return self.__proxied.count(value)
@overload
def parse_time(timestamp: None) -> None:
...
@overload
def parse_time(timestamp: str) -> datetime.datetime:
...
@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
...
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
if timestamp:
return datetime.datetime.fromisoformat(timestamp)
return None
def copy_doc(original: Callable) -> Callable[[T], T]:
def decorator(overriden: T) -> T:
overriden.__doc__ = original.__doc__
overriden.__signature__ = _signature(original) # type: ignore
return overriden
return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
@functools.wraps(func)
def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
warnings.simplefilter("always", DeprecationWarning) # turn off filter
if instead:
fmt = "{0.__name__} is deprecated, use {1} instead."
else:
fmt = "{0.__name__} is deprecated."
warnings.warn(fmt.format(func, instead), stacklevel=3, category=DeprecationWarning)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return decorated
return actual_decorator
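# Illustrative sketch (not part of the original library): applying the deprecated() decorator
# to a made-up function. Calling it still runs the wrapped body but emits a DeprecationWarning
# that points at the suggested replacement name.
@deprecated(instead='new_helper')
def _example_old_helper(x: int) -> int:
    return x * 2

# _example_old_helper(2) returns 4 and warns:
# "_example_old_helper is deprecated, use new_helper instead."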
def oauth_url(
client_id: Union[int, str],
*,
permissions: Permissions = MISSING,
guild: Snowflake = MISSING,
redirect_uri: str = MISSING,
scopes: Iterable[str] = MISSING,
disable_guild_select: bool = False,
) -> str:
"""A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
"""
url = f"https://discord.com/oauth2/authorize?client_id={client_id}"
url += "&scope=" + "+".join(scopes or ("bot",))
if permissions is not MISSING:
url += f"&permissions={permissions.value}"
if guild is not MISSING:
url += f"&guild_id={guild.id}"
if redirect_uri is not MISSING:
from urllib.parse import urlencode
url += "&response_type=code&" + urlencode({"redirect_uri": redirect_uri})
if disable_guild_select:
url += "&disable_guild_select=true"
return url
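# Illustrative usage (the client ID is made up): only the parameters actually
# passed end up as query-string fields, and the scope defaults to ``bot``.
#
#     perms = Permissions(send_messages=True)
#     oauth_url(1234567890, permissions=perms, disable_guild_select=True)
#     # -> "https://discord.com/oauth2/authorize?client_id=1234567890"
#     #    "&scope=bot&permissions=<perms.value>&disable_guild_select=true"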
def snowflake_time(id: int) -> datetime.datetime:
"""
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
"""
timestamp = ((id >> 22) + DISCORD_EPOCH) / 1000
return datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
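# Worked example (snowflake value taken from the public Discord API docs):
# shifting off the low 22 bits leaves milliseconds since DISCORD_EPOCH.
#
#     snowflake_time(175928847299117063)
#     # -> datetime.datetime(2016, 4, 30, 11, 18, 25, 796000,
#     #                      tzinfo=datetime.timezone.utc)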
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
"""Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
    to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
        Whether or not to set the lower 22 bits to high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
discord_millis = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
return (discord_millis << 22) + (2 ** 22 - 1 if high else 0)
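# Sketch of the range usage described in the docstring (dates are illustrative):
# two synthetic snowflakes bracketing everything created on a given day.
#
#     day = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
#     low = time_snowflake(day, high=False)
#     high = time_snowflake(day + datetime.timedelta(days=1), high=True)
#     # any snowflake created on 2021-01-01 satisfies low <= id <= high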
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
"""A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
    This is different from :func:`py:filter` because it stops the moment it finds
    a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
"""
for element in seq:
if predicate(element):
return element
return None
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
r"""A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
    When multiple attributes are specified, they are checked using
    logical AND, not logical OR. That is, the element has to meet every
    attribute passed in, not just one of them.
To have a nested attribute search (i.e. search by ``x.y``) then
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
"""
# global -> local
_all = all
attrget = attrgetter
# Special case the single element call
if len(attrs) == 1:
k, v = attrs.popitem()
pred = attrget(k.replace("__", "."))
for elem in iterable:
if pred(elem) == v:
return elem
return None
converted = [(attrget(attr.replace("__", ".")), value) for attr, value in attrs.items()]
for elem in iterable:
if _all(pred(elem) == value for pred, value in converted):
return elem
return None
def _unique(iterable: Iterable[T]) -> List[T]:
    return list(dict.fromkeys(iterable))
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data: bytes):
if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
return "image/png"
elif data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
return "image/jpeg"
elif data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
return "image/gif"
elif data.startswith(b"RIFF") and data[8:12] == b"WEBP":
return "image/webp"
else:
raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
fmt = "data:{mime};base64,{data}"
mime = _get_mime_type_for_image(data)
b64 = b64encode(data).decode("ascii")
return fmt.format(mime=mime, data=b64)
if HAS_ORJSON:
def _to_json(obj: Any) -> str: # type: ignore
return orjson.dumps(obj).decode("utf-8")
_from_json = orjson.loads # type: ignore
else:
def _to_json(obj: Any) -> str:
return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)
_from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
value = f(*args, **kwargs)
if _isawaitable(value):
return await value
else:
return value
async def async_all(gen, *, check=_isawaitable):
for elem in gen:
if check(elem):
elem = await elem
if not elem:
return False
return True
async def sane_wait_for(futures, *, timeout):
ensured = [asyncio.ensure_future(fut) for fut in futures]
done, pending = await asyncio.wait(ensured, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
if len(pending) != 0:
raise asyncio.TimeoutError()
return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
for mro in reversed(cls.__mro__):
try:
yield from mro.__slots__
except AttributeError:
continue
def compute_timedelta(dt: datetime.datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.datetime.now(datetime.timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
        The timestamp to sleep until. If the datetime is naive then
        it is assumed to be local time.
result: Any
If provided is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
def utcnow() -> datetime.datetime:
"""A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
"""
return datetime.datetime.now(datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
"""Icons must be power of 2 within [16, 4096]."""
return not size & (size - 1) and 4096 >= size >= 16
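# The bit trick above: ``size & (size - 1)`` is zero only for powers of two, so
# the check reduces to "power of two and within [16, 4096]". For example:
#
#     valid_icon_size(1024)  # True
#     valid_icon_size(1000)  # False (not a power of two)
#     valid_icon_size(8192)  # False (above 4096)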
class SnowflakeList(array.array):
"""Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
"""
__slots__ = ()
if TYPE_CHECKING:
def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
...
def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
return array.array.__new__(cls, "Q", data if is_sorted else sorted(data)) # type: ignore
def add(self, element: int) -> None:
i = bisect_left(self, element)
self.insert(i, element)
def get(self, element: int) -> Optional[int]:
i = bisect_left(self, element)
return self[i] if i != len(self) and self[i] == element else None
def has(self, element: int) -> bool:
i = bisect_left(self, element)
return i != len(self) and self[i] == element
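# Usage sketch (IDs shortened for readability; real snowflakes are 64-bit):
# the underlying array stays sorted, so ``add``/``get``/``has`` are all driven
# by ``bisect_left`` binary searches.
#
#     ids = SnowflakeList([30, 10, 20], is_sorted=False)
#     ids.add(25)
#     ids.has(20)  # True
#     ids.get(15)  # None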
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
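# Width rule in practice (sample strings are illustrative): pure-ASCII input
# short-circuits to its length, while East Asian wide/fullwidth characters
# count as two columns each.
#
#     _string_width("hello")  # 5
#     _string_width("こんにちは")  # 10 (five wide characters)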
def resolve_invite(invite: Union[Invite, str]) -> str:
"""
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
"""
from .invite import Invite # circular import
if isinstance(invite, Invite):
return invite.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)"
m = re.match(rx, invite)
if m:
return m.group(1)
return invite
def resolve_template(code: Union[Template, str]) -> str:
"""
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
"""
from .template import Template # circular import
if isinstance(code, Template):
return code.code
else:
rx = r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)"
m = re.match(rx, code)
if m:
return m.group(1)
return code
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
"""A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
"""
    def replacement(match):
        groupdict = match.groupdict()
        return groupdict.get("url") or ""
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
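# Illustrative behaviour (sample text is made up): markdown tokens are dropped
# rather than escaped, and with ``ignore_links=True`` anything matched as a URL
# is left untouched.
#
#     remove_markdown("**bold** and _underscored_")  # -> "bold and underscored"
#     remove_markdown("see https://example.com/a_b")  # URL kept as-is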
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
r"""A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
"""
if not as_needed:
def replacement(match):
groupdict = match.groupdict()
is_url = groupdict.get("url")
if is_url:
return is_url
return "\\" + groupdict["markdown"]
regex = _MARKDOWN_STOCK_REGEX
if ignore_links:
regex = f"(?:{_URL_REGEX}|{regex})"
return re.sub(regex, replacement, text, 0, re.MULTILINE)
else:
text = re.sub(r"\\", r"\\\\", text)
return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", text)
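# Illustrative behaviour with the default arguments (sample text is made up):
# markdown tokens gain a leading backslash while URLs are preserved verbatim.
#
#     escape_markdown("*hi* there")  # -> \*hi\* there
#     escape_markdown("see https://example.com/a_b")  # URL left untouched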
def escape_mentions(text: str) -> str:
"""A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
"""
return re.sub(r"@(everyone|here|[!&]?[0-9]{17,20})", "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
"""A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
"""
if max_size <= 0:
raise ValueError("Chunk sizes must be greater than 0.")
if isinstance(iterator, AsyncIterator):
return _achunk(iterator, max_size)
return _chunk(iterator, max_size)
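# Usage sketch: the same helper handles sync and async iterators; with a plain
# iterator you get lists of at most ``max_size`` items, the last one possibly
# shorter.
#
#     list(as_chunks(iter(range(7)), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]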
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
params = []
literal_cls = type(Literal[0])
for p in parameters:
if isinstance(p, literal_cls):
params.extend(p.__args__)
else:
params.append(p)
return tuple(params)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
none_cls = type(None)
return tuple(p for p in parameters if p is not none_cls) + (none_cls,)
def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, "__args__"):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, "__origin__"):
if PY_310 and tp.__class__ is types.UnionType: # type: ignore
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(
evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
def resolve_annotation(
annotation: Any,
globalns: Dict[str, Any],
localns: Optional[Dict[str, Any]],
cache: Optional[Dict[str, Any]],
) -> Any:
if annotation is None:
return type(None)
if isinstance(annotation, str):
annotation = ForwardRef(annotation)
locals = globalns if localns is None else localns
if cache is None:
cache = {}
return evaluate_annotation(annotation, globalns, locals, cache)
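# Illustrative call (assumes ``Optional`` is importable from the supplied
# globals): string annotations are wrapped in ForwardRef and evaluated against
# the given namespaces, with ``None`` in a Union normalised to come last.
#
#     resolve_annotation("Optional[int]", globals(), None, {})
#     # -> typing.Optional[int]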
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]
def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
"""A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
"""
if style is None:
return f"<t:{int(dt.timestamp())}>"
return f"<t:{int(dt.timestamp())}:{style}>"
| 28.867058 | 118 | 0.61085 | ["MIT"] | Astrea49/enhanced-discord.py | discord/utils.py | 29,531 | Python
# Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
def __init__(self):
self.__sockMap = {} # use own map for MT safety
self.timeout = 0.5
AbstractTransportDispatcher.__init__(self)
def getSocketMap(self): return self.__sockMap
def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
def registerTransport(self, tDomain, t):
AbstractTransportDispatcher.registerTransport(self, tDomain, t)
t.registerSocket(self.__sockMap)
def unregisterTransport(self, tDomain):
self.getTransport(tDomain).unregisterSocket(self.__sockMap)
AbstractTransportDispatcher.unregisterTransport(self, tDomain)
def transportsAreWorking(self):
for transport in self.__sockMap.values():
if transport.writable():
return 1
return 0
def runDispatcher(self, timeout=0.0):
while self.jobsArePending() or self.transportsAreWorking():
try:
loop(timeout and timeout or self.timeout,
use_poll=True, map=self.__sockMap, count=1)
except KeyboardInterrupt:
raise
except:
raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
self.handleTimerTick(time())
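# Minimal usage sketch (the UDP transport module path and client-mode setup are
# assumptions of this example, not defined in this file): register a transport,
# then runDispatcher() loops until no jobs or writable transports remain.
#
#     from pysnmp.carrier.asyncore.dgram import udp
#     dispatcher = AsyncoreDispatcher()
#     dispatcher.registerTransport(
#         udp.domainName, udp.UdpSocketTransport().openClientMode()
#     )
#     dispatcher.runDispatcher()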
| 37.325581 | 93 | 0.685358 | ["Apache-2.0"] | ArthurKamalov/scalyr-agent-2 | scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | 1,605 | Python
import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import math
import time
from . import framework
import network
class policy_agent(framework.re_model):
def __init__(self, train_data_loader, batch_size, max_length=120):
framework.re_model.__init__(self, train_data_loader, batch_size, max_length)
self.weights = tf.placeholder(tf.float32, shape=(), name="weights_scalar")
x = network.embedding.word_position_embedding(self.word, self.word_vec_mat, self.pos1, self.pos2)
x_train = network.encoder.cnn(x, keep_prob=0.5)
x_test = network.encoder.cnn(x, keep_prob=1.0)
self._train_logit = network.selector.instance(x_train, 2, keep_prob=0.5)
self._test_logit = network.selector.instance(x_test, 2, keep_prob=1.0)
self._loss = network.classifier.softmax_cross_entropy(self._train_logit, self.ins_label, 2, weights=self.weights)
def loss(self):
return self._loss
def train_logit(self):
return self._train_logit
def test_logit(self):
return self._test_logit
class rl_re_framework(framework.re_framework):
def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
framework.re_framework.__init__(self, train_data_loader, test_data_loader, max_length, batch_size)
def agent_one_step(self, sess, agent_model, batch_data, run_array, weights=1):
feed_dict = {
agent_model.word: batch_data['word'],
agent_model.pos1: batch_data['pos1'],
agent_model.pos2: batch_data['pos2'],
agent_model.ins_label: batch_data['agent_label'],
agent_model.length: batch_data['length'],
agent_model.weights: weights
}
if 'mask' in batch_data and hasattr(agent_model, "mask"):
feed_dict.update({agent_model.mask: batch_data['mask']})
result = sess.run(run_array, feed_dict)
return result
def pretrain_main_model(self, max_epoch):
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
iter_loss, iter_logit, _train_op = self.one_step(self.sess, self.model, batch_data, [self.model.loss(), self.model.train_logit(), self.train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain main model] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
def pretrain_agent_model(self, max_epoch):
# Pre-train policy agent
for epoch in range(max_epoch):
print('###### [Pre-train Policy Agent] Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
time_sum = 0
for i, batch_data in enumerate(self.train_data_loader):
time_start = time.time()
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
time_end = time.time()
t = time_end - time_start
time_sum += t
iter_output = iter_logit.argmax(-1)
iter_label = batch_data['ins_rel']
iter_correct = (iter_output == iter_label).sum()
iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
tot_correct += iter_correct
tot_not_na_correct += iter_not_na_correct
tot += iter_label.shape[0]
tot_not_na += (iter_label != 0).sum()
if tot_not_na > 0:
sys.stdout.write("[pretrain policy agent] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
sys.stdout.flush()
i += 1
def train(self,
model, # The main model
agent_model, # The model of policy agent
model_name,
ckpt_dir='./checkpoint',
summary_dir='./summary',
test_result_dir='./test_result',
learning_rate=0.5,
max_epoch=60,
pretrain_agent_epoch=1,
pretrain_model=None,
test_epoch=1,
optimizer=tf.train.GradientDescentOptimizer):
print("Start training...")
# Init
self.model = model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
model_optimizer = optimizer(learning_rate)
grads = model_optimizer.compute_gradients(self.model.loss())
self.train_op = model_optimizer.apply_gradients(grads)
# Init policy agent
self.agent_model = agent_model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
agent_optimizer = optimizer(learning_rate)
agent_grads = agent_optimizer.compute_gradients(self.agent_model.loss())
self.agent_train_op = agent_optimizer.apply_gradients(agent_grads)
# Session, writer and saver
self.sess = tf.Session()
summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
saver = tf.train.Saver(max_to_keep=None)
if pretrain_model is None:
self.sess.run(tf.global_variables_initializer())
else:
saver.restore(self.sess, pretrain_model)
self.pretrain_main_model(max_epoch=5) # Pre-train main model
self.pretrain_agent_model(max_epoch=1) # Pre-train policy agent
# Train
tot_delete = 0
batch_count = 0
instance_count = 0
reward = 0.0
best_metric = 0
best_prec = None
best_recall = None
not_best_count = 0 # Stop training after several epochs without improvement.
for epoch in range(max_epoch):
print('###### Epoch ' + str(epoch) + ' ######')
tot_correct = 0
tot_not_na_correct = 0
tot = 0
tot_not_na = 0
i = 0
time_sum = 0
batch_stack = []
# Update policy agent
for i, batch_data in enumerate(self.train_data_loader):
# Make action
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
batch_stack.append(batch_data)
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
# Calculate reward
batch_delete = np.sum(np.logical_and(batch_data['ins_rel'] != 0, action_result == 0))
batch_data['ins_rel'][action_result == 0] = 0
iter_loss = self.one_step(self.sess, self.model, batch_data, [self.model.loss()])[0]
reward += iter_loss
tot_delete += batch_delete
batch_count += 1
# Update parameters of policy agent
alpha = 0.1
if batch_count == 100:
reward = reward / float(batch_count)
average_loss = reward
reward = - math.log(1 - math.e ** (-reward))
sys.stdout.write('tot delete : %f | reward : %f | average loss : %f\r' % (tot_delete, reward, average_loss))
sys.stdout.flush()
for batch_data in batch_stack:
self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_train_op], weights=reward * alpha)
batch_count = 0
reward = 0
tot_delete = 0
batch_stack = []
i += 1
# Train the main model
for i, batch_data in enumerate(self.train_data_loader):
batch_data['agent_label'] = batch_data['ins_rel'] + 0
batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
time_start = time.time()
# Make actions
iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
action_result = iter_logit.argmax(-1)
batch_data['ins_rel'][action_result == 0] = 0
                # Real training of the main model on the filtered batch
                iter_loss, iter_logit, _train_op = self.one_step(self.sess, self.model, batch_data, [self.model.loss(), self.model.train_logit(), self.train_op])
                time_end = time.time()
                t = time_end - time_start
                time_sum += t
                iter_output = iter_logit.argmax(-1)
                iter_label = batch_data['rel']
                tot_correct += (iter_output == iter_label).sum()
                tot_not_na_correct += np.logical_and(iter_output == iter_label, iter_label != 0).sum()
                tot += iter_label.shape[0]
                tot_not_na += (iter_label != 0).sum()
                if tot_not_na > 0:
                    sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                    sys.stdout.flush()
                i += 1
print("\nAverage iteration time: %f" % (time_sum / i))
if (epoch + 1) % test_epoch == 0:
metric = self.test(model)
if metric > best_metric:
best_metric = metric
best_prec = self.cur_prec
best_recall = self.cur_recall
print("Best model, storing...")
if not os.path.isdir(ckpt_dir):
os.mkdir(ckpt_dir)
path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
print("Finish storing")
not_best_count = 0
else:
not_best_count += 1
if not_best_count >= 20:
break
print("######")
print("Finish training " + model_name)
print("Best epoch auc = %f" % (best_metric))
if (not best_prec is None) and (not best_recall is None):
if not os.path.isdir(test_result_dir):
os.mkdir(test_result_dir)
np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
| 46.525692 | 231 | 0.572084 | ["MIT"] | qingdujun/manual-nre | nrekit/rl.py | 11,771 | Python
import _plotly_utils.basevalidators
class CmaxValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name='cmax',
parent_name='scattergl.marker.line',
**kwargs
):
super(CmaxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
implied_edits={'cauto': False},
role='info',
**kwargs
)
| 24.2 | 66 | 0.584711 | ["MIT"] | Elpiro/plotly.py | plotly/validators/scattergl/marker/line/_cmax.py | 484 | Python