id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
154439 | <gh_stars>1000+
#!/usr/bin/env python3
import binascii
import sys
from collections import defaultdict
import cereal.messaging as messaging
from common.realtime import sec_since_boot
def can_printer(bus=0):
"""Collects messages and prints when a new bit transition is observed.
This is very useful to find signals based on user triggered actions, such as blinkers and seatbelt.
Leave the script running until no new transitions are seen, then perform the action."""
logcan = messaging.sub_sock('can')
low_to_high = defaultdict(int)
high_to_low = defaultdict(int)
while 1:
can_recv = messaging.drain_sock(logcan, wait_for_one=True)
for x in can_recv:
for y in x.can:
if y.src == bus:
i = int.from_bytes(y.dat, byteorder='big')
l_h = low_to_high[y.address]
h_l = high_to_low[y.address]
change = None
if (i | l_h) != l_h:
low_to_high[y.address] = i | l_h
change = "+"
if (~i | h_l) != h_l:
high_to_low[y.address] = ~i | h_l
change = "-"
if change:
print(f"{sec_since_boot():.2f}\t{hex(y.address)} ({y.address})\t{change}{binascii.hexlify(y.dat)}")
if __name__ == "__main__":
if len(sys.argv) > 1:
can_printer(int(sys.argv[1]))
else:
can_printer()
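# Usage sketch (illustrative): run the script with an optional bus number.
#   ./can_printer.py        # watch bus 0 (default)
#   ./can_printer.py 1      # watch bus 1
# Leave it running until no new bit transitions appear, then trigger the action
# (e.g. a blinker) and note which addresses print a change.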
| StarcoderdataPython |
3363368 | <reponame>CryptoSalamander/DeepFake-Detection
import os
import random

ri = random.randint
DIR = "."


def getNumbers(n, maxLen):
    """Return n distinct random indices in [0, maxLen), sorted ascending."""
    li = []
    for i in range(n):
        # randint is inclusive on both ends, so use maxLen - 1 to stay in range
        ran_num = random.randint(0, maxLen - 1)
        while ran_num in li:
            ran_num = random.randint(0, maxLen - 1)
        li.append(ran_num)
    li.sort()
    return li


# Copy 10 randomly chosen .mp4 files from every folder whose name contains '_'
# (getNumbers must be defined before this loop runs, since the script executes top to bottom).
Folder = [foldername for foldername in os.listdir(DIR) if foldername.find('_') != -1]
for folder in Folder:
    mp4s = [filename for filename in os.listdir("./" + folder) if filename[-4:] == '.mp4']
    Len = len(mp4s)
    li = getNumbers(10, Len)
    for mv in li:
        print(mp4s[mv])
        os.system(f'cp ./{folder}/{mp4s[mv]} ../mini_test')
| StarcoderdataPython |
160089 | <reponame>bparazin/skyportal
__all__ = ['Spectrum', 'SpectrumReducer', 'SpectrumObserver']
import warnings
import json
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as psql
from sqlalchemy.orm import relationship
import numpy as np
import yaml
from astropy.utils.exceptions import AstropyWarning
from astropy.io import fits, ascii
from baselayer.app.models import (
join_model,
Base,
User,
AccessibleIfUserMatches,
accessible_by_owner,
)
from baselayer.app.json_util import to_json
from .group import accessible_by_groups_members
from ..enum_types import (
allowed_spectrum_types,
default_spectrum_type,
ALLOWED_SPECTRUM_TYPES,
)
class NumpyArray(sa.types.TypeDecorator):
"""SQLAlchemy representation of a NumPy array."""
impl = psql.ARRAY(sa.Float)
def process_result_value(self, value, dialect):
return np.array(value)
class Spectrum(Base):
"""Wavelength-dependent measurement of the flux of an object through a
dispersive element."""
read = accessible_by_groups_members
update = delete = accessible_by_owner
__tablename__ = 'spectra'
# TODO better numpy integration
wavelengths = sa.Column(
NumpyArray, nullable=False, doc="Wavelengths of the spectrum [Angstrom]."
)
fluxes = sa.Column(
NumpyArray,
nullable=False,
doc="Flux of the Spectrum [F_lambda, arbitrary units].",
)
errors = sa.Column(
NumpyArray,
doc="Errors on the fluxes of the spectrum [F_lambda, same units as `fluxes`.]",
)
obj_id = sa.Column(
sa.ForeignKey('objs.id', ondelete='CASCADE'),
nullable=False,
index=True,
doc="ID of this Spectrum's Obj.",
)
obj = relationship('Obj', back_populates='spectra', doc="The Spectrum's Obj.")
observed_at = sa.Column(
sa.DateTime,
nullable=False,
doc="Median UTC ISO time stamp of the exposure or exposures in which the Spectrum was acquired.",
)
origin = sa.Column(sa.String, nullable=True, doc="Origin of the spectrum.")
type = sa.Column(
allowed_spectrum_types,
nullable=False,
default=default_spectrum_type,
doc=f'''Type of spectrum. One of: {', '.join(f"'{t}'" for t in ALLOWED_SPECTRUM_TYPES)}.
Defaults to '{default_spectrum_type}'.''',
)
label = sa.Column(
sa.String,
nullable=True,
doc='User defined label (can be used to replace default instrument/date labeling on plot legends).',
)
# TODO program?
instrument_id = sa.Column(
sa.ForeignKey('instruments.id', ondelete='CASCADE'),
nullable=False,
index=True,
doc="ID of the Instrument that acquired the Spectrum.",
)
instrument = relationship(
'Instrument',
back_populates='spectra',
doc="The Instrument that acquired the Spectrum.",
)
groups = relationship(
"Group",
secondary="group_spectra",
back_populates="spectra",
cascade="save-update, merge, refresh-expire, expunge",
passive_deletes=True,
doc='Groups that can view this spectrum.',
)
reducers = relationship(
"User",
secondary="spectrum_reducers",
doc="Users that reduced this spectrum, or users to serve as points of contact given an external reducer.",
)
observers = relationship(
"User",
secondary="spectrum_observers",
doc="Users that observed this spectrum, or users to serve as points of contact given an external observer.",
)
followup_request_id = sa.Column(
sa.ForeignKey('followuprequests.id', ondelete='SET NULL'), nullable=True
)
followup_request = relationship('FollowupRequest', back_populates='spectra')
assignment_id = sa.Column(
sa.ForeignKey('classicalassignments.id', ondelete='SET NULL'), nullable=True
)
assignment = relationship('ClassicalAssignment', back_populates='spectra')
altdata = sa.Column(
psql.JSONB, doc="Miscellaneous alternative metadata.", nullable=True
)
original_file_string = sa.Column(
sa.String,
doc="Content of original file that was passed to upload the spectrum.",
)
original_file_filename = sa.Column(
sa.String, doc="Original file name that was passed to upload the spectrum."
)
owner_id = sa.Column(
sa.ForeignKey('users.id', ondelete='CASCADE'),
nullable=False,
index=True,
doc="ID of the User who uploaded the spectrum.",
)
owner = relationship(
'User',
back_populates='spectra',
foreign_keys=[owner_id],
cascade='save-update, merge, refresh-expire, expunge',
doc="The User who uploaded the spectrum.",
)
comments = relationship(
'CommentOnSpectrum',
back_populates='spectrum',
cascade='save-update, merge, refresh-expire, expunge, delete',
passive_deletes=True,
order_by="CommentOnSpectrum.created_at",
doc="Comments posted about this spectrum.",
)
annotations = relationship(
'AnnotationOnSpectrum',
back_populates='spectrum',
cascade='save-update, merge, refresh-expire, expunge, delete',
passive_deletes=True,
order_by="AnnotationOnSpectrum.created_at",
doc="Annotations posted about this spectrum.",
)
@classmethod
def from_ascii(
cls,
file,
obj_id=None,
instrument_id=None,
type=None,
label=None,
observed_at=None,
wave_column=0,
flux_column=1,
fluxerr_column=None,
):
"""Generate a `Spectrum` from an ascii file.
Parameters
----------
file : str or file-like object
Name or handle of the ASCII file containing the spectrum.
obj_id : str
The id of the Obj that this Spectrum is of, if not present
in the ASCII header.
instrument_id : int
ID of the Instrument with which this Spectrum was acquired,
if not present in the ASCII header.
type : str
What is the underlying source of the spectrum.
Possible types are defined in the config under spectrum types.
label : str
User defined label to show on plot legends.
If not given, the default displayed label is
<instrument>-<date taken>.
observed_at : string or datetime
Median UTC ISO time stamp of the exposure or exposures in which
the Spectrum was acquired, if not present in the ASCII header.
wave_column : integer, optional
The 0-based index of the ASCII column corresponding to the wavelength
values of the spectrum (default 0).
flux_column : integer, optional
The 0-based index of the ASCII column corresponding to the flux
values of the spectrum (default 1).
fluxerr_column : integer or None, optional
The 0-based index of the ASCII column corresponding to the flux error
values of the spectrum (default None).
Returns
-------
spec : `skyportal.models.Spectrum`
The Spectrum generated from the ASCII file.
"""
try:
f = open(file, 'rb') # read as ascii
except TypeError:
# it's already a stream
f = file
try:
table = ascii.read(f, comment='#', header_start=None)
except Exception as e:
e.args = (f'Error parsing ASCII file: {e.args[0]}',)
raise
finally:
f.close()
tabledata = np.asarray(table)
colnames = table.colnames
# validate the table and some of the input parameters
# require at least 2 columns (wavelength, flux)
ncol = len(colnames)
if ncol < 2:
raise ValueError(
'Input data must have at least 2 columns (wavelength, '
'flux, and optionally flux error).'
)
spec_data = {}
# validate the column indices
for index, name, dbcol in zip(
[wave_column, flux_column, fluxerr_column],
['wave_column', 'flux_column', 'fluxerr_column'],
['wavelengths', 'fluxes', 'errors'],
):
# index format / type validation:
if dbcol in ['wavelengths', 'fluxes']:
if not isinstance(index, int):
raise ValueError(f'{name} must be an int')
else:
if index is not None and not isinstance(index, int):
# The only other allowed value is that fluxerr_column can be
# None. If the value of index is not None, raise.
raise ValueError(f'invalid type for {name}')
# after validating the indices, ensure that the columns they
# point to exist
if isinstance(index, int):
if index >= ncol:
raise ValueError(
f'index {name} ({index}) is greater than the '
f'maximum allowed value ({ncol - 1})'
)
spec_data[dbcol] = tabledata[colnames[index]].astype(float)
# parse the header
if 'comments' in table.meta:
# this section matches lines like:
# XTENSION: IMAGE
# BITPIX: -32
# NAXIS: 2
# NAXIS1: 433
# NAXIS2: 1
header = {}
for line in table.meta['comments']:
try:
result = yaml.load(line, Loader=yaml.FullLoader)
except yaml.YAMLError:
continue
if isinstance(result, dict):
header.update(result)
# this section matches lines like:
# FILTER = 'clear ' / Filter
# EXPTIME = 600.003 / Total exposure time (sec); avg. of R&B
# OBJECT = 'ZTF20abpuxna' / User-specified object name
# TARGNAME= 'ZTF20abpuxna_S1' / Target name (from starlist)
# DICHNAME= '560 ' / Dichroic
cards = []
with warnings.catch_warnings():
warnings.simplefilter('error', AstropyWarning)
for line in table.meta['comments']:
# this line does not raise a warning
card = fits.Card.fromstring(line)
try:
# this line warns (exception in this context)
card.verify()
except AstropyWarning:
continue
cards.append(card)
# this ensures lines like COMMENT and HISTORY are properly dealt
# with by using the astropy.header machinery to coerce them to
# single strings
fits_header = fits.Header(cards=cards)
serialized = dict(fits_header)
commentary_keywords = ['', 'COMMENT', 'HISTORY', 'END']
for key in serialized:
# coerce things to serializable JSON
if key in commentary_keywords:
# serialize as a string - otherwise it returns a
# funky astropy type that is not json serializable
serialized[key] = str(serialized[key])
if len(fits_header.comments[key]) > 0:
header[key] = {
'value': serialized[key],
'comment': fits_header.comments[key],
}
else:
header[key] = serialized[key]
# this ensures that the spectra are properly serialized to the
# database JSONB (database JSONB cant handle datetime/date values)
header = json.loads(to_json(header))
else:
header = None
return cls(
obj_id=obj_id,
instrument_id=instrument_id,
type=type,
label=label,
observed_at=observed_at,
altdata=header,
**spec_data,
)
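# Example (hypothetical values; the file name, obj_id and instrument_id below are
# placeholders, not real database entries): a sketch of how `from_ascii` might be
# called on a three-column ASCII file holding wavelength, flux and flux error.
#
#   spec = Spectrum.from_ascii(
#       'spectrum.ascii',
#       obj_id='ZTF20abpuxna',
#       instrument_id=1,
#       observed_at='2020-01-01T00:00:00',
#       wave_column=0,
#       flux_column=1,
#       fluxerr_column=2,
#   )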
SpectrumReducer = join_model("spectrum_reducers", Spectrum, User)
SpectrumObserver = join_model("spectrum_observers", Spectrum, User)
SpectrumReducer.create = (
SpectrumReducer.delete
) = SpectrumReducer.update = AccessibleIfUserMatches('spectrum.owner')
SpectrumObserver.create = (
SpectrumObserver.delete
) = SpectrumObserver.update = AccessibleIfUserMatches('spectrum.owner')
# should be accessible only by spectrumowner ^^
SpectrumReducer.external_reducer = sa.Column(
sa.String,
nullable=True,
doc="The actual reducer for the spectrum, provided as free text if the "
"reducer is not a user in the database. Separate from the point-of-contact "
"user designated as reducer",
)
SpectrumObserver.external_observer = sa.Column(
sa.String,
nullable=True,
doc="The actual observer for the spectrum, provided as free text if the "
"observer is not a user in the database. Separate from the point-of-contact "
"user designated as observer",
)
| StarcoderdataPython |
3253506 | <reponame>DanPorter/Dans_Diffraction<filename>Dans_Diffraction/functions_scattering.py
# -*- coding: utf-8 -*-
"""
Module: functions_scattering.py
Functions:
intensity(structurefactor)
Returns the squared structure factor
phase_factor(hkl, uvw)
Return the complex phase factor:
phase_factor_qr(q, r)
Return the complex phase factor:
scatteringbasis(q, azi_ref_q=(1, 0, 0), psi=0)
Determine the scattering and polarisation vectors of a reflection based on energy, azimuth and polarisation.
scatteringcomponents(moment, q, azi_ref_q=(1, 0, 0), psi=0)
Transform magnetic vector into components within the scattering plane
scatteringvectors(q, energy_kev, azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p')
Determine the scattering and polarisation vectors of a reflection based on energy, azimuth and polarisation.
sf_magnetic_neutron(q, r, occ, moment, magnetic_formfactor=None, debyewaller=False)
Calculate the magnetic structure factor for the given HKL, using neutron magnetic form factor
sf_magnetic_neutron_polarised(q, r, occ, moment, incident_polarisation_vector=(1, 0, 0), magnetic_formfactor=None, debyewaller=False)
Calculate the magnetic structure factor for the given HKL, using neutron magnetic form factor
sf_magnetic_xray(q, r, occ, moment, magnetic_formfactor=None, debyewaller=False)
Calculate the non-resonant magnetic component of the structure factor
sf_magnetic_xray_beamline(q, r, occ, moment, energy_kev, magnetic_formfactor=None, debyewaller=False, azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p')
Calculate the non-resonant magnetic component of the structure factor
sf_magnetic_xray_polarised(q, r, occ, moment, incident_polarisation_vector=(1, 0, 0), magnetic_formfactor=None, debyewaller=False)
Calculate the non-resonant magnetic component of the structure factor
sf_magnetic_xray_resonant(q, r, occ, moment, energy_kev, debyewaller=False, azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p', f0=0, f1=1, f2=0)
Calculate the non-resonant magnetic component of the structure factor
sf_magnetic_xray_resonant_alternate(q, r, occ, moment, energy_kev, debyewaller=False, polarisation='sp', azi_ref_q=(1, 0, 0), psi=0, f0=1, f1=1, f2=1)
Calculate structure factors using resonant scattering factors in the dipolar approximation
structure_factor(scattering_factor, occupancy, debyewaller, phase)
Return the complex structure factor:
xray_resonant_scattering_factor(q, moment, energy_kev, polarisation='sp', flm=(1, 1, 1), psi=0, azi_ref_q=(1, 0, 0))
Calcualte fxres, the resonant x-ray scattering factor
By <NAME>, PhD
Diamond
2018
Version 0.9
Last updated: 13/07/21
Version History:
11/11/18 0.1 Version History started.
13/07/21 0.9 Functions re-written and tested
@author: DGPorter
"""
import numpy as np
import datetime
from . import functions_general as fg
from . import functions_crystallography as fc
__version__ = '0.9'
MAX_QR_ARRAY = 1.0e7
TIME_REPORT = True
DEBUG_MODE = False
SCATTERING_TYPES = {
'xray': ['xray', 'x', 'x-ray', 'thomson', 'charge'],
'xray fast': ['xray fast', 'xfast'],
'neutron': ['neutron', 'n', 'nuclear'],
'xray magnetic': ['xray magnetic', 'magnetic xray', 'spin xray', 'xray spin'],
'neutron magnetic': ['neutron magnetic', 'magnetic neutron', 'magnetic'],
'xray resonant': ['xray resonant', 'resonant', 'resonant xray', 'rxs'],
'xray dispersion': ['dispersion', 'xray dispersion']
}
def _debug(message):
if DEBUG_MODE:
print(message)
def phase_factor(hkl, uvw):
"""
Return the complex phase factor:
phase_factor = exp(i.2.pi.HKL.UVW')
:param hkl: array [n,3] integer reflections
:param uvw: array [m,3] atomic positions in atomic basis units
:return: complex array [n,m]
"""
hkl = np.asarray(np.rint(hkl), dtype=np.float).reshape([-1, 3])
uvw = np.asarray(uvw, dtype=np.float).reshape([-1, 3])
dotprod = np.dot(hkl, uvw.T)
return np.exp(1j * 2 * np.pi * dotprod)
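# Worked example (illustrative): for hkl = [1, 0, 0] and an atom at uvw = [0.5, 0, 0]
# the dot product is 0.5, so the phase factor is exp(2*pi*i*0.5) = -1:
#   >>> phase_factor([[1, 0, 0]], [[0.5, 0, 0]])   # -> approximately [[-1+0j]]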
def phase_factor_qr(q, r):
"""
Return the complex phase factor:
phase_factor = exp(i.Q.R')
:param q: array [n,3] reflection positions in A^-1
:param r: array [m,3] atomic positions in A
:return: complex array [n,m]
"""
q = np.asarray(q, dtype=np.float).reshape([-1, 3])
r = np.asarray(r, dtype=np.float).reshape([-1, 3])
dotprod = np.dot(q, r.T)
return np.exp(1j * dotprod)
def structure_factor(scattering_factor, occupancy, debyewaller, phase):
"""
Return the complex structure factor:
structure_factor = sum_i( f.occ.dw.phase )
:param scattering_factor: array [n,m] or [n]: radiation dependent scattering factor/ form factor,/ scattering length
:param occupancy: array [m]: occupancy of each atom
:param debyewaller: array [n,m]: thermal vibration factor of each atom and reflection
:param phase: complex array [n,m]: complex phase factor exp(-i.Q.R)
:return: complex array [n]
"""
#nrefs, natoms = phase.shape
#scattering_factor = np.asarray(scattering_factor).reshape([-1, natoms])
#occupancy = np.asarray(occupancy, dtype=float).reshape([1, natoms])
return np.sum(scattering_factor * occupancy * debyewaller * phase, axis=1)
def intensity(structurefactor):
"""
Returns the squared structure factor
:param structurefactor: complex array [n] structure factor
:return: array [n]
"""
return np.real(structurefactor * np.conj(structurefactor))
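# Worked example (illustrative): for a body-centred basis of identical atoms at
# uvw = (0,0,0) and (1/2,1/2,1/2), phase_factor gives 1 and exp(i*pi*(h+k+l)), so
# structure_factor reduces to f*(1 + (-1)**(h+k+l)): 2f when h+k+l is even and 0
# when it is odd, i.e. the familiar body-centred extinction rule.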
########################################################################################################################
# ---------------------------------- NONMAGNETIC STRUCTURE FACTORS ------------------------------------------------- #
########################################################################################################################
def sf_atom(q, r, scattering_factor=None, occ=None, debyewaller=None, **kwargs):
"""
:param q: array [n,3] reflection positions in A^-1
:param r: array [m,3] atomic positions in A
:param scattering_factor: array [n,m] or [n]: radiation dependent scattering factor/ form factor,/ scattering length
:param occ: array [m]: occupancy of each atom
:param debyewaller: array [n,m]: thermal vibration factor of each atom and reflection
:param kwargs: additional options[*unused]
:return: complex array [n]
"""
phase = phase_factor_qr(q, r)
if scattering_factor is None:
scattering_factor = np.ones(phase.shape)
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
_debug('sf_atom(phase.shape=%s)' % (phase.shape,))
return structure_factor(scattering_factor, occ, debyewaller, phase)
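# Minimal usage sketch (assumed values; scattering factors, occupancies and
# Debye-Waller factors default to 1):
#   q = np.array([[1.0, 0.0, 0.0]])                        # A^-1
#   r = np.array([[0.0, 0.0, 0.0], [3.14159, 0.0, 0.0]])   # A
#   sf = sf_atom(q, r)   # = exp(0) + exp(1j*pi), approximately 0 for this geometry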
def sf_xray_dispersion(q, r, scattering_factor, energy_kev=None, occ=None, debyewaller=None, **kwargs):
"""
Calculate the resonant x-ray structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param scattering_factor: array [n,m,e]: energy dependent complex atomic form factor
:param energy_kev: [e] array of incident x-ray energy in keV
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param kwargs: additional options[*unused]
:return sf: [n, e] complex array of structure factors
"""
phase = phase_factor_qr(q, r)
scattering_factor = np.asarray(scattering_factor, dtype=np.complex).reshape(*phase.shape, -1)
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
neng = scattering_factor.shape[2]
_debug('sf_xray_dispersion(phase.shape=%s, energies=%s)' % (phase.shape, neng))
sf = np.zeros([len(q), neng], dtype=np.complex)
for engval in range(neng):
sf[:, engval] = structure_factor(scattering_factor[:, :, engval], occ, debyewaller, phase)
if neng == 1:
return sf[:, 0]
return sf
########################################################################################################################
# ----------------------------------- MAGNETIC STRUCTURE FACTORS --------------------------------------------------- #
########################################################################################################################
def sf_magnetic_neutron(q, r, moment, magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
"""
Calculate the magnetic structure factor for the given HKL, using neutron magnetic form factor
Assumes an unpolarised incident beam.
Reference: <NAME>, Introduction to the Theory of Thermal Neutron Scattering (Cambridge University Press, 1997).
:param q: [n,3] array of reflections in cartesian coordinate system in units of inverse-A
:param r: [m,3] array of atomic positions in A
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection, or False to omit
:param kwargs: additional options[*unused]
:return sf: [n] complex array of structure factors
"""
phase = phase_factor_qr(q, r)
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if magnetic_formfactor is None:
magnetic_formfactor = np.ones(phase.shape)
# direction of q
qhat = fg.norm(q).reshape([-1, 3])
# Calculate structure factor
_debug('sf_magnetic_neutron(phase.shape=%s)' % (phase.shape,))
sf = np.zeros(len(q), dtype=np.complex)
for n, qh in enumerate(qhat):
sfm = np.array([0., 0., 0.])
for m, mom in enumerate(moment):
# Calculate Magnetic part
qm = mom - np.dot(qh, mom) * qh
# Calculate structure factor
sfm = sfm + (magnetic_formfactor[n, m] * debyewaller[n, m] * occ[m] * phase[n, m] * qm)
# Calculate polarisation with incident neutron
# sf[n] = np.dot(sfm, incident_polarisation_vector)
# SF[n] = np.dot(SFm,SFm) # maximum possible
# average polarisation
sf[n] = (np.dot(sfm, [1, 0, 0]) + np.dot(sfm, [0, 1, 0]) + np.dot(sfm, [0, 0, 1])) / 3
return sf
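# Illustrative call (assumed values): a single moment along z probed at q along x.
# The component of the moment perpendicular to q is the full moment here, so the
# simple unpolarised average used above returns (0 + 0 + 1)/3:
#   sf_magnetic_neutron(q=[[1.0, 0, 0]], r=[[0, 0, 0]], moment=[[0, 0, 1.0]])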
def sf_magnetic_neutron_polarised(q, r, moment, incident_polarisation_vector=(1, 0, 0),
magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
"""
Calculate the magnetic structure factor for the given HKL, using neutron magnetic form factor
Reference: <NAME>, Introduction to the Theory of Thermal Neutron Scattering (Cambridge University Press, 1997).
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param incident_polarisation_vector: [1,3] direction of incident polarisation
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection, or False to omit
:param kwargs: additional options[*unused]
:return sf: [n] complex array of structure factors
"""
phase = phase_factor_qr(q, r)
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if magnetic_formfactor is None:
magnetic_formfactor = np.ones(phase.shape)
# direction of q
qhat = fg.norm(q).reshape([-1, 3])
# Calculate structure factor
_debug('sf_magnetic_neutron_polarised(phase.shape=%s)' % (phase.shape,))
sf = np.zeros(len(q), dtype=np.complex)
for n, qh in enumerate(qhat):
sfm = np.array([0., 0., 0.])
for m, mom in enumerate(moment):
# Calculate Magnetic part
qm = mom - np.dot(qh, mom) * qh
# Calculate structure factor
sfm = sfm + (magnetic_formfactor[n, m] * debyewaller[n, m] * occ[m] * phase[n, m] * qm)
# Calculate polarisation with incident neutron
sf[n] = np.dot(sfm, incident_polarisation_vector)
return sf
def sf_magnetic_xray(q, r, moment, magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
"""
Calculate the non-resonant magnetic component of the structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param kwargs: additional options[*unused]
:return sf: [n] complex array of structure factors
f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
- ignore orbital moment L
- fD = magnetic form factor
- S = spin moment
- k_i, k_o = wavevector in, out
- e_i, e_o = polarisation in, out
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
No orbital component assumed
magnetic moments assumed to be in the same reference frame as the polarisation
"""
phase = phase_factor_qr(q, r)
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if magnetic_formfactor is None:
magnetic_formfactor = np.ones(phase.shape)
# Calculate structure factor
_debug('sf_magnetic_xray(phase.shape=%s)' % (phase.shape,))
sf = np.zeros(len(q), dtype=np.complex)
for n in range(len(q)):
# Calculate vector structure factor
sfm = np.array([0., 0., 0.])
for m, mom in enumerate(moment):
sfm = sfm + magnetic_formfactor[n, m] * debyewaller[n, m] * occ[m] * phase[n, m] * mom
# average polarisation
sf[n] = (np.dot(sfm, [1, 0, 0]) + np.dot(sfm, [0, 1, 0]) + np.dot(sfm, [0, 0, 1])) / 3
return sf
def sf_magnetic_xray_polarised(q, r, moment, incident_polarisation_vector=(1, 0, 0),
magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
"""
Calculate the non-resonant magnetic component of the structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param incident_polarisation_vector: [1,3] direction of incident polarisation
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param kwargs: additional options[*unused]
:return sf: [n] complex array of structure factors
f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
- ignore orbital moment L
- fD = magnetic form factor
- S = spin moment
- k_i, k_o = wavevector in, out
- e_i, e_o = polarisation in, out
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
No orbital component assumed
magnetic moments assumed to be in the same reference frame as the polarisation
"""
phase = phase_factor_qr(q, r)
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if magnetic_formfactor is None:
magnetic_formfactor = np.ones(phase.shape)
# Calculate structure factor
_debug('sf_magnetic_xray_polarised(phase.shape=%s)' % (phase.shape,))
sf = np.zeros(len(q), dtype=np.complex)
for n in range(len(q)):
# Calculate vector structure factor
sfm = np.array([0., 0., 0.])
for m, mom in enumerate(moment):
sfm = sfm + magnetic_formfactor[n, m] * debyewaller[n, m] * occ[m] * phase[n, m] * mom
# Calculate polarisation with incident x-ray
# The reference frame of the x-ray and the crystal are assumed to be the same
# i.e. pol=[1,0,0] || mom=[1,0,0] || (1,0,0)
sf[n] = np.dot(sfm, incident_polarisation_vector)
return sf
def sf_magnetic_xray_beamline(q, r, moment, energy_kev, magnetic_formfactor=None, occ=None, debyewaller=None,
azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p', **kwargs):
"""
Calculate the non-resonant magnetic component of the structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param energy_kev: float value of incident x-ray energy in keV
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
:param psi: [p] array of azimthal angles - the rotation out of the scattering plane.
:param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
:param kwargs: additional options[*unused]
:return sf: [n, p] complex array of structure factors for different reflections and azimuths
f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
- ignore orbital moment L
- fD = magnetic form factor
- S = spin moment
- k_i, k_o = wavevector in, out
- e_i, e_o = polarisation in, out
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
No orbital component assumed
magnetic moments assumed to be in the same reference frame as the polarisation
"""
phase = phase_factor_qr(q, r)
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if magnetic_formfactor is None:
magnetic_formfactor = np.ones(phase.shape)
psi = np.asarray(psi, dtype=np.float).reshape([-1])
npsi = len(psi)
_debug('sf_magnetic_xray_beamline(phase.shape=%s, npsi=%d)' % (phase.shape, npsi))
sf = np.zeros([len(q), npsi], dtype=np.complex)
for psival in range(npsi):
kin, kout, ein, eout = scatteringvectors(q, energy_kev, azi_ref_q, psi[psival], polarisation)
# Magnetic form factor
# f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B] #equ 2 Hill+McMorrow 1996
# ignore orbital moment L
fspin = np.zeros([len(q), len(r)], dtype=np.complex)
for n in range(len(q)):
B = np.cross(eout[n], ein[n]) + \
np.cross(kout[n], eout[n]) * np.dot(kout[n], ein[n]) - \
np.cross(kin[n], ein[n]) * np.dot(kin[n], eout[n]) - \
np.cross(np.cross(kout[n], eout[n]), np.cross(kin[n], ein[n]))
fspin[n, :] = 1j * magnetic_formfactor[n, :] * np.dot(moment, B)
sf[:, psival] = np.sum(fspin * occ * debyewaller * phase, axis=1)
if npsi == 1:
return sf[:, 0]
return sf
def sf_magnetic_xray_resonant(q, r, moment, energy_kev, occ=None, debyewaller=None, azi_ref_q=(1, 0, 0), psi=0,
polarisation='sp', f0=0, f1=1, f2=0, **kwargs):
"""
Calculate the non-resonant magnetic component of the structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param energy_kev: float value of incident x-ray energy in keV
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
:param psi: [p] array of azimthal angles - the rotation out of the scattering plane.
:param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
:param f0: float Flm value 0 (charge)
:param f1: float Flm value 1
:param f2: float Flm value 2
:param kwargs: additional options[*unused]
:return sf: [n, p] complex array of structure factors for different reflections and azimuths
f_res_mag = [(e'.e)F0 - i(e'xe).Z*F1 + (e'.Z)*(e.Z)*F2]
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
No orbital component assumed
magnetic moments assumed to be in the same reference frame as the polarisation
"""
phase = phase_factor_qr(q, r)
moment = fg.norm(moment).reshape((-1, 3))
z = fg.norm(moment) # z^n is a unit vector in the direction of the magnetic moment of the nth ion.
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
psi = np.asarray(psi, dtype=np.float).reshape([-1])
npsi = len(psi)
_debug('sf_magnetic_xray_resonant(phase.shape=%s, npsi=%d)' % (phase.shape, npsi))
sf = np.zeros([len(q), npsi], dtype=np.complex)
for psival in range(npsi):
kin, kout, ein, eout = scatteringvectors(q, energy_kev, azi_ref_q, psi[psival], polarisation)
fe1e1 = np.zeros([len(q), len(r)], dtype=np.complex)
flm0, flm1, flm2 = 0, 0, 0
for ref in range(len(q)):
# z = scatteringcomponents(moment, q[ref], azi_ref_q, psi)
# Magnetic form factor
# f_res_mag = [(e'.e)F0 - i(e'xe).Z*F1 + (e'.Z)*(e.Z)*F2] #equ 7 Hill+McMorrow 1996
if f0 != 0:
flm0 = np.dot(eout[ref], ein[ref])
if f1 != 0:
flm1 = np.dot(np.cross(eout[ref], ein[ref]), z.T)
if f2 != 0:
flm2 = np.dot(eout[ref], z.T) * np.dot(ein[ref], z.T)
fe1e1[ref, :] = flm0 * f0 - 1j * flm1 * f1 + flm2 * f2
# flm0 = np.array([np.dot(i_eout, i_ein).repeat(len(z)) for i_eout, i_ein in zip(eout, ein)])
# flm1 = np.array([np.dot(np.cross(i_eout, i_ein), z.T) for i_eout, i_ein in zip(eout, ein)])
# flm2 = np.array([np.dot(i_eout, z.T) * np.dot(i_ein, z.T) for i_eout, i_ein in zip(eout, ein)])
# fe1e1 = flm0 * f0 - 1j * flm1 * f1 + flm2 * f2
# flm0 = np.tile(np.dot(eout, ein.T).diagonal(), (len(z), 1)).T
# flm1 = np.array([np.dot(np.cross(i_eout, i_ein), z.T) for i_eout, i_ein in zip(eout, ein)])
# flm2 = np.dot(eout, z.T).diagonal() * np.dot(ein, z.T).diagonal()
# fe1e1 = flm0 * f0 - 1j * flm1 * f1 + flm2 * f2
# Calculate structure factor
sf[:, psival] = np.sum(fe1e1 * debyewaller * occ * phase, axis=1)
if npsi == 1:
return sf[:, 0]
return sf
def sf_magnetic_xray_resonant_alternate(q, r, moment, energy_kev, occ=None, debyewaller=None, polarisation='sp',
azi_ref_q=(1, 0, 0), psi=0, f0=0, f1=1, f2=0, **kwargs):
"""
Calculate structure factors using resonant scattering factors in the dipolar approximation
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param energy_kev: float value of incident x-ray energy in keV
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
:param psi: [p] array of azimthal angles - the rotation out of the scattering plane.
:param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
:param f0: float Flm value 0 (charge)
:param f1: float Flm value 1
:param f2: float Flm value 2
:param kwargs: additional options[*unused]
:return sf: [n, p] complex array of structure factors for different reflections and azimuths
I = Scattering.xray_resonant(HKL,energy_kev,polarisation,F0,F1,F2)
Returns an array with the same length as HKL, giving the real intensity at each reflection.
energy_kev = x-ray energy in keV
polarisation = x-ray polarisation: 'ss',{'sp'},'ps','pp'
f0/1/2 = Resonance factor Flm
azim_zero = [h,k,l] vector parallel to the origin of the azimuth
psi = azimuthal angle defining the scattering plane
Uses the E1E1 resonant x-ray scattering amplitude:
fxr_n = (ef.ei)*f0 -i(ef X ei).z_n*f1 + (ef.z_n)(ei.z_n)f2
Where ei and ef are the initial and final polarisation states, respectively,
and z_n is a unit vector in the direction of the magnetic moment of the nth ion.
The polarisation states are determined to be one of the natural synchrotron
states, where sigma (s) is perpendicular to the scattering plane and pi (p) is
parallel to it.
( s-s s-p )
( p-s p-p )
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (15)
"""
phase = phase_factor_qr(q, r)
# z^n is a unit vector in the direction of the magnetic moment of the nth ion.
moment = fg.norm(moment).reshape((-1, 3))
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
psi = np.asarray(psi, dtype=np.float).reshape([-1])
npsi = len(psi)
_debug('sf_magnetic_xray_resonant_alternate(phase.shape=%s, npsi=%d)' % (phase.shape, npsi))
sf = np.zeros([len(q), npsi], dtype=np.complex)
for psival in range(npsi):
# Get resonant form factor
fxres = xray_resonant_scattering_factor(q, moment, energy_kev, polarisation,
(f0, f1, f2), psi[psival], azi_ref_q)
# Calculate structure factor
# Broadcasting used on 2D fxres
sf[:, psival] = np.sum(fxres * debyewaller * occ * phase, axis=1)
if npsi == 1:
return sf[:,0]
return sf
########################################################################################################################
# ----------------------------------------- MAGNETIC PROJECTIONS --------------------------------------------------- #
########################################################################################################################
def xray_resonant_scattering_factor(q, moment, energy_kev, polarisation='sp', flm=(1, 1, 1), psi=0,
azi_ref_q=(1, 0, 0)):
"""
Calcualte fxres, the resonant x-ray scattering factor
fxres = Scattering.xray_resonant_scattering_factor(HKL,energy_kev,polarisation,flm,azim_zero,psi)
energy_kev = x-ray energy in keV
polarisation = x-ray polarisation: 'ss',{'sp'},'ps','pp'
flm = (f0, f1, f2) Resonance factor Flm, f0/1/2 should be 0 or 1 each
azim_zero = [h,k,l] vector parallel to the origin of the azimuth {[1,0,0]}
psi = azimuthal angle defining the scattering plane {0}
:param q: [n*3] array of reflection coordinates in cartesian basis (Q)
:param moment: [mx3] array of magnetic moments in cartesian basis
:param energy_kev: float energy in keV
:param polarisation: polarisation condition: 'sp', 'ss', 'ps', 'pp'. s=sigma, p=pi
:param flm: (f0, f1, f2) Resonance factor Flm, f0/1/2 should be 0 or 1 each
:param azi_ref_q: azimuthal refence, in cartesian basis (Q)
:param psi: float, azimuthal angle
:return: fxres [n*1] array of resonant x-ray scattering factors
Uses the E1E1 resonant x-ray scattering amplitude:
fxr_n = (ef.ei)*F0 -i(ef X ei).z_n*F1 + (ef.z_n)(ei.z_n)F2
Where ei and ef are the initial and final polarisation states, respectively,
and z_n is a unit vector in the direction of the magnetic moment of the nth ion.
The polarisation states are determined to be one of the natural synchrotron
states, where sigma (s) is perpendicular to the scattering plane and pi (p) is
parallel to it.
( s-s s-p )
( p-s p-p )
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (15)
"""
q = np.asarray(q, dtype=np.float).reshape((-1, 3))
moment = np.asarray(moment, dtype=np.float).reshape((-1, 3))
polarisation = polarisation.lower().replace('-', '').replace(' ', '')
nref = len(q)
nat = len(moment)
qmag = fg.mag(q)
bragg = fc.cal2theta(qmag, energy_kev) / 2
fxres = np.zeros([nref, nat], dtype=np.complex)
for ref in range(nref):
# Resonant scattering factor
# Electric Dipole transition at 3d L edge
z1, z2, z3 = scatteringcomponents(moment, q[ref], azi_ref_q, psi).T
bragg_r = np.deg2rad(bragg[ref])
if polarisation in ['sigmasigma', 'sigsig', 'ss']: # Sigma-Sigma
f0 = 1 * np.ones(nat)
f1 = 0 * np.ones(nat)
f2 = z2 ** 2
elif polarisation in ['sigmapi', 'sigpi', 'sp']: # Sigma-Pi
f0 = 0 * np.ones(nat)
f1 = z1 * np.cos(bragg_r) + z3 * np.sin(bragg_r)
f2 = -z2 * (z1 * np.sin(bragg_r) - z3 * np.cos(bragg_r))
elif polarisation in ['pisigma', 'pisig', 'ps']: # Pi-Sigma
f0 = 0 * np.ones(nat)
f1 = z3 * np.sin(bragg_r) - z1 * np.cos(bragg_r)
f2 = z2 * (z1 * np.sin(bragg_r) + z3 * np.cos(bragg_r))
elif polarisation in ['pipi', 'pp']: # Pi-Pi
f0 = np.cos(2 * bragg_r) * np.ones(nat)
f1 = -z2 * np.sin(2 * bragg_r)
f2 = -(np.cos(bragg_r) ** 2) * (z1 ** 2 * np.tan(bragg_r) ** 2 + z3 ** 2)
else:
raise ValueError('Incorrect polarisation. pol should be e.g. ''ss'' or ''sp''')
fxres[ref, :] = flm[0] * f0 - 1j * flm[1] * f1 + flm[2] * f2
return fxres
def scatteringbasis(q, azi_ref_q=(1, 0, 0), psi=0):
"""
Determine the scattering and polarisation vectors of a reflection based on energy, azimuth and polarisation.
:param q: [1*3] reflection vector in a cartesian basis
:param azi_ref_q: [1,3] direction along which the azimuthal zero angle is determind
:param psi: float azimuthal angle about U3 in degrees
:return: U1, U2, U3
The basis is chosen such that Q defines the scattering plane, the sigma direction is normal to this plane,
the pi direction is always within this plane.
The azimuthal angle defines a rotation about the Q axis in a clockwise mannor, matching I16.
At an azimuth of 0degrees, U1 is perpendicular to Q, along the direction of azim_zero.
"""
# Define coordinate system I,J,Q (U1,U2,U3)
# See FDMNES User's Guide p20 'II-11) Anomalous or resonant diffraction'
# U1 || projection of azim_zero
# U2 _|_ U1,U3
# U3 || Q = kf-ki
Qhat = fg.norm(q) # || Q
AxQ = fg.norm(np.cross(azi_ref_q, Qhat))
Ihat = fg.norm(np.cross(Qhat, AxQ)) # || to azim_zero
Jhat = fg.norm(np.cross(Qhat, Ihat)) # -| to I and Q
# Rotate psi about Qhat
rpsi = np.deg2rad(psi)
# -ve sin makes clockwise rotation
# This was checked on 21/1/19 vs CRO paper + sergio's calculations and seems to agree with experiment,
# however we never did an azimuthal scan of the (103) which would have distinguished this completely.
Ihat_psi = fg.norm(np.cos(rpsi) * Ihat - np.sin(rpsi) * Jhat)
Jhat_psi = fg.norm(np.cross(Qhat, Ihat_psi))
return np.vstack([Ihat_psi, Jhat_psi, Qhat])
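# Worked example (illustrative): with q along z, azi_ref_q along x and psi = 0,
# the returned basis is simply the Cartesian axes:
#   scatteringbasis([0, 0, 1], azi_ref_q=(1, 0, 0), psi=0)
#   -> [[1, 0, 0], [0, 1, 0], [0, 0, 1]]   (rows are U1, U2, U3 = Qhat)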
def scatteringcomponents(moment, q, azi_ref_q=(1, 0, 0), psi=0):
"""
Transform magnetic vector into components within the scattering plane
:param moment: [n*3] array of magnetic moments in a cartesian basis
:param q: [1*3] reflection vector in a cartesian basis
:param azi_ref_q: [1*3] azimuthal reference in a cartesian basis
:param psi: float azimuthal angle
:return: (z1, z2, z3) components of the magnetic moment along the reflection direction
"""
U = scatteringbasis(q, azi_ref_q, psi)
# Determine components of the magnetic vector
z1z2z3 = np.dot(moment, U.T) # [mxmymz.I, mxmymz.J, mxmymz.Q]
return fg.norm(z1z2z3)
def scatteringvectors(q, energy_kev, azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p'):
"""
Determine the scattering and polarisation vectors of a reflection based on energy, azimuth and polarisation.
:param q: [n,3] reflection vector in a cartesian basis
:param energy_kev: x-ray scattering energy in keV
:param azi_ref_q: [1,3] direction along which the azimuthal zero angle is determind
:param psi: float angle in degrees about the azimuth
:param polarisation: polarisation with respect to the scattering plane, options:
'ss' : sigma-sigma polarisation
'sp' : sigma-pi polarisation
'ps' : pi-sigma polarisation
'pp' : pi-pi polarisation
:return: kin, kout, ein, eout
Returned values are [n,3] arrays
kin : [n,3] array of incident wavevectors
kout: [n,3] array of scattered wavevectors
ein : [n,3] array of incident polarisation
eout: [n,3] array of scattered polarisation
The basis is chosen such that Q defines the scattering plane, sigma and pi directions are normal to this plane.
"""
q = np.asarray(q, dtype=np.float).reshape([-1, 3])
azi_ref_q = np.asarray(azi_ref_q, dtype=np.float).reshape(3)
polarisation = polarisation.replace('-', '').replace(' ', '')
out_kin = np.zeros([len(q), 3])
out_kout = np.zeros([len(q), 3])
out_ein = np.zeros([len(q), 3])
out_eout = np.zeros([len(q), 3])
for n in range(len(q)):
# Define coordinate system I,J,Q (U1,U2,U3)
# See FDMNES User's Guide p20 'II-11) Anomalous or resonant diffraction'
Qhat = fg.norm(q[n, :]) # || Q
AxQ = fg.norm(np.cross(azi_ref_q, Qhat))
Ihat = fg.norm(np.cross(Qhat, AxQ)) # || to azim_zero
Jhat = fg.norm(np.cross(Qhat, Ihat)) # -| to I and Q
# Determine wavevectors
bragg = fc.cal2theta(fg.mag(q[n, :]), energy_kev) / 2
if np.isnan(bragg):
raise Exception('Bragg > 180deg at this energy: q(%s) @ E=%s' % (q[n, :], energy_kev))
rb = np.deg2rad(bragg)
rp = np.deg2rad(psi)
kin = np.cos(rb) * np.cos(rp) * Ihat - np.cos(rb) * np.sin(rp) * Jhat - np.sin(rb) * Qhat
kout = np.cos(rb) * np.cos(rp) * Ihat - np.cos(rb) * np.sin(rp) * Jhat + np.sin(rb) * Qhat
esig = np.sin(rp) * Ihat + np.cos(rp) * Jhat # sigma polarisation (in or out)
piin = np.cross(kin, esig) # pi polarisation in
piout = np.cross(kout, esig) # pi polarisation out
# Polarisations
if polarisation in ['sigmasigma', 'sigsig', 'ss']:
ein = 1.0 * esig
eout = 1.0 * esig
elif polarisation in ['sigmapi', 'sigpi', 'sp']:
ein = 1.0 * esig
eout = 1.0 * piout
elif polarisation in ['pisigma', 'pisig', 'ps']:
ein = 1.0 * piin
eout = 1.0 * esig
elif polarisation in ['pipi', 'pp']:
ein = 1.0 * piin
eout = 1.0 * piout
else:
raise ValueError('Incorrect polarisation. pol should be e.g. ''ss'' or ''sp''')
out_kin[n, :] = kin
out_kout[n, :] = kout
out_ein[n, :] = ein
out_eout[n, :] = eout
return out_kin, out_kout, out_ein, out_eout
########################################################################################################################
# ---------------------------------------- ScatteringTypes Class --------------------------------------------------- #
########################################################################################################################
def get_scattering_function(scattering_type):
"""
Return function for given scattering type
Function will return structure factor function
:param scattering_type: str : scattering name as defined in SCATTERING_NAMES
:return: function
"""
scattering_type = scattering_type.lower()
if scattering_type in SCATTERING_TYPES['xray']:
return sf_atom
if scattering_type in SCATTERING_TYPES['xray fast']:
return sf_atom
if scattering_type in SCATTERING_TYPES['xray dispersion']:
return sf_xray_dispersion
if scattering_type in SCATTERING_TYPES['neutron']:
return sf_atom
if scattering_type in SCATTERING_TYPES['xray magnetic']:
return sf_magnetic_xray
if scattering_type in SCATTERING_TYPES['neutron magnetic']:
return sf_magnetic_neutron
if scattering_type in SCATTERING_TYPES['xray resonant']:
return sf_magnetic_xray_resonant
raise(Exception('Scattering name %s not recognised' % scattering_type))
def options(occ=None, debyewaller=None, scattering_factor=None,
moment=None, incident_polarisation_vector=(1, 0, 0), magnetic_formfactor=None,
energy_kev=8, polarisation='sp', azi_ref_q=(1, 0, 0), psi=0, f0=0, f1=1, f2=0):
"""
Create an input dict that will work with all structure factor (sf_) functions
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param scattering_factor: array [n,m] or [n]: radiation dependent scattering factor/ form factor,/ scattering length
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param incident_polarisation_vector: [1,3] direction of incident polarisation
:param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
:param energy_kev: float value of incident x-ray energy in keV
:param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
:param psi: float value of the azimthal angle - the rotation out of the scattering plane.
:param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
:param f0: float Flm value 0 (charge)
:param f1: float Flm value 1
:param f2: float Flm value 2
:return: dict
"""
return locals()
def autostructurefactor(scattering_type, q, r, *args, **kwargs):
"""
Choose a scattering type can calcuate the structure factor
:param scattering_type:
:param q: array [n,3] reflection positions in A^-1
:param r: array [m,3] atomic positions in A
:param args: additional arguments to pass to choosen scattering function
:param kwargs: named arguments to pass to choosen scattering function
:return: complex array [n]
"""
scatter_fun = get_scattering_function(scattering_type)
opt = options(*args, **kwargs)
q = np.asarray(q, dtype=np.float).reshape([-1, 3])
r = np.asarray(r, dtype=np.float).reshape([-1, 3])
energy_kev = np.asarray(opt['energy_kev'], dtype=np.float).reshape(-1)
psi = np.asarray(opt['psi'], dtype=np.float).reshape(-1)
nref = q.shape[0]
natom = r.shape[0]
nenergy = energy_kev.size
npsi = psi.size
scattering_factor = opt['scattering_factor']
scattering_factor = np.asarray(scattering_factor) if scattering_factor is not None else np.ones((nref, len(r)))
if scattering_factor.ndim < 2 or scattering_factor.shape[1] < 2:
scattering_factor = np.tile(scattering_factor.reshape((-1, len(r))), (nref, 1))
scattering_factor = scattering_factor.reshape((nref, len(r), -1))
debyewaller = opt['debyewaller']
debyewaller = np.asarray(debyewaller) if debyewaller is not None else np.ones((nref, len(r)))
if debyewaller.ndim < 2 or debyewaller.shape[1] < 2:
debyewaller = np.tile(debyewaller.reshape((-1, len(r))), (nref, 1))
magff = opt['magnetic_formfactor']
magff = np.asarray(magff) if magff is not None else np.ones((nref, len(r)))
if magff.ndim < 2 or magff.shape[1] < 2:
magff = np.tile(magff.reshape((-1, len(r))), (nref, 1))
# Break up long lists of HKLs
n_arrays = np.ceil(nref * natom / MAX_QR_ARRAY)
if n_arrays > 1:
print('Splitting %d reflections (%d atoms) into %1.0f parts' % (nref, natom, n_arrays))
q_array = np.array_split(q, n_arrays)
scattering_factor = np.array_split(scattering_factor, n_arrays)
debyewaller = np.array_split(debyewaller, n_arrays)
magff = np.array_split(magff, n_arrays)
sf = np.zeros([nref, nenergy, npsi], dtype=np.complex)
start_time = datetime.datetime.now()
for e, enval in enumerate(energy_kev):
opt['energy_kev'] = enval
for p, psival in enumerate(psi):
opt['psi'] = psival
ls = 0
for n, _q in enumerate(q_array):
if n_arrays > 1:
print(' Starting %2.0f/%2.0f: %d:%d' % (n+1, n_arrays, ls, ls+len(_q)))
opt['scattering_factor'] = scattering_factor[n][:, :, e]
opt['debyewaller'] = debyewaller[n]
opt['magnetic_formfactor'] = magff[n]
sf[ls: ls+len(_q), e, p] = scatter_fun(_q, r, **opt)
ls = ls+len(_q)
end_time = datetime.datetime.now()
time_difference = end_time - start_time
if TIME_REPORT and time_difference.total_seconds() > 10:
print('Calculated %d structure factors in %s' % (nref, time_difference))
if nenergy == 1 and npsi == 1:
return sf[:, 0, 0] # shape(nref)
if nenergy == 1:
return sf[:, 0, :]  # shape(nref, npsi)
if npsi == 1:
return sf[:, :, 0]  # shape(nref, nenergy)
return sf
def autointensity(scattering_type, q, r, *args, **kwargs):
"""
Choose a scattering type can calcuate the scattered intensity
:param scattering_type: named scattering function, see "get_scattering_function()"
:param q: array [n,3] reflection positions in A^-1
:param r: array [m,3] atomic positions in A
:param args: additional arguments to pass to choosen scattering function
:param kwargs: named arguments to pass to choosen scattering function
:return: float array [n]
"""
sf = autostructurefactor(scattering_type, q, r, *args, **kwargs)
return intensity(sf)
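# Usage sketch (hypothetical inputs): plain x-ray intensity for a two-atom structure,
# with scattering factors, occupancies and Debye-Waller factors left at their
# defaults of 1:
#   q = np.array([[1.0, 0, 0], [2.0, 0, 0]])   # A^-1
#   r = np.array([[0.0, 0, 0], [1.5, 0, 0]])   # A
#   inten = autointensity('xray', q, r)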
| StarcoderdataPython |
4802138 | <gh_stars>1-10
"""
Sync full-text search indices with the database
- Mark events for later syncing
- Sync events
- Rebuild the indices
"""
import logging
from app.models.search.event import SearchableEvent
from app.views.elastic_search import client
from app.views.redis_store import redis_store
logger = logging.getLogger(__name__)
INDEX_CLASSES = [SearchableEvent]
REDIS_EVENT_INDEX = 'event_index'
REDIS_EVENT_DELETE = 'event_delete'
def sync_event_from_database(db_event):
"""Fetches the event with id `id` from the database and creates or updates the
document in the Elasticsearch index
"""
logger.info('Indexing event %i %s', db_event.id, db_event.name)
searchable = SearchableEvent()
searchable.from_event(db_event)
searchable.save()
def rebuild_indices(client=client):
"""Rebuilds all search indices, deletes all data"""
redis_store.delete(REDIS_EVENT_INDEX)
redis_store.delete(REDIS_EVENT_DELETE)
for index_class in INDEX_CLASSES:
if client.indices.exists(index_class.meta.index):
logger.info('Deleting index %s', index_class.meta.index)
client.indices.delete(index_class.meta.index)
index_class.init()
def delete_event_from_index(event_id):
"""Deletes an event from the Elasticsearch index"""
searchable = SearchableEvent()
searchable.id = event_id
searchable.delete()
def mark_event(purpose, event_id):
"""Marks an event id in redis for later syncing.
Purpose can be taken from this namespace (Look for global REDIS_X
variables)
"""
redis_store.sadd(purpose, event_id)
def _events_marked(purpose):
"""Retrieve all event ids from redis marked as `purpose`"""
marked_event_id = redis_store.spop(purpose)
while marked_event_id:
yield marked_event_id
marked_event_id = redis_store.spop(purpose)
def sync():
"""Syncs all events that have been marked"""
logger.info('Syncing marked events')
for event_id in list(_events_marked(REDIS_EVENT_INDEX)):
logger.info('Syncing event %s', event_id)
sync_event_from_database(event_id)
for event_id in list(_events_marked(REDIS_EVENT_DELETE)):
logger.info('Deleting event %s', event_id)
delete_event_from_index(event_id)
| StarcoderdataPython |
3270895 | <gh_stars>1-10
import qiskit.circuit.library as library
import math, qiskit, random
import networkx as nx
import numpy as np
from qcg.generators import gen_supremacy, gen_hwea, gen_BV, gen_sycamore, gen_adder
from qiskit_helper_functions.random_benchmark import RandomCircuit
def factor_int(n):
nsqrt = math.ceil(math.sqrt(n))
val = nsqrt
while 1:
co_val = int(n/val)
if val*co_val == n:
return val, co_val
else:
val -= 1
def gen_secret(num_qubit):
num_digit = num_qubit-1
num = 2**num_digit-1
num = bin(num)[2:]
num_with_zeros = str(num).zfill(num_digit)
return num_with_zeros
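# Worked examples (illustrative): factor_int(12) walks down from ceil(sqrt(12)) = 4
# and returns (4, 3); gen_secret(4) builds the 3-digit all-ones bitstring '111',
# used as the hidden string of the Bernstein-Vazirani circuit generated below.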
def construct_qaoa_plus(P, G, params, reg_name, barriers=False, measure=False):
assert len(params) == 2 * P, 'Number of parameters should be 2P'
nq = len(G.nodes())
circ = qiskit.QuantumCircuit(qiskit.QuantumRegister(nq, reg_name))
# Initial state
circ.h(range(nq))
gammas = [param for i, param in enumerate(params) if i % 2 == 0]
betas = [param for i, param in enumerate(params) if i % 2 == 1]
for i in range(P):
# Phase Separator Unitary
for edge in G.edges():
q_i, q_j = edge
circ.rz(gammas[i] / 2, [q_i, q_j])
circ.cx(q_i, q_j)
circ.rz(-1 * gammas[i] / 2, q_j)
circ.cx(q_i, q_j)
if barriers:
circ.barrier()
# Mixing Unitary
for q_i in range(nq):
circ.rx(-2 * betas[i], q_i)
if measure:
circ.measure_all()
return circ
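# Example (illustrative): one QAOA round on a 4-node ring graph. The parameter
# values are arbitrary placeholders, ordered [gamma_1, beta_1, gamma_2, beta_2, ...]:
#   G = nx.cycle_graph(4)
#   circ = construct_qaoa_plus(P=1, G=G, params=[0.3, 0.7], reg_name='q')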
def construct_random(num_qubits, depth):
random_circuit_obj = RandomCircuit(width=num_qubits,depth=depth,
connection_degree=0.5,num_hadamards=5,seed=None)
circuit, _ = random_circuit_obj.generate()
return circuit
def generate_circ(num_qubits,depth,circuit_type,reg_name,connected_only,seed):
random.seed(seed)
full_circ = None
num_trials = 100
density = 0.001
while num_trials:
if circuit_type == 'supremacy':
i,j = factor_int(num_qubits)
if abs(i-j)<=2:
full_circ = gen_supremacy(i,j,depth*8,regname=reg_name)
elif circuit_type == 'sycamore':
i,j = factor_int(num_qubits)
full_circ = gen_sycamore(i,j,depth,regname=reg_name)
elif circuit_type == 'hwea':
full_circ = gen_hwea(num_qubits,depth,regname=reg_name)
elif circuit_type == 'bv':
full_circ = gen_BV(gen_secret(num_qubits),barriers=False,regname=reg_name)
elif circuit_type == 'qft':
full_circ = library.QFT(num_qubits=num_qubits,approximation_degree=0,do_swaps=False).decompose()
elif circuit_type=='aqft':
approximation_degree=int(math.log(num_qubits,2)+2)
full_circ = library.QFT(num_qubits=num_qubits,approximation_degree=num_qubits-approximation_degree,do_swaps=False).decompose()
elif circuit_type == 'adder':
full_circ = gen_adder(nbits=int((num_qubits-2)/2),barriers=False,regname=reg_name)
elif circuit_type=='regular':
if 3*num_qubits%2==0:
graph = nx.random_regular_graph(3, num_qubits)
full_circ = construct_qaoa_plus(P=depth,G=graph,
params=[np.random.uniform(-np.pi,np.pi) for _ in range(2*depth)],reg_name=reg_name)
elif circuit_type=='erdos':
graph = nx.generators.random_graphs.erdos_renyi_graph(num_qubits, density)
full_circ = construct_qaoa_plus(P=depth,G=graph,
params=[np.random.uniform(-np.pi,np.pi) for _ in range(2*depth)],reg_name=reg_name)
density += 0.001
elif circuit_type=='random':
full_circ = construct_random(num_qubits=num_qubits,depth=depth)
else:
raise Exception('Illegal circuit type:',circuit_type)
if full_circ is not None and full_circ.num_tensor_factors()==1:
break
elif full_circ is not None and not connected_only:
break
else:
full_circ = None
num_trials -= 1
assert full_circ is None or full_circ.num_qubits==num_qubits
return full_circ | StarcoderdataPython |
1749857 | <reponame>EGAMAGZ/Terminal-Music-Player<filename>pymusicterm/ui/menus.py<gh_stars>1-10
import py_cui
from typing import List
from py_cui import widget_set
class LocalPlayerSettingsMenu:
MENU_OPTIONS:List[str]=["Repeat All","Repeat","Shuffle","Block N|P key on repeat"]
TITLE:str="Player Settings"
ROW:int=4
COLUMN:int=0
ROW_SPAN:int=2
COLUMN_SPAN:int=2
window:py_cui.widget_set.WidgetSet
def __init__(self,window:py_cui.widget_set.WidgetSet) -> None:
""" Constructor of LocalPlayerSettingsMenu class
"""
self.window=window
self.menu=self.window.add_checkbox_menu(self.TITLE,self.ROW,self.COLUMN,
self.ROW_SPAN,self.COLUMN_SPAN)
self.__config()
def __config(self):
""" Function that configures the CheckBoxMenu widget
"""
self.menu.add_item_list(self.MENU_OPTIONS)
self.menu.add_text_color_rule("X",py_cui.GREEN_ON_BLACK,'contains',match_type='regex')
self.menu.set_focus_text("|Enter - Enable/Disable setting|")
def create(self) -> py_cui.widgets.CheckBoxMenu:
""" Function that returns the CheckBoxMenu Widget created
Returns
-------
menu : CheckBoxMenu
Return a CheckBoxMenu Widget
"""
return self.menu
class LocalPlayerQueueMenu:
TITLE="Songs queue"
ROW:int=0
COLUMN:int=0
ROW_SPAN:int=4
COLUMN_SPAN:int=2
window:py_cui.widget_set.WidgetSet
def __init__(self,window:py_cui.widget_set.WidgetSet) -> None:
""" Constructor of LocalPlayerQueueMenu class
"""
self.window=window
self.menu=self.window.add_scroll_menu(self.TITLE,self.ROW,self.COLUMN,
self.ROW_SPAN,self.COLUMN_SPAN)
self.__config()
def __config(self):
""" Function that configures the ScrollMenu widget
"""
self.menu.set_focus_text("| Backspace - Remove song | Enter - Play Song")
def create(self) -> py_cui.widgets.ScrollMenu:
""" Function that returns the ScrollMenu Widget created
Returns
-------
menu : ScrollMenu
Return a ScrollMenu Widget
"""
return self.menu
class LocalPlayerSongsMenu:
TITLE="Song Files List"
ROW:int=3
COLUMN:int=2
ROW_SPAN:int=3
COLUMN_SPAN:int=3
window:py_cui.widget_set.WidgetSet
def __init__(self,window:py_cui.widget_set.WidgetSet) -> None:
""" Constructor of LocalPlayerSongsMenu class
"""
self.window=window
self.menu=self.window.add_scroll_menu(self.TITLE,self.ROW,self.COLUMN,
self.ROW_SPAN,self.COLUMN_SPAN)
self.__config()
def __config(self):
""" Function that configures the ScrollMenu widget
"""
pass
def create(self) -> py_cui.widgets.ScrollMenu:
""" Function that returns the ScrollMenu widget created
Returns
-------
menu : ScrollMenu
Returns a ScrollMenu widget
"""
return self.menu
| StarcoderdataPython |
3342431 | <reponame>alexxxiong/pybindx
import ntpath
from pygccxml import declarations
from pybindx.writers import base_writer
from pybindx.writers import method_writer
from pybindx.writers import class_arg_writer
from pybindx.writers import constructor_writer
class CppClassWrapperWriter(base_writer.CppBaseWrapperWriter):
"""
This class generates wrapper code for Cpp classes
"""
def __init__(self, class_info, wrapper_templates):
super(CppClassWrapperWriter, self).__init__(wrapper_templates)
self.hpp_string = ""
self.cpp_string = ""
self.class_info = class_info
self.class_decls = []
self.exposed_class_full_names = []
self.class_full_names = self.class_info.get_full_names()
self.class_short_names = self.class_info.get_short_names()
self.has_shared_ptr = True
self.is_abstract = False
self.needs_override = False
if len(self.class_full_names) != len(self.class_short_names):
message = 'Full and short name lists should be the same length'
raise ValueError(message)
def write_files(self, work_dir, class_short_name):
"""
Write the hpp and cpp wrapper codes to file
"""
path = work_dir + "/" + class_short_name
hpp_file = open(path + ".pybindx.hpp", "w")
hpp_file.write(self.hpp_string)
hpp_file.close()
cpp_file = open(path + ".pybindx.cpp", "w")
cpp_file.write(self.cpp_string)
cpp_file.close()
def add_hpp(self, class_short_name):
"""
Add the class wrapper hpp file
"""
wrapper_dict = {'class_short_name': class_short_name}
self.hpp_string += self.wrapper_templates['class_hpp_header'].format(**wrapper_dict)
def add_cpp_header(self, class_full_name, class_short_name):
"""
Add the 'top' of the class wrapper cpp file
"""
header = "wrapper_header_collection"
# Check for custom smart pointers
smart_ptr_handle = ""
smart_pointer_handle = self.class_info.hierarchy_attribute('smart_ptr_type')
if smart_pointer_handle is not None:
smart_ptr_template = self.wrapper_templates["smart_pointer_holder"]
smart_ptr_handle = "\n" + smart_ptr_template.format(smart_pointer_handle) + ";"
header_dict = {'wrapper_header_collection': header,
'class_short_name': class_short_name,
'class_full_name': class_full_name,
'smart_ptr_handle': smart_ptr_handle,
'includes': '#include "' + header + '.hpp"\n'}
extra_include_string = ""
common_include_file = self.class_info.hierarchy_attribute('common_include_file')
source_includes = self.class_info.hierarchy_attribute_gather('source_includes')
if not common_include_file:
for eachInclude in source_includes:
if eachInclude[0] != "<":
extra_include_string += '#include "' + eachInclude + '"\n'
else:
extra_include_string += '#include ' + eachInclude + '\n'
if self.class_info.source_file is not None:
extra_include_string += '#include "' + self.class_info.source_file + '"\n'
else:
include_name = ntpath.basename(self.class_info.decl.location.file_name)
extra_include_string += '#include "' + include_name + '"\n'
header_dict['includes'] = extra_include_string
header_string = self.wrapper_templates["class_cpp_header"].format(**header_dict)
self.cpp_string += header_string
for eachLine in self.class_info.prefix_code:
self.cpp_string += eachLine + "\n"
# Any custom generators
if self.class_info.custom_generator is not None:
self.cpp_string += self.class_info.custom_generator.get_class_cpp_pre_code(class_short_name)
def add_virtual_overrides(self, class_decl, short_class_name):
"""
Virtual over-rides if needed
"""
# Identify any methods needing over-rides, i.e. any that are virtual
# here or in a parent.
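        # pybind11 exposes virtual methods through a "trampoline" helper class
        # that re-dispatches each virtual call back to Python; the override block
        # generated here becomes that <ClassName>_Overloads helper.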
cpp_string = ""
cpp_typedef_string = ""
methods_needing_override = []
return_types = []
for eachMemberFunction in class_decl.member_functions(allow_empty=True):
is_pure_virtual = eachMemberFunction.virtuality == "pure virtual"
is_virtual = eachMemberFunction.virtuality == "virtual"
if is_pure_virtual or is_virtual:
methods_needing_override.append(eachMemberFunction)
return_types.append(eachMemberFunction.return_type.decl_string)
if is_pure_virtual:
self.is_abstract = True
for eachReturnString in return_types:
if eachReturnString != self.tidy_name(eachReturnString):
typedef_string = "typedef {full_name} {tidy_name};\n"
typedef_dict = {'full_name': eachReturnString,
'tidy_name': self.tidy_name(eachReturnString)}
cpp_typedef_string += typedef_string.format(**typedef_dict)
cpp_typedef_string += "\n"
self.needs_override = self.needs_override or len(methods_needing_override) > 0
if len(methods_needing_override) > 0:
for eachMethod in methods_needing_override:
num_arg_types = len(eachMethod.argument_types)
commandline_type = (num_arg_types == 2 and eachMethod.arguments[0].decl_type.decl_string == 'int' and eachMethod.arguments[1].decl_type.decl_string == 'char * *')
if not commandline_type:
writer = method_writer.CppMethodWrapperWriter(self.class_info,
eachMethod,
class_decl,
self.wrapper_templates,
short_class_name)
cpp_string = writer.add_override(cpp_string)
return cpp_string, cpp_typedef_string
@staticmethod
def is_taf_rpc_call(class_decl):
if len(class_decl.bases) == 1:
if hasattr(class_decl.bases[0], "related_class"):
return class_decl.bases[0].related_class.name == 'ServantProxy'
return False
@staticmethod
def get_full_method_name(func_decl):
full_name = func_decl.name
parent = func_decl.parent
while parent.name != "::":
full_name = parent.name + "::" + full_name
parent = parent.parent
return "::" + full_name
def write(self, work_dir):
if len(self.class_decls) != len(self.class_full_names):
message = 'Not enough class decls added to do write.'
raise ValueError(message)
for idx, full_name in enumerate(self.class_full_names):
short_name = self.class_short_names[idx]
class_decl = self.class_decls[idx]
self.hpp_string = ""
self.cpp_string = ""
# Add the cpp file header
self.add_cpp_header(class_decl.decl_string, short_name)
if self.is_taf_rpc_call(class_decl):
self.cpp_string += "\n#include \"servant/Application.h\"\n"
# Check for struct-enum pattern
if declarations.is_struct(class_decl):
enums = class_decl.enumerations(allow_empty=True)
if len(enums) == 1:
replacements = {'class': class_decl.name, 'enum': enums[0].name}
self.cpp_string += 'void register_{class}_class(py::module &m){{\n'.format(**replacements)
self.cpp_string += ' py::class_<{class}> myclass(m, "{class}");\n'.format(**replacements)
self.cpp_string += ' py::enum_<{class}::{enum}>(myclass, "{enum}")\n'.format(**replacements)
for each_val in enums[0].values:
replacements = {'class': class_decl.name,
'enum': enums[0].name,
'val': each_val[0]}
self.cpp_string += ' .value("{val}", {class}::{enum}::{val})\n'.format(**replacements)
self.cpp_string += " .export_values();\n}\n"
# Set up the hpp
self.add_hpp(short_name)
# Do the write
self.write_files(work_dir, short_name)
continue
# Define any virtual function overloads
overrides_string = ""
override_cpp_string, cpp_typedef_string = self.add_virtual_overrides(class_decl, short_name)
if self.needs_override:
over_ride_dict = {'class_short_name': short_name,
'class_base_name': self.class_info.name}
override_template = self.wrapper_templates['class_virtual_override_header']
self.cpp_string += cpp_typedef_string
self.cpp_string += override_template.format(**over_ride_dict)
self.cpp_string += override_cpp_string
self.cpp_string += "\n};\n"
# Add overrides if needed
overrides_string = ', ' + short_name + '_Overloads'
# Add smart ptr support if needed
smart_pointer_handle = self.class_info.hierarchy_attribute('smart_ptr_type')
ptr_support = ""
if self.has_shared_ptr and smart_pointer_handle is not None:
ptr_support = ', ' + smart_pointer_handle + '<' + short_name + ' > '
# Add base classes if needed
bases = ""
for eachBase in class_decl.bases:
cleaned_base = eachBase.related_class.name.replace(" ", "")
exposed = any(cleaned_base in t.replace(" ", "") for t in self.exposed_class_full_names)
public = not eachBase.access_type == "private"
if exposed and public:
bases += ', ' + eachBase.related_class.decl_string + " "
# Add the class registration
class_definition_dict = {'short_name': short_name,
'short_alias': short_name if short_name not in self.class_info.module_info.class_alias else self.class_info.module_info.class_alias[short_name],
'overrides_string': overrides_string,
'ptr_support': ptr_support,
'bases': bases}
class_definition_template = self.wrapper_templates["class_definition"]
self.cpp_string += class_definition_template.format(**class_definition_dict)
# Add constructors
# if not self.is_abstract and not class_decl.is_abstract:
# No constructors for classes with private pure virtual methods!
ppv_class = False
for eachMemberFunction in class_decl.member_functions(allow_empty=True):
if eachMemberFunction.virtuality == "pure virtual" and eachMemberFunction.access_type == "private":
ppv_class = True
break
if not ppv_class:
query = declarations.access_type_matcher_t('public')
for eachConstructor in class_decl.constructors(function=query,
allow_empty=True):
writer = constructor_writer.CppConsturctorWrapperWriter(self.class_info,
eachConstructor,
class_decl,
self.wrapper_templates,
short_name)
self.cpp_string = writer.add_self(self.cpp_string)
query = declarations.access_type_matcher_t('public')
# Add public member args
for eachMemberArg in class_decl.variables(function=query, allow_empty=True):
excluded = False
if self.class_info.excluded_variables is not None:
excluded = (eachMemberArg.name in self.class_info.excluded_variables)
if not excluded:
writer = class_arg_writer.CppClassArgWrapperWriter(self.class_info,
eachMemberArg,
class_decl,
self.wrapper_templates,
short_name)
self.cpp_string = writer.add_self(self.cpp_string)
# Add public member functions
for eachMemberFunction in class_decl.member_functions(function=query, allow_empty=True):
excluded = False
if self.class_info.excluded_methods is not None:
excluded = (eachMemberFunction.name in self.class_info.excluded_methods)
if not excluded:
writer = method_writer.CppMethodWrapperWriter(self.class_info,
eachMemberFunction,
class_decl,
self.wrapper_templates,
short_name)
self.cpp_string = writer.add_self(self.cpp_string)
# Any custom generators
if self.class_info.custom_generator is not None:
self.cpp_string += self.class_info.custom_generator.get_class_cpp_def_code(short_name)
# Add repr function if needed
if self.class_info.decl.decl_string in self.class_info.module_info.repr_mapping:
function = self.class_info.module_info.repr_mapping[self.class_info.decl.decl_string]
method_name = self.get_full_method_name(function.decl)
method_dict = {'method_name': method_name,
'class_short_name': short_name
}
self.cpp_string += self.wrapper_templates['class_repr_method'].format(**method_dict)
# Close the class definition
self.cpp_string += ' ;\n'
# Add taf rpc call
if self.is_taf_rpc_call(class_decl):
rpc_call_dict = {'class_short_name': short_name}
self.cpp_string += self.wrapper_templates['taf_proxy_call'].format(**rpc_call_dict)
self.cpp_string += '\n}\n'
# Set up the hpp
self.add_hpp(short_name)
# Do the write
self.write_files(work_dir, short_name)
| StarcoderdataPython |
6425 | # Generated by Django 3.0.7 on 2020-08-24 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datasets', '0008_auto_20200821_1427'),
]
operations = [
migrations.AddField(
model_name='rawdar',
name='AsB',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='AsB_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='AsB_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Ba',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Ba_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Ba_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Cs',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Cs_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Cs_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='DMA',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='DMA_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='DMA_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='MMA',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='MMA_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='MMA_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Sr',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='Sr_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='Sr_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='iAs',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='rawdar',
name='iAs_BDL',
field=models.CharField(choices=[('1', 'below detection level'), ('0', 'above detection level'), ('nan', 'invalid')], default=0, max_length=3),
preserve_default=False,
),
migrations.AddField(
model_name='rawdar',
name='iAs_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ag',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ag_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Al',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Al_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='As',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='As_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Be',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Be_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cd',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cd_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Co',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Co_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cr',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cr_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cu',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Cu_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Fe',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Fe_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Hg',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Hg_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mo',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Mo_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ni',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Ni_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Pb',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Pb_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sb',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sb_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Se',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Se_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Sn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Tl',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Tl_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='U',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='U_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='V',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='V_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='W',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='W_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Zn',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='Zn_IDL',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='rawdar',
name='urine_specific_gravity',
field=models.FloatField(blank=True, null=True),
),
]
| StarcoderdataPython |
1722953 | from autosklearn.estimators import AutoSklearnClassifier | StarcoderdataPython |
199253 | <filename>download/xml_pickle/xml_pickle-0.30.py
"""Store Python objects to (pickle-like) XML Documents
Note 0:
See http://gnosis.cx/publish/programming/xml_matters_1.txt
for a detailed discussion of this module.
Note 1:
The XML-SIG distribution is changed fairly frequently while
it is in beta versions. The changes in turn are extremely
likely to affect the functioning of [xml_pickle].
This version of [xml_pickle] is known to work with PyXML
0.6.1, and will probably continue to work with later betas
and also with the XML-SIG distribution included in Python
2.0. To download a current version of PyXML, go to:
http://download.sourceforge.net/pyxml/
Version 0.22 of [xml_pickle] was designed around PyXML 0.5.2.
If you use an older version of PyXML, you may download a
compatible version of [xml_pickle] at:
http://gnosis.cx/download/xml_pickle-0.22.py
And a known-to-be-compatible PyXML distribution at:
http://gnosis.cx/download/py_xml_04-21-00.exe
http://gnosis.cx/download/py_xml_04-21-00.zip
The first URL is the Windows self-installer, the latter is
simply an archive of those files to be unpacked under
$PYTHONPATH/xml.
Usage:
# By inheritence
from xml_pickle import XML_Pickler
class MyClass(XML_Pickler):
# create some behavior and attributes for MyClass...
o1 = MyClass()
xml_str = o.dumps()
o2 = MyClass()
o2.loads(xml_str)
# With inline instantiation
from xml_pickle import XML_Pickler
o1 = DataClass()
# ...assign attribute values to o1...
xml_str = XML_Pickler(o1).dumps()
o2 = XML_Pickler().loads(xml_str)
Classes:
PyObject
XML_Pickler
Functions:
thing_from_dom(dom_node, container)
obj_from_node(node)
subnodes(node)
_attr_tag(...)
_item_tag(...)
_entry_tag(...)
_tag_completer(...)
_klass(...)
safe_eval(s)
safe_string(s)
unsafe_string(s)
"""
__version__ = "$Revision: 0.30 $"
__author__=["<NAME> (<EMAIL>)",]
__thanks_to__=["<NAME> (<EMAIL>)",
"<NAME> (<EMAIL>)",
"<NAME> (<EMAIL>)"]
__copyright__="""
This file is released to the public domain. I (dqm) would
appreciate it if you choose to keep derived works under terms
that promote freedom, but obviously am giving up any rights
to compel such.
"""
__history__="""
0.1 Initial version
0.22 Compatible with PyXML 0.52
0.30 Compatible with PyXML 0.61+
"""
from types import *
from xml.dom import ext
from xml.dom.ext.reader import Sax2
import cStringIO
XMLPicklingError = "xml_pickle.XMLPicklingError"
XMLUnpicklingError = "xml_pickle.XMLUnpicklingError"
class PyObject:
"""Placeholder template class"""
def __init__(self, __fakename__=None):
if __fakename__: self.__fakename__ = __fakename__
class XML_Pickler:
"""Framework for 'pickle to XML'"""
def __init__(self, py_obj=None):
if py_obj is not None:
if type(py_obj)<>InstanceType:
raise ValueError, \
"XML_Pickler must be initialized with Instance (or None)"
self.py_obj = py_obj
else:
self.py_obj = PyObject(self.__class__.__name__)
def __setattr__(self, name, value):
if name == 'py_obj':
self.__dict__[name] = value
else:
setattr(self.py_obj, name, value)
def __getattr__(self, name):
return getattr(self.py_obj, name)
def __delattr__(self, name):
del self.py_obj.__dict__[name]
def dump(self, fh):
# admittedly, our approach requires creating whole output XML in
# memory first, which could be large for complex object. Maybe
# we'll make this more efficient later.
fh.write(self.dumps())
def load(self, fh):
return thing_from_dom(Sax2.FromXml(fh.read(), validate=0))
def dumps(self):
xml_str = '<?xml version="1.0"?>\n' +\
'<!DOCTYPE PyObject SYSTEM "PyObjects.dtd">\n'
xml_str = xml_str+'<PyObject class="%s">\n' % _klass(self.py_obj)
for name in dir(self.py_obj):
xml_str = xml_str+_attr_tag(name, getattr(self, name))
xml_str = xml_str+'</PyObject>'
return xml_str
def loads(self, xml_str):
fh = cStringIO.StringIO(xml_str)
obj = self.load(fh)
fh.close()
return obj
#-- support functions
def thing_from_dom(dom_node, container=None):
"""Converts an [xml_pickle] DOM tree to a "native" Python object"""
for node in subnodes(dom_node):
if node.nodeName == "PyObject":
# Add all the subnodes to PyObject container
container = thing_from_dom(node, obj_from_node(node))
elif node.nodeName == 'attr':
try:
node_type = node.attributes[('','type')].value
except:
print "node", node.attributes, repr(node.attributes)
print node.attributes.keys()
raise # WHAT?!
node_name = node.attributes[('', 'name')].value
if node_type == 'None':
setattr(container, node_name, None)
elif node_type == 'numeric':
node_val = safe_eval(node.attributes[('','value')].value)
setattr(container, node_name, node_val)
elif node_type == 'string':
node_val = node.attributes[('','value')].value
node_val = unsafe_string(node_val)
setattr(container, node_name, node_val)
elif node_type == 'list':
subcontainer = thing_from_dom(node, [])
setattr(container, node_name, subcontainer)
elif node_type == 'tuple':
subcontainer = thing_from_dom(node, []) # use list then convert
setattr(container, node_name, tuple(subcontainer))
elif node_type == 'dict':
subcontainer = thing_from_dom(node, {})
setattr(container, node_name, subcontainer)
elif node_type == 'PyObject':
subcontainer = thing_from_dom(node, obj_from_node(node))
setattr(container, node_name, subcontainer)
elif node.nodeName in ['item', 'key', 'val']:
# -- Odd behavior warning --
# The 'node_type' expression has an odd tendency to be a
# one-element tuple rather than a string. Doing the str()
# fixes things, but I'm not sure why!
# -- About key/val nodes --
# There *should not* be mutable types as keys, but to cover
# all cases, elif's are defined for mutable types. Furthermore,
# there should only ever be *one* item in any key/val list,
# but we again rely on other validation of the XML happening.
node_type = str(node.attributes[('','type')].value)
if node_type == 'numeric':
node_val = safe_eval(node.attributes[('','value')].value)
container.append(node_val)
elif node_type == 'string':
node_val = node.attributes[('','value')].value
node_val = unsafe_string(node_val)
container.append(node_val)
elif node_type == 'list':
subcontainer = thing_from_dom(node, [])
container.append(subcontainer)
elif node_type == 'tuple':
subcontainer = thing_from_dom(node, []) # use list then convert
container.append(tuple(subcontainer))
elif node_type == 'dict':
subcontainer = thing_from_dom(node, {})
container.append(subcontainer)
elif node_type == 'PyObject':
subcontainer = thing_from_dom(node, obj_from_node(node))
container.append(subcontainer)
elif node.nodeName == 'entry':
keyval = thing_from_dom(node, [])
key, val = keyval[0], keyval[1]
container[key] = val
else:
raise XMLUnpicklingError, \
"element %s is not in PyObjects.dtd" % node.nodeName
return container
def obj_from_node(node):
# Get classname of object (with fallback to 'PyObject')
try:
if node.attributes:
klass = node.attributes[('','class')].value
else:
klass = 'PyObject'
except KeyError: klass = 'PyObject'
# does the class exist, or should we create it?
try: safe_eval(klass)
except NameError:
exec ('class %s: pass' % klass)
return eval('%s()' % klass)
def subnodes(node):
return filter(lambda n: n.nodeName<>'#text', node.childNodes)
def _attr_tag(name, thing, level=0):
start_tag = ' '*level+('<attr name="%s" ' % name)
close_tag =' '*level+'</attr>\n'
if name == '__fakename__': return ''
else:
return _tag_completer(start_tag, thing, close_tag, level)
def _item_tag(thing, level=0):
start_tag = ' '*level+'<item '
close_tag =' '*level+'</item>\n'
return _tag_completer(start_tag, thing, close_tag, level)
def _entry_tag(key, val, level=0):
start_tag = ' '*level+'<entry>\n'
close_tag = ' '*level+'</entry>\n'
start_key = ' '*level+' <key '
close_key = ' '*level+' </key>\n'
key_block = _tag_completer(start_key, key, close_key, level+1)
start_val = ' '*level+' <val '
close_val = ' '*level+' </val>\n'
val_block = _tag_completer(start_val, val, close_val, level+1)
return (start_tag + key_block + val_block + close_tag)
def _tag_completer(start_tag, thing, close_tag, level=0):
tag_body = ''
if type(thing) == NoneType:
start_tag = start_tag+'type="None" />\n'
close_tag = ''
elif type(thing) in [IntType, LongType, FloatType, ComplexType]:
start_tag = start_tag+'type="numeric" value="%s" />\n' % `thing`
close_tag = ''
elif type(thing) in [StringType]:
thing = safe_string(thing)
start_tag = start_tag+'type="string" value="%s" />\n' % thing
close_tag = ''
elif type(thing) in [TupleType]:
start_tag = start_tag+'type="tuple">\n'
for item in thing:
tag_body = tag_body+_item_tag(item, level+1)
elif type(thing) in [ListType]:
start_tag = start_tag+'type="list">\n'
for item in thing:
tag_body = tag_body+_item_tag(item, level+1)
elif type(thing) in [DictType]:
start_tag = start_tag+'type="dict">\n'
for key, val in thing.items():
tag_body = tag_body+_entry_tag(key, val, level+1)
elif type(thing) in [InstanceType]:
start_tag = start_tag+'type="PyObject" class="%s">\n' % _klass(thing)
for name in dir(thing):
tag_body = tag_body+_attr_tag(name, getattr(thing, name), level+1)
else:
raise XMLPicklingError, "non-handled type %s." % type(thing)
return start_tag+tag_body+close_tag
def _klass(thing):
if type(thing)<>InstanceType:
raise ValueError, \
"non-Instance type %s passed to _klass()" % type(thing)
if hasattr(thing, '__fakename__'): return thing.__fakename__
else: return thing.__class__.__name__
def safe_eval(s):
if 0: # Condition for malicious string in eval() block
raise "SecurityError", \
"Malicious string '%s' should not be eval()'d" % s
else:
return eval(s)
def safe_string(s):
import string, re
# markup XML entities
s = string.replace(s, '&', '&')
s = string.replace(s, '<', '<')
s = string.replace(s, '>', '>')
s = string.replace(s, '"', '"')
s = string.replace(s, "'", ''')
# for others, use Python style escapes
s = repr(s)
return s[1:-1] # without the extra single-quotes
def unsafe_string(s):
import string, re
# for Python escapes, exec the string
# (niggle w/ literalizing apostrophe)
s = string.replace(s, "'", r"\047")
exec "s='"+s+"'"
# XML entities (DOM does it for us)
return s
#-- Hand generated test object
test_xml = """<?xml version="1.0"?>
<!DOCTYPE PyObject SYSTEM "PyObjects.dtd">
<PyObject class="Automobile">
<attr name="doors" type="numeric" value="4" />
<attr name="make" type="string" value="Honda" />
<attr name="tow_hitch" type="None" />
<attr name="prev_owners" type="tuple">
<item type="string" value="<NAME>" />
<item type="tuple">
<item type="string" value="<NAME>" />
<item type="string" value="<NAME>" />
</item>
<item type="string" value="<NAME>" />
</attr>
<attr name="repairs" type="list">
<item type="string" value="June 1, 1999: Fixed radiator" />
<item type="PyObject" class="Swindle">
<attr name="date" type="string" value="July 1, 1999" />
<attr name="swindler" type="string" value="Ed's Auto" />
<attr name="purport" type="string" value="Fix A/C" />
</item>
</attr>
<attr name="options" type="dict">
<entry>
<key type="string" value="Cup Holders" />
<val type="numeric" value="4" />
</entry>
<entry>
<key type="string" value="Custom Wheels" />
<val type="string" value="Chrome Spoked" />
</entry>
</attr>
<attr name="engine" type="PyObject" class="Engine">
<attr name="cylinders" type="numeric" value="4" />
<attr name="manufacturer" type="string" value="Ford" />
</attr>
</PyObject>"""
#-- Self test
if __name__ == "__main__":
class MyClass: pass
o = XML_Pickler()
o.num = 37
o.str = "Hello World \n Special Chars: \t \000 < > & ' \207"
o.lst = [1, 3.5, 2, 4+7j]
o2 = MyClass()
o2.tup = ("x", "y", "z")
o2.num = 2+2j
o2.dct = { "this": "that", "spam": "eggs", 3.14: "about PI" }
o.obj = o2
print '------* Print python-defined pickled object *-----'
print o.dumps()
print '-----* Load a test xml_pickle object, and print it *-----'
u = o.loads(test_xml)
print XML_Pickler(u).dumps()
| StarcoderdataPython |
3200827 | <filename>permission/views.py
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.contenttypes.models import ContentType
# Create your views here.
# from reception.models import index
from django.contrib.auth.models import User,Permission,Group
def useraddr(request):
    username = request.POST.get('username')
    password = request.POST.get('password')
    mail = request.POST.get('mail')
    try:
        User.objects.create_user(username, mail, password)
    except Exception:
        print('User already exists')
def grouaddruser(request):
    usernamei = request.POST.get('username')
    group = request.POST.get('group')
    userobject = User.objects.get(username=usernamei)
    userobject.groups.add(group)
    # Get the group membership object:
    # u = User.objects.get(username='alice')
    # alice_group = User.groups.through.objects.get(user=u)
    # Change the user's group:
    # alice_group.group = manager
    # alice_group.save()
def permissioninquire(request):  # Does the user have permission for this module?
    username = request.POST.get('username')
    modeapp = request.POST.get('modeapp')
    user = User.objects.get(username=username)
    user.has_perm('%s_permission' % modeapp)
    return HttpResponse('123')
    # e.g. alice.has_perm('apps.reception') -> False
def permissioninquire2(request):
    usernamei = request.POST.get('username')
    usernamei = User.objects.get(username=usernamei)
    usernamei.user_permissions.all()  # query the user's own permissions
    usernamei.groups.all()  # query the user's groups
    # alice.groups.all()[0].permissions.all()  # permissions granted through a group
def permissionindex(request):  # Create a new permission
    modeapp = request.POST.get('modeapp')
    # Note: get_for_model() expects a model class, not its name as a string
    content_type = ContentType.objects.get_for_model(modeapp)
    permission = Permission.objects.create(
        codename=('%s_permission' % modeapp),
        name=('%s_permission_posts' % modeapp),
        content_type=content_type
    )
    return HttpResponse('123')
def permissionuser(request):  # Permission operations on a single user
    what = int(request.POST.get('what'))
    username = request.POST.get('username')
    permissioni = request.POST.get('permissioni')
    username = User.objects.get(username=username)
    if what == 1:  # add a permission to the user
        username.user_permissions.add(permissioni)
    # elif what == 2:  # reset the user's permissions
    #     username.user_permissions.set([permissioni])
    elif what == 3:  # remove a permission from the user
        username.user_permissions.remove(permissioni)
    elif what == 4:  # clear all of the user's permissions
        username.user_permissions.clear()
def permissiongroup(request):  # Group operations on a single user
    what = int(request.POST.get('what'))
    username = request.POST.get('username')
    permissioni = request.POST.get('permissioni')
    username = User.objects.get(username=username)
    if what == 1:  # add the user to a group
        username.groups.add(permissioni)
    # elif what == 2:  # reset the user's groups
    #     username.groups.set([permissioni])
    elif what == 3:  # remove the user from a group
        username.groups.remove(permissioni)
    elif what == 4:  # clear all of the user's groups
username.groups.clear() | StarcoderdataPython |
74972 | class Test:
"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
deserunt mollit anim id est laborum.
.. sourcecode:: pycon
>>> # extract 100 LDA topics, using default parameters
>>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)
using distributed version with 4 workers
        running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documents,
updating model once every 40000 documents
..
Some another text
"""
some_field = 1
| StarcoderdataPython |
149742 | <reponame>mutalyzer/spdi-parser<filename>mutalyzer_spdi_parser/convert.py
"""
Module for converting SPDI descriptions and lark parse trees
to their equivalent dictionary models.
"""
from lark import Transformer
from .spdi_parser import parse
def to_spdi_model(description):
"""
Convert an SPDI description to a dictionary model.
:arg str description: SPDI description.
:returns: Description dictionary model.
:rtype: dict
"""
return parse_tree_to_model(parse(description))
def to_hgvs_internal_model(description):
"""
Convert an SPDI description to an internal HGVS dictionary model
(delins variants with internal locations).
:arg str description: SPDI description.
:returns: HGVS internal dictionary model.
:rtype: dict
"""
return _to_hgvs_internal(parse_tree_to_model(parse(description)))
def parse_tree_to_model(parse_tree):
"""
Convert a parse tree to a dictionary model.
:arg lark.Tree parse_tree: SPDI description equivalent parse tree.
:returns: Description dictionary model.
:rtype: dict
"""
return Converter().transform(parse_tree)
class Converter(Transformer):
def description(self, children):
return {k: v for d in children for k, v in d.items()}
def deleted_sequence(self, children):
return {"deleted_sequence": children[0]}
def inserted_sequence(self, children):
return {"inserted_sequence": children[0]}
def position(self, children):
return {"position": children[0]}
def deleted_length(self, children):
return {"deleted_length": children[0]}
def sequence(self, children):
return children[0]
def NUMBER(self, name):
return int(name)
def D_SEQUENCE(self, name):
return name.value
def R_SEQUENCE(self, name):
return name.value
def P_SEQUENCE(self, name):
return name.value
def ID(self, name):
return {"seq_id": name.value}
def _to_hgvs_internal(s_m):
m = {"type": "description_dna", "reference": {"id": s_m["seq_id"]}}
v = {"type": "deletion_insertion"}
if s_m.get("deleted_sequence"):
v["location"] = _range(
s_m["position"], s_m["position"] + len(s_m["deleted_sequence"])
)
v["deleted"] = [{"sequence": s_m["deleted_sequence"], "source": "description"}]
elif s_m.get("deleted_length"):
v["location"] = _range(s_m["position"], s_m["position"] + s_m["deleted_length"])
else:
v["location"] = _range(s_m["position"], s_m["position"])
if s_m.get("inserted_sequence"):
v["inserted"] = [
{"sequence": s_m["inserted_sequence"], "source": "description"}
]
if not s_m.get("inserted_sequence") and not (
s_m.get("deleted_sequence") or s_m.get("deleted_length")
):
v["location"] = _range(s_m["position"], s_m["position"] + 1)
v["inserted"] = [
{
"location": _range(s_m["position"], s_m["position"] + 1),
"source": "reference",
}
]
m["variants"] = [v]
return m
def _range(s, e):
return {
"type": "range",
"start": {
"type": "point",
"position": s,
},
"end": {
"type": "point",
"position": e,
},
}
def _point(p):
return {
"type": "point",
"position": p,
}
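
# Illustrative usage only -- the accepted SPDI syntax is defined by the lark
# grammar in spdi_parser, but descriptions generally take the form
# "sequence:position:deleted:inserted", e.g.:
#
#   to_spdi_model("NG_012337.1:10:C:T")
#   # -> {'seq_id': 'NG_012337.1', 'position': 10,
#   #     'deleted_sequence': 'C', 'inserted_sequence': 'T'}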
| StarcoderdataPython |
111396 | <gh_stars>100-1000
"""
Paper: Recurrent Neural Networks with Top-k Gains for Session-based Recommendations
Author: <NAME>, and <NAME>
Reference: https://github.com/hidasib/GRU4Rec
https://github.com/Songweiping/GRU4Rec_TensorFlow
@author: <NAME>
"""
import numpy as np
from model.AbstractRecommender import SeqAbstractRecommender
import tensorflow as tf
from util import l2_loss
class GRU4RecPlus(SeqAbstractRecommender):
def __init__(self, sess, dataset, conf):
super(GRU4RecPlus, self).__init__(dataset, conf)
self.train_matrix = dataset.train_matrix
self.dataset = dataset
self.users_num, self.items_num = self.train_matrix.shape
self.lr = conf["lr"]
self.reg = conf["reg"]
self.layers = conf["layers"]
self.batch_size = conf["batch_size"]
self.n_sample = conf["n_sample"]
self.sample_alpha = conf["sample_alpha"]
self.epochs = conf["epochs"]
self.bpr_reg = conf["bpr_reg"]
if conf["hidden_act"] == "relu":
self.hidden_act = tf.nn.relu
elif conf["hidden_act"] == "tanh":
self.hidden_act = tf.nn.tanh
else:
raise ValueError("There is not hidden_act named '%s'." % conf["hidden_act"])
# final_act = leaky-relu
if conf["final_act"] == "relu":
self.final_act = tf.nn.relu
elif conf["final_act"] == "linear":
self.final_act = tf.identity
elif conf["final_act"] == "leaky_relu":
self.final_act = tf.nn.leaky_relu
else:
raise ValueError("There is not final_act named '%s'." % conf["final_act"])
if conf["loss"] == "bpr_max":
self.loss_fun = self._bpr_max_loss
elif conf["loss"] == "top1_max":
self.loss_fun = self._top1_max_loss
else:
raise ValueError("There is not loss named '%s'." % conf["loss"])
self.data_uit, self.offset_idx = self._init_data()
# for sampling negative items
_, pop = np.unique(self.data_uit[:, 1], return_counts=True)
pop = np.power(pop, self.sample_alpha)
pop_cumsum = np.cumsum(pop)
self.pop_cumsum = pop_cumsum / pop_cumsum[-1]
self.sess = sess
def _init_data(self):
time_dok = self.dataset.time_matrix.todok()
data_uit = [[row, col, time] for (row, col), time in time_dok.items()]
data_uit.sort(key=lambda x: (x[0], x[-1]))
data_uit = np.array(data_uit, dtype=np.int32)
_, idx = np.unique(data_uit[:, 0], return_index=True)
offset_idx = np.zeros(len(idx)+1, dtype=np.int32)
offset_idx[:-1] = idx
offset_idx[-1] = len(data_uit)
return data_uit, offset_idx
def _create_variable(self):
self.X_ph = tf.placeholder(tf.int32, [self.batch_size], name='input')
self.Y_ph = tf.placeholder(tf.int32, [self.batch_size+self.n_sample], name='output')
self.state_ph = [tf.placeholder(tf.float32, [self.batch_size, n_unit], name='layer_%d_state' % idx)
for idx, n_unit in enumerate(self.layers)]
init = tf.random.truncated_normal([self.items_num, self.layers[0]], mean=0.0, stddev=0.01)
self.input_embeddings = tf.Variable(init, dtype=tf.float32, name="input_embeddings")
init = tf.random.truncated_normal([self.items_num, self.layers[-1]], mean=0.0, stddev=0.01)
self.item_embeddings = tf.Variable(init, dtype=tf.float32, name="item_embeddings")
self.item_biases = tf.Variable(tf.zeros([self.items_num]), dtype=tf.float32, name="item_biases")
def _softmax_neg(self, logits):
# logits: (b, size_y)
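        # Softmax over the sampled negative scores only: the diagonal entry of
        # each row is that row's positive item, so it is masked out (hm) before
        # exponentiating and normalising.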
hm = 1.0 - tf.eye(tf.shape(logits)[0], tf.shape(logits)[1])
logits = logits * hm
logits = logits - tf.reduce_max(logits, axis=1, keep_dims=True)
e_x = tf.exp(logits) * hm # (b, size_y)
e_x = e_x / tf.reduce_sum(e_x, axis=1, keep_dims=True)
return e_x # (b, size_y)
def _bpr_max_loss(self, logits):
# logits: (b, size_y)
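        # BPR-max loss from the paper: with s_j = softmax over negative scores,
        #   L = -log( sum_j s_j * sigmoid(r_pos - r_j) ) + bpr_reg * sum_j s_j * r_j^2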
softmax_scores = self._softmax_neg(logits) # (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
prob = tf.sigmoid((pos_logits - logits)) # (b, size_y)
prob = tf.reduce_sum(tf.multiply(prob, softmax_scores), axis=1) # (b,)
loss = -tf.log(prob + 1e-24)
reg_loss = tf.reduce_sum(tf.multiply(tf.pow(logits, 2), softmax_scores), axis=1) # (b,)
return tf.reduce_mean(loss + self.bpr_reg*reg_loss)
def _top1_max_loss(self, logits):
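        # TOP1-max loss from the paper: with s_j = softmax over negative scores,
        #   L = sum_j s_j * ( sigmoid(r_j - r_pos) + sigmoid(r_j^2) )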
softmax_scores = self._softmax_neg(logits) # (b, size_y)
pos_logits = tf.matrix_diag_part(logits) # (b,)
pos_logits = tf.reshape(pos_logits, shape=[-1, 1]) # (b, 1)
prob = tf.sigmoid(-pos_logits + logits) + tf.sigmoid(tf.pow(logits, 2))
loss = tf.reduce_sum(tf.multiply(prob, softmax_scores), axis=1)
return tf.reduce_mean(loss)
def build_graph(self):
self._create_variable()
# get embedding and bias
# b: batch size
# l1: the dim of the first layer
# ln: the dim of the last layer
# size_y: the length of Y_ph, i.e., n_sample+batch_size
cells = [tf.nn.rnn_cell.GRUCell(size, activation=self.hidden_act) for size in self.layers]
drop_cell = [tf.nn.rnn_cell.DropoutWrapper(cell) for cell in cells]
stacked_cell = tf.nn.rnn_cell.MultiRNNCell(drop_cell)
inputs = tf.nn.embedding_lookup(self.input_embeddings, self.X_ph) # (b, l1)
outputs, state = stacked_cell(inputs, state=self.state_ph)
self.u_emb = outputs # outputs: (b, ln)
self.final_state = state # [(b, l1), (b, l2), ..., (b, ln)]
# for training
items_embed = tf.nn.embedding_lookup(self.item_embeddings, self.Y_ph) # (size_y, ln)
items_bias = tf.gather(self.item_biases, self.Y_ph) # (size_y,)
logits = tf.matmul(outputs, items_embed, transpose_b=True) + items_bias # (b, size_y)
logits = self.final_act(logits)
loss = self.loss_fun(logits)
# reg loss
reg_loss = l2_loss(inputs, items_embed, items_bias)
final_loss = loss + self.reg*reg_loss
self.update_opt = tf.train.AdamOptimizer(self.lr).minimize(final_loss)
def _sample_neg_items(self, size):
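        # Popularity-based negative sampling: items are drawn with probability
        # proportional to popularity^sample_alpha using the precomputed CDF.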
samples = np.searchsorted(self.pop_cumsum, np.random.rand(size))
return samples
def train_model(self):
self.logger.info(self.evaluator.metrics_info())
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
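        # Session-parallel mini-batches (as in GRU4Rec): each of the batch_size
        # "lanes" walks through one user's event sequence; when a lane runs out
        # of events it is handed the next user and its hidden state is reset.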
for epoch in range(self.epochs):
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
user_idx = np.random.permutation(len(offset_idx) - 1)
iters = np.arange(self.batch_size, dtype=np.int32)
maxiter = iters.max()
start = offset_idx[user_idx[iters]]
end = offset_idx[user_idx[iters]+1]
finished = False
while not finished:
min_len = (end - start).min()
out_idx = data_items[start]
for i in range(min_len-1):
in_idx = out_idx
out_idx = data_items[start+i+1]
out_items = out_idx
if self.n_sample:
neg_items = self._sample_neg_items(self.n_sample)
out_items = np.hstack([out_items, neg_items])
feed = {self.X_ph: in_idx, self.Y_ph: out_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
_, state = self.sess.run([self.update_opt, self.final_state], feed_dict=feed)
start = start+min_len-1
mask = np.arange(len(iters))[(end - start) <= 1]
for idx in mask:
maxiter += 1
if maxiter >= len(offset_idx)-1:
finished = True
break
iters[idx] = maxiter
start[idx] = offset_idx[user_idx[maxiter]]
end[idx] = offset_idx[user_idx[maxiter]+1]
if len(mask):
for i in range(len(self.layers)):
state[i][mask] = 0
result = self.evaluate_model()
self.logger.info("epoch %d:\t%s" % (epoch, result))
def _get_user_embeddings(self):
users = np.arange(self.users_num, dtype=np.int32)
u_nnz = np.array([self.train_matrix[u].nnz for u in users], dtype=np.int32)
users = users[np.argsort(-u_nnz)]
user_embeddings = np.zeros([self.users_num, self.layers[-1]], dtype=np.float32) # saving user embedding
data_uit, offset_idx = self.data_uit, self.offset_idx
data_items = data_uit[:, 1]
state = [np.zeros([self.batch_size, n_unit], dtype=np.float32) for n_unit in self.layers]
batch_iter = np.arange(self.batch_size, dtype=np.int32)
next_iter = batch_iter.max() + 1
start = offset_idx[users[batch_iter]]
end = offset_idx[users[batch_iter] + 1] # the start index of next user
batch_mask = np.ones([self.batch_size], dtype=np.int32)
while np.sum(batch_mask) > 0:
min_len = (end - start).min()
for i in range(min_len):
cur_items = data_items[start + i]
feed = {self.X_ph: cur_items}
for l in range(len(self.layers)):
feed[self.state_ph[l]] = state[l]
u_emb, state = self.sess.run([self.u_emb, self.final_state], feed_dict=feed)
start = start + min_len
mask = np.arange(self.batch_size)[(end - start) == 0]
for idx in mask:
u = users[batch_iter[idx]]
user_embeddings[u] = u_emb[idx] # saving user embedding
if next_iter < self.users_num:
batch_iter[idx] = next_iter
start[idx] = offset_idx[users[next_iter]]
end[idx] = offset_idx[users[next_iter] + 1]
next_iter += 1
else:
batch_mask[idx] = 0
start[idx] = 0
end[idx] = offset_idx[-1]
for i, _ in enumerate(self.layers):
state[i][mask] = 0
return user_embeddings
def evaluate_model(self):
self.cur_user_embeddings = self._get_user_embeddings()
self.cur_item_embeddings, self.cur_item_biases = self.sess.run([self.item_embeddings, self.item_biases])
return self.evaluator.evaluate(self)
def predict(self, users, items=None):
user_embeddings = self.cur_user_embeddings[users]
all_ratings = np.matmul(user_embeddings, self.cur_item_embeddings.T) + self.cur_item_biases
# final_act = leaky-relu
if self.final_act == tf.nn.relu:
all_ratings = np.maximum(all_ratings, 0)
elif self.final_act == tf.identity:
all_ratings = all_ratings
elif self.final_act == tf.nn.leaky_relu:
all_ratings = np.maximum(all_ratings, all_ratings*0.2)
else:
pass
all_ratings = np.array(all_ratings, dtype=np.float32)
if items is not None:
all_ratings = [all_ratings[idx][item] for idx, item in enumerate(items)]
return all_ratings
| StarcoderdataPython |
1600058 | from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
from src.webservice.base import Base
db = SQLAlchemy()
Base.query = db.session.query_property()
class Menu(Base):
__tablename__ = 'tbl_Menu'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
parent = db.Column(db.Integer, db.ForeignKey('tbl_Menu.id'))
children = db.relationship("Menu")
order = db.Column(db.Integer)
level = db.Column(db.Integer)
@staticmethod
def get_all_menus():
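        # Flatten the (up to three-level) menu tree into a single ordered list,
        # prefixing display_name with one "->" per nesting level.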
output = []
root_menus = db.session.query(Menu).filter(Menu.parent == None)
for root in root_menus:
output.append({'id': root.id, 'name': root.name, 'display_name': root.name,
'parent': root.parent, 'parent_name': '', 'order': root.order})
root.children = sorted(root.children, key=lambda structure: structure.order)
for child in root.children:
output.append({'id': child.id, 'name': child.name, 'display_name': '-> '+child.name,
'parent': child.parent, 'parent_name': root.name, 'order': child.order})
child.children = sorted(child.children, key=lambda structure: structure.order)
for grandchild in child.children:
output.append({'id': grandchild.id, 'name': grandchild.name, 'display_name': '-> -> '+grandchild.name,
'parent': grandchild.parent, 'parent_name': child.name, 'order': grandchild.order})
db.session.commit()
return jsonify({'response': output})
@staticmethod
def create_new_menu(request):
data = request.get_json()
new_menu = Menu(name=data["name"], parent=data["parent"])
db.session.add(new_menu)
db.session.commit()
return jsonify({'result': 'Menu has been created'})
@staticmethod
def update_menu(request):
data = request.get_json()
menu = db.session.query(Menu).filter_by(id=data['id']).first()
if 'name' in data:
menu.name = data['name']
if 'parent' in data:
menu.parent = data['parent']
db.session.commit()
return jsonify({'result': 'Menu has been changed'})
@staticmethod
def delete_menu(request):
data = request.get_json()
menus = db.session.query(Menu).filter(Menu.id.in_(data['id'])).all()
for menu in menus:
db.session.delete(menu)
db.session.commit()
return jsonify({'result': 'Menu has been deleted'})
@staticmethod
def get_menu_fields_menus():
rows = db.session.query(Menu).all()
output = []
for row in rows:
output.append({'id': row.id, 'name': row.name})
db.session.close()
return jsonify({'response': output})
| StarcoderdataPython |
1789287 | <filename>tests/circuit/test_symm.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
import freqerica.circuit.symm
class BasicTestSuite(unittest.TestCase):
"""Basic test cases."""
def test_SymmRemoveClifford(self):
n_qubit = 4
dim = 2**n_qubit
from qulacs import QuantumState
from qulacs.state import inner_product
from freqerica.circuit.universal import prepstate
civec = {0b0011:+0.5,
0b0101:+0.5j,
0b1001:-0.5j,
0b0110:-0.5}
s0 = prepstate(n_qubit, civec)
print('s0:', s0)
from openfermion import QubitOperator
ZZZZ = QubitOperator('Z0 Z1 Z2 Z3')
from freqerica.op.symm import SymmRemover
remover = SymmRemover(n_qubit, [ZZZZ])
print(remover)
from freqerica.circuit.symm import SymmRemoveClifford
cliffords = SymmRemoveClifford(n_qubit, remover)
s1 = s0.copy()
cliffords.circuit_list[0].update_quantum_state(s1)
print('s1:', s1)
import numpy as np
s1vec = s1.get_vector()
s1vec = s1vec.reshape(*(2,2,2,2))[:,:,:,0].reshape(-1)
s1vec /= np.linalg.norm(s1vec)
s2 = prepstate(n_qubit-1, s1vec)
print('s2:', s2)
assert True
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# flake8: noqa: F401
# Core
import pytest
# Django
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.options import ModelAdmin
# Models
from custom_auth_user.models import AuthToken
# Admins
from custom_auth_user.admin import *
class MockRequest:
pass
request = MockRequest()
@pytest.mark.django_db
class TestAuthTokenAdmin():
@pytest.fixture
def auth_token_model_admin(self):
return ModelAdmin(AuthToken, AdminSite())
def test_user_admin(self, auth_token_model_admin):
assert str(auth_token_model_admin) == 'custom_auth_user.ModelAdmin'
def test_user_admin_fields(self, auth_token_model_admin):
admin_fields = set(list(auth_token_model_admin.get_fields(request)))
admin_fields_compare = set(['token', 'expiration_date', 'user'])
assert len(admin_fields) == len(admin_fields_compare)
assert admin_fields == admin_fields_compare
| StarcoderdataPython |
4816001 | <gh_stars>0
version = '0.0.1'
time = '2020-09-02 10:00:25'
| StarcoderdataPython |
3234952 | #!/usr/bin/env python3
# This file is a part of toml++ and is subject to the terms of the MIT license.
# Copyright (c) 2019-2020 <NAME> <<EMAIL>>
# See https://github.com/marzer/tomlplusplus/blob/master/LICENSE for the full license text.
import sys
import re
import os
import os.path as path
import traceback
def get_script_folder():
return path.dirname(path.realpath(sys.argv[0]))
def read_all_text_from_file(path):
print("Reading {}".format(path))
with open(path, 'r', encoding='utf-8') as file:
text = file.read()
return text
def make_divider(text = None, text_col = 40):
if (text is None):
return "//" + ('-' * 118)
else:
text = "//{} {} ".format('-' * (text_col - 2),text);
if (len(text) < 120):
return text + ('-' * (120 - len(text)))
else:
return text
class Preprocessor:
def __init__(self):
pass
def preprocess(self, match):
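        # Recursively inline `#include "..."` directives (each header only once),
        # stripping `#pragma once` and clang-format markers; top-level headers are
        # wrapped in labelled dividers / #pragma regions so the assembled single
        # header stays navigable.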
raw_incl = match if isinstance(match, str) else match.group(1)
incl = raw_incl.strip().lower()
if incl in self.processed_includes:
return ''
self.processed_includes.append(incl)
text = read_all_text_from_file(path.join(get_script_folder(), '..', 'include', 'toml++', incl))
text = re.sub(r'^\s*#\s*pragma\s+once\s*$', '', text, 0, re.I | re.M)
text = re.sub(r'^\s*//\s*clang-format\s+(?:off|on)\s*$', '', text, 0, re.I | re.M)
self.current_level += 1
text = re.sub(r'^\s*#\s*include\s+"(.+?)"', lambda m : self.preprocess(m), text, 0, re.I | re.M)
self.current_level -= 1
if (self.current_level == 1):
header_text = '↓ ' + raw_incl
lpad = 28 + ((25 * (self.header_indent % 4)) - int((len(header_text) + 4) / 2))
self.header_indent += 1
return '\n{}\n#pragma region {}\n\n{}\n\n#pragma endregion {}\n{}'.format(
make_divider(header_text, lpad), '', text, '', make_divider('↑ ' + raw_incl, lpad))
else:
return text
def __call__(self, file):
self.processed_includes = []
self.header_indent = 0
self.current_level = 0
return self.preprocess(file)
def main():
# preprocess header(s)
source_text = Preprocessor()('toml.h')
source_text = re.sub('\r\n', '\n', source_text, 0, re.I | re.M) # convert windows newlines
source_text = re.sub('(?:(?:\n|^)[ \t]*//[/#!<]+[^\n]*)+\n', '\n', source_text, 0, re.I | re.M) # remove 'magic' comment blocks
source_text = re.sub('(?:///[<].*?)\n', '\n', source_text, 0, re.I | re.M) # remove inline doxy briefs
source_text = re.sub('\n(?:[ \t]*\n[ \t]*)+\n', '\n\n', source_text, 0, re.I | re.M) # remove double newlines
source_text = re.sub('([^ \t])[ \t]+\n', '\\1\n', source_text, 0, re.I | re.M) # remove trailing whitespace
return_type_pattern \
= r'(?:' \
+ r'(?:\[\[nodiscard\]\]\s*)?' \
+ r'(?:(?:friend|explicit|virtual|inline|const|operator)\s+)*' \
+ r'(?:' \
+ r'bool|int64_t|(?:const_)?iterator|double|void' \
+ r'|node(?:_(?:view|of)<.+?>|)?|table|array|value(?:<.+?>)?' \
+ r'|T|U|parse_(?:error|result)' \
+ r')' \
+ r'(?:\s*[&*]+)?' \
+ r'(?:\s*[(]\s*[)])?' \
+ r'\s+' \
+ r')'
blank_lines_between_returns_pattern = '({}[^\n]+)\n\n([ \t]*{})'.format(return_type_pattern, return_type_pattern)
for i in range(0, 5): # remove blank lines between simple one-liner definitions
source_text = re.sub('(using .+?;)\n\n([ \t]*using)', '\\1\n\\2', source_text, 0, re.I | re.M)
source_text = re.sub(
'([a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)' \
+ '\n\n([ \t]*[a-zA-Z_][a-zA-Z0-9_]*[ \t]+[a-zA-Z_][a-zA-Z0-9_]*[ \t]*;)', '\\1\n\\2',
source_text, 0, re.I | re.M)
source_text = re.sub(blank_lines_between_returns_pattern, '\\1\n\\2', source_text, 0, re.I | re.M)
source_text = source_text.strip()
# extract library version
library_version = [0,0,0]
match = re.search(r'^\s*#\s*define\s+TOML_LIB_MAJOR\s+([0-9]+)\s*$', source_text, re.I | re.M)
if match is not None:
library_version[0] = match.group(1)
match = re.search(r'^\s*#\s*define\s+TOML_LIB_MINOR\s+([0-9]+)\s*$', source_text, re.I | re.M)
if match is not None:
library_version[1] = match.group(1)
match = re.search(r'^\s*#\s*define\s+TOML_LIB_(?:REVISION|PATCH)\s+([0-9]+)\s*$', source_text, re.I | re.M)
if match is not None:
library_version[2] = match.group(1)
# build the preamble (license etc)
preamble = []
preamble.append('''
toml++ v{}
https://github.com/marzer/tomlplusplus
SPDX-License-Identifier: MIT'''.format('.'.join(str(x) for x in library_version)))
preamble.append('''
- THIS FILE WAS ASSEMBLED FROM MULTIPLE HEADER FILES BY A SCRIPT - PLEASE DON'T EDIT IT DIRECTLY -
If you wish to submit a contribution to toml++, hooray and thanks! Before you crack on, please be aware that this
file was assembled from a number of smaller files by a python script, and code contributions should not be made
against it directly. You should instead make your changes in the relevant source file(s). The file names of the files
that contributed to this header can be found at the beginnings and ends of the corresponding sections of this file.''')
preamble.append('''
TOML language specifications:
Latest: https://github.com/toml-lang/toml/blob/master/README.md
v1.0.0: https://github.com/toml-lang/toml/blob/master/README.md
v0.5.0: https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md''')
preamble.append(read_all_text_from_file(path.join(get_script_folder(), '..', 'LICENSE')))
# write the output file
output_file_path = path.join(get_script_folder(), '..', 'toml.hpp')
print("Writing to {}".format(output_file_path))
with open(output_file_path,'w', encoding='utf-8', newline='\n') as output_file:
if (len(preamble) > 0):
print(make_divider(), file=output_file)
for pre in preamble:
print('//', file=output_file)
for line in pre.strip().splitlines():
print('//', file=output_file, end = '')
if (len(line) > 0):
print(' ', file=output_file, end = '')
print(line, file=output_file)
else:
print('\n', file=output_file, end = '')
print('//', file=output_file)
print(make_divider(), file=output_file)
print('''// clang-format off
#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
#endif
''', file=output_file)
print(source_text, file=output_file)
print('''
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// clang-format on''', file=output_file)
if __name__ == '__main__':
try:
main()
except Exception as err:
print(
'Fatal error: [{}] {}'.format(
type(err).__name__,
str(err)
),
file=sys.stderr
)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
sys.exit()
| StarcoderdataPython |
44896 | <filename>test_autofit/tools/test_edenise/test_import.py
import pytest
from autofit.tools.edenise import Package, Import, LineItem
@pytest.fixture(
name="import_"
)
def make_import(package):
return Import(
"from autofit.tools.edenise import Line",
parent=package
)
@pytest.fixture(
name="local_import"
)
def make_local_import(
package
):
return LineItem(
" from autofit.tools.edenise import Line",
parent=package
)
def test_local_import_type(
local_import
):
assert isinstance(
local_import,
Import
)
def test_local_import_target(
local_import
):
string = " from VIS_CTI_Autofit.VIS_CTI_Tools.VIS_CTI_Edenise import Line"
assert local_import.target_import_string == string
def test_project_import(import_, package):
import_.parent = package["tools"]["edenise"]["converter"]
assert "autofit/tools/edenise" in str(import_.path)
assert import_.is_in_project is True
def test_non_project_import(package):
import_ = Import(
"import os",
parent=package
)
import_.parent = package
assert import_.is_in_project is False
def test_target_import_string(import_):
string = "from VIS_CTI_Autofit.VIS_CTI_Tools.VIS_CTI_Edenise import Line"
assert import_.target_import_string == string
def test_multi_import(
package
):
import_ = Import(
"from autofit.tools.edenise import Package, File, Import",
parent=package
)
string = "from VIS_CTI_Autofit.VIS_CTI_Tools.VIS_CTI_Edenise import Package, File, Import"
assert import_.target_import_string == string
def test_import_as(
package
):
import_ = Import(
"from autofit.tools import edenise as e",
parent=package
)
string = "from VIS_CTI_Autofit.VIS_CTI_Tools import VIS_CTI_Edenise as e"
assert import_.target_import_string == string
def test_package_import(
package
):
import_ = Import(
"from autofit.tools import edenise",
parent=package["tools"]
)
assert isinstance(
package["tools"]["edenise"],
Package
)
assert import_.module_path == ["autofit", "tools"]
assert isinstance(
import_.module["edenise"],
Package
)
string = "from VIS_CTI_Autofit.VIS_CTI_Tools import VIS_CTI_Edenise"
assert import_.target_import_string == string
@pytest.mark.parametrize(
"string, result",
[
("from . import util", "from autofit.tools import util"),
("from .. import conf", "from autofit import conf"),
("from ..tools import util", "from autofit.tools import util")
]
)
def test_relative_import(
package,
string,
result
):
import_ = Import(
string,
parent=package["tools"]["namer"]
)
assert import_.string == result
def test_module_import_name(
package
):
conf = package["conf"]
assert conf.target_import_string == "VIS_CTI_Conf as conf"
assert conf.target_file_name == "VIS_CTI_Conf.py"
package._should_rename_modules = False
assert conf.target_import_string == "conf"
assert conf.target_file_name == "conf.py"
def test_module_path_import_name(
package
):
package._should_rename_modules = False
import_ = Import(
"from autofit.non_linear.samples import NestSamples, Sample",
parent=package
)
assert import_.target_string == "from VIS_CTI_Autofit.VIS_CTI_NonLinear.VIS_CTI_Samples import NestSamples, Sample"
| StarcoderdataPython |
1654153 | <gh_stars>0
"""
Search algorithms for summations, products, integrals and derivatives
"""
from ..items import Items
from ..sequence import integral, derivative, summation, product
from ..utils import sequence_matches
from .base import RecursiveAlgorithm
__all__ = [
"SummationAlgorithm",
"ProductAlgorithm",
"IntegralAlgorithm",
"DerivativeAlgorithm",
]
class SummationAlgorithm(RecursiveAlgorithm):
"""Search for sums"""
__min_items__ = 3
__accepts_undefined__ = False
def rank_increase(self):
return 1
def sub_search(self, manager, items, rank):
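        # Candidate operand = successive differences of the input items; if a known
        # sequence matches those differences, its summation reproduces the input.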
s_items = []
last = 0
for item in items:
value = item - last
s_items.append(value)
last = item
sub_items = Items(s_items)
self.sub_queue(
manager, rank, items, sub_items, self._found_operand,
{})
def _found_operand(self, manager, items, sequences):
for operand in sequences:
sequence = summation(operand)
if sequence_matches(sequence, items):
yield sequence
class ProductAlgorithm(RecursiveAlgorithm):
"""Search for prods"""
__min_items__ = 3
__accepts_undefined__ = False
def rank_increase(self):
return 1
def sub_search(self, manager, items, rank):
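        # Candidate operand = successive quotients of the input items; bail out if any
        # division is inexact, since no integer sequence could then match.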
s_items = []
last = 1
for item in items:
if last == 0:
value = 0
else:
value, mod = divmod(item, last)
if mod != 0:
return
s_items.append(value)
last = item
sub_items = Items(s_items)
self.sub_queue(
manager, rank, items, sub_items, self._found_operand,
{})
def _found_operand(self, manager, items, sequences):
for operand in sequences:
sequence = product(operand)
if sequence_matches(sequence, items):
yield sequence
class IntegralAlgorithm(RecursiveAlgorithm):
"""Search for integrals"""
__min_items__ = 3
__accepts_undefined__ = False
def rank_increase(self):
return 1
def sub_search(self, manager, items, rank):
if items.derivative:
sub_items = Items(items.derivative)
self.sub_queue(
manager, rank, items, sub_items, self._found_operand,
{})
def _found_operand(self, manager, items, sequences):
for operand in sequences:
sequence = integral(operand, start=items[0]).simplify()
if sequence_matches(sequence, items):
yield sequence
class DerivativeAlgorithm(RecursiveAlgorithm):
"""Search for derivatives"""
__min_items__ = 3
__accepts_undefined__ = False
def rank_increase(self):
return 1
def sub_search(self, manager, items, rank):
sub_items = Items(items.make_integral())
self.sub_queue(
manager, rank, items, sub_items, self._found_operand,
{})
def _found_operand(self, manager, items, sequences):
for operand in sequences:
sequence = derivative(operand).simplify()
if sequence_matches(sequence, items):
yield sequence
| StarcoderdataPython |
1664172 | import discord, firebase_api, requests
from discord.ext import commands
import random
TOKEN = '<KEY>'
PREFIX = "!"
class MyClient(discord.Client):
async def on_ready(self):
self.db = firebase_api.Db(
"https://project-b-5b43c.firebaseio.com/",
"key.json"
)
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
@staticmethod
def isCommand(message, command, params = False):
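        # Without params, returns True when the message is "<PREFIX>...<command>";
        # with params=True, returns the text after the prefix so arguments can be parsed.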
if params:
if message.content.startswith(PREFIX) and command in message.content[1:]:
                return message.content[1:]
else:
if message.content.startswith(PREFIX) and message.content.endswith(command):
print("Command Call: ", message.author.name)
print("Command: ", command)
print("------")
return True
async def on_message(self, message):
# we do not want the bot to reply to itself
if message.author.id == self.user.id:
return
if message.content.startswith('!hello'):
await message.channel.send('Hello {0.author.mention}'.format(message))
if self.isCommand(message, "api"):
await message.channel.send("API WOO IT WORKS FUCK YEAH")
if self.isCommand(message, "admin raw"):
local_data = requests.get('https://project-b-5b43c.firebaseio.com/.json').text
await message.channel.send(local_data)
if self.isCommand(message, "admin indexes"):
data = self.db.load("/test/").get()
await message.channel.send(len(data))
if self.isCommand(message, "admin index search", params = True):
data = self.db.load("/test/").get()
await message.channel.send(len(data))
client = MyClient()
client.run(TOKEN) | StarcoderdataPython |
31710 | <reponame>mac389/semantic-distance
import os, json, matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
READ = 'rb'
directory = json.load(open('directory.json',READ))
filename = os.path.join(directory['data-prefix'],'test-similarity-matrix.npy')
data = np.load(filename).astype(float)
data = (data-data.min())/(data.max()-data.min()) #Think more about how to scale
f,ax = plt.subplots(figsize=(12,9))
#Only for control
color_series = {i:color for i,color in enumerate(sns.color_palette("husl", 3))}
colors = pd.Series([color_series[i % 3] for i in range(data.shape[0])])
print(colors)
hmap = sns.clustermap(np.corrcoef(data),col_colors = colors,row_colors=colors)
#plt.tight_layout()
plt.savefig('./results/clustermap-corr2.png') | StarcoderdataPython |
4832768 | import tkinter
from PIL import Image, ImageTk
class LoginPage(tkinter.Frame):
def __init__(self, parent, App):
self.application = App
self.config = App.config
super().__init__(parent)
self.configure(bg="grey")
self.grid(row=0, column=0, sticky="nsew")
parent.grid_rowconfigure(0, weight=1)
parent.grid_columnconfigure(0, weight=1)
self.mainFrame = tkinter.Frame(self, height=self.config.side, width=self.config.side, bg="grey")
self.mainFrame.pack(expand=True)
image = Image.open(self.config.logo_path)
image_w, image_h = image.size
ratio = image_w/self.config.side
image = image.resize((int(image_w//ratio//2), int(image_h//ratio//2)))
#Putting logo on the screen
self.logo = ImageTk.PhotoImage(image)
self.label_logo = tkinter.Label(self.mainFrame, image=self.logo)
self.label_logo.pack()
self.button_image = tkinter.PhotoImage(width=3, height=1)
self.button_width = self.config.side//4
self.button_height = self.config.side//21
self.label_username = tkinter.Label(self.mainFrame, text="Username", bg="grey", fg="black", font=("Arial", 14, "bold"))
self.label_username.pack(pady=5)
self.var_username = tkinter.StringVar()
self.username_box = tkinter.Entry(self.mainFrame,font=("Arial", 14, "bold"), textvariable=self.var_username)
self.username_box.pack()
self.label_password = tkinter.Label(self.mainFrame, text="Password", font=("Arial", 14, "bold"), bg="grey")
self.label_password.pack(pady=5)
self.var_password = tkinter.StringVar()
self.password_box = tkinter.Entry(self.mainFrame,font=("Arial", 14, "bold"), show="*", textvariable=self.var_password)
self.password_box.pack(pady=5)
self.login_button = tkinter.Button(self.mainFrame, text="Login", font=("Algerian", 14), image=self.button_image,compound="c", height=self.button_height,fg="black", width=self.button_width, command=lambda:self.application.check_login())
self.login_button.pack(pady=5)
self.signup_button = tkinter.Button(self.mainFrame, text="Sign up", font=("Algerian", 14), image=self.button_image,compound="c", height=self.button_height, width=self.button_width,command=lambda:self.application.sign_up())
self.signup_button.pack(pady=5)
| StarcoderdataPython |
3236032 | # This Python file uses the following encoding: utf-8
import os
from pathlib import Path
import sys
import requests
import asyncio
import pproxy
import nest_asyncio
nest_asyncio.apply()
from PySide6.QtWidgets import QApplication, QWidget, QListWidgetItem, QSystemTrayIcon
from PySide6.QtCore import QFile, QThread, Qt, QEvent, Slot
from PySide6.QtUiTools import QUiLoader
from PySide6.QtGui import QIcon
nodes = {}
class ProxyThread(QThread):
def __init__(self, parent = None):
QThread.__init__(self, parent)
self.command = ""
def set_connection(self, command):
self.command = command
def run(self):
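        # Run a private asyncio loop in this thread: listen on tunnel://127.0.0.1:1234
        # and relay each connection through the remote proxy described by self.command.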
server = pproxy.Server('tunnel://127.0.0.1:1234')
remote = pproxy.Connection(self.command)
args = dict( rserver = [remote],
verbose = print )
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.handler = self.loop.run_until_complete(server.start_server(args))
self.loop.run_forever()
def stop(self):
self.loop.call_soon_threadsafe(self.handler.close)
self.loop.call_soon_threadsafe(self.handler.wait_closed)
self.loop.call_soon_threadsafe(self.loop.shutdown_asyncgens)
self.loop.call_soon_threadsafe(self.loop.stop)
self.loop.close()
class MainWindow(QWidget):
def __init__(self):
super(MainWindow, self).__init__()
self.systemTrayIcon = QSystemTrayIcon(self)
self.systemTrayIcon.setIcon(QIcon("icon.ico"))
self.systemTrayIcon.setVisible(True)
self.systemTrayIcon.activated.connect(self.on_systemTrayIcon_activated)
self.load_ui_addnode()
self.load_ui()
@Slot(QSystemTrayIcon.ActivationReason)
def on_systemTrayIcon_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
print(self.isHidden())
if self.isHidden():
self.show()
else:
self.hide()
# def changeEvent(self, event):
# if event.type() == QEvent.WindowStateChange:
# if self.windowState() & Qt.WindowMinimized:
# self.hide()
# event.ignore()
# super(MainWindow, self).changeEvent(event)
def load_ui(self):
loader = QUiLoader()
path = os.fspath("ui/mainwindow.ui")
ui_file = QFile(path)
ui_file.open(QFile.ReadOnly)
self.windo_mainwindow = loader.load(ui_file, self)
self.load_nodes()
self.windo_mainwindow.pushButton_addnode.clicked.connect(self.show_window_addnode)
self.windo_mainwindow.pushButton_removenode.clicked.connect(self.remove_node)
self.proxythread = ProxyThread()
self.windo_mainwindow.pushButton_connect.clicked.connect(self.start_proxy)
self.windo_mainwindow.pushButton_disconnect.clicked.connect(self.stop_proxy)
self.windo_mainwindow.actionConnectivity_check.triggered.connect(self.connectivity_check)
ui_file.close()
def start_proxy(self):
connection_name = self.windo_mainwindow.listWidget_nodes.currentItem().text()
self.proxythread.set_connection(nodes[connection_name])
self.proxythread.start()
self.windo_mainwindow.textEdit_log.append("[+] Proxy started")
def stop_proxy(self):
self.windo_mainwindow.textEdit_log.append("[+] Proxy Stopped")
self.proxythread.stop()
def load_ui_addnode(self):
loader = QUiLoader()
path = os.fspath("ui/addnode.ui")
ui_file = QFile(path)
ui_file.open(QFile.ReadOnly)
self.window_addnode = loader.load(ui_file, self)
self.window_addnode.pushButton_save.clicked.connect(self.add_node)
ui_file.close()
def show_window_addnode(self):
self.window_addnode.show()
def add_node(self):
name = self.window_addnode.lineEdit_name.text()
command = ""
if self.window_addnode.radioButton_addcommand.isChecked():
command = self.window_addnode.lineEdit_command.text()
else:
command = command + self.window_addnode.comboBox_protocol.currentText()
command = command + "://" + self.window_addnode.lineEdit_host.text()
command = command + ":" + self.window_addnode.lineEdit_port.text()
if self.window_addnode.checkBox_auth.isChecked():
command = command + "#" + self.window_addnode.lineEdit_username.text()
command = command + ":" + self.window_addnode.lineEdit_password.text()
pass
self.write_to_db(name, command)
self.windo_mainwindow.textEdit_log.append("[+] Added new node")
self.windo_mainwindow.textEdit_log.append(f"[*] {name} -> {command}")
self.load_nodes()
def remove_node(self):
# get current row and delete it
selected_node = self.windo_mainwindow.listWidget_nodes.currentRow()
connection_name = self.windo_mainwindow.listWidget_nodes.currentItem().text()
self.windo_mainwindow.listWidget_nodes.takeItem(selected_node)
with open("nodes.dat", "r") as f:
lines = f.readlines()
with open("nodes.dat", "w") as f:
for line in lines:
name, _ = line.strip().split("||")
if not name == connection_name:
f.write(line)
def write_to_db(self, name, command):
with open("nodes.dat", "a") as f:
f.write(f"{name}||{command}\n")
def load_nodes(self):
self.windo_mainwindow.listWidget_nodes.clear()
with open("nodes.dat", "r") as f:
for line in f:
name, command = line.strip().split("||")
nodes[name] = command
item = QListWidgetItem(name)
self.windo_mainwindow.listWidget_nodes.addItem(item)
self.windo_mainwindow.textEdit_log.append("[+] Loaded nodes")
def connectivity_check(self):
http_proxy = "socks5://127.0.0.1:1234"
https_proxy = "socks5://127.0.0.1:1234"
proxyDict = {
"http" : http_proxy,
"https" : https_proxy
}
self.windo_mainwindow.textEdit_log.append("[+] Started connectivity check")
try:
result = requests.get("http://ident.me", timeout=3, proxies=proxyDict)
self.windo_mainwindow.textEdit_log.append(f"[*] Connectivity check status: {result.status_code}")
except:
self.windo_mainwindow.textEdit_log.append(f"[*] Connectivity check status: not connected")
if __name__ == "__main__":
#
app = QApplication([])
window = MainWindow()
window.setFixedSize(300, 500)
window.setWindowTitle("TProxy")
window.show()
sys.exit(app.exec())
| StarcoderdataPython |
1761597 | from dataclasses import dataclass
import logging
from typing import ClassVar
from bitey.cpu.arch import EightBitArch
@dataclass
class AddressingMode:
"""
Addressing mode base class
"""
bytes: int
"""
The number of bytes an instruction with this addressing mode takes,
including the instruction itself.
"""
def __post_init__(self):
self.logger = logging.getLogger("bitey.cpu.addressing_mode")
def get_value(self, flags, registers, memory):
"""
Get the value at the address
Returns a tuple of the address and value for convenience
"""
return (None, None)
def get_address(self, flags, registers, memory):
"""
Return the effective address
"""
# The size to consume is bytes minus one for the opcode itself
size = self.bytes - 1
if size > 0:
registers["PC"].add(size)
return None
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:02x}".format(address)
else:
return ""
@dataclass
class AbsoluteAddressingMode(AddressingMode):
"""
Absolute addressing mode
Absolute addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The following absolute JMP command would jump to the NOP instruction
0x0000 0x4C JMP
0x0001 0x12
0x0002 0x34
...
0x3412 NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
return memory.get_16bit_address(self.adl, self.adh)
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
# address = self.get_address(flags, registers, memory)
address = self.get_address(flags, registers, memory)
        self.logger.debug("AbsoluteAddressing get_inst_str address: {}".format(address))
if address is not None:
return "${0:04x}".format(address)
else:
return ""
@dataclass
class AccumulatorAddressingMode(AddressingMode):
"""
Accumulator Addressing Mode
The value is set to the current value of the accumulator
Instructions can test the addressing mode to set the accumulator after they
have performed their operation.
"""
bytes: ClassVar[int] = 1
def get_address(self, flags, registers, memory):
return None
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
value = registers["A"].get()
return (address, value)
def get_inst_str(self, flags, registers, memory):
self.get_address(flags, registers, memory)
return ""
@dataclass
class AbsoluteIndirectAddressingMode(AddressingMode):
"""
    Absolute Indirect addressing mode
Absolute Indirect addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of an address
that contains the effective address
The third byte contains the high-order byte of an address that contains
the effective address
The effective address points to the actual location
The following absolute indirect JMP command would jump to the NOP instruction
0x0000 0x6C JMP
0x0001 0x12
0x0002 0x34
...
0x3412 0x15
0x3413 0x34
...
0x3415 0xEA NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
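        # Read the two-byte pointer that follows the opcode, then dereference it to
        # fetch the low and high bytes of the effective address.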
self.adl = memory.read(registers["PC"].get())
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address_to_address = memory.get_16bit_address(self.adl, self.adh)
self.adl = memory.read(address_to_address)
self.adh = memory.read(address_to_address + 1)
effective_address = memory.get_16bit_address(self.adl, self.adh)
return effective_address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "(${0:04x})".format(address)
else:
return ""
class AbsoluteIndirectPageBoundaryBugAddressingMode(AddressingMode):
"""
Absolute Indirect addressing mode with the Page Boundary Bug
Absolute Indirect addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of an address
that contains the effective address
The third byte contains the high-order byte of an address that contains
the effective address
The effective address points to the actual location
This version of the addressing mode exhibits the JMP page boundary bug
seen on some NMOS chips.
If the base address is on the edge of a page boundary, it wraps to the
beginning of that page instead of going to the beginning of the next page.
The following absolute indirect JMP command would jump to the NOP instruction
0x0000 0x34 ; most-significant byte of address
...
0x00FE 0x6C JMP
0x00FF 0x12 ; least-significant byte of address
...
0x3412 0x15
0x3413 0x34
...
0x3415 0xEA NOP
"""
adl: int = 0
"The low-order byte"
adh: int = 0
"The high-order byte"
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
pc = registers["PC"].get()
self.adl = memory.read(pc)
if (pc & 0xFF) == 0xFF:
# Memory form 0x??FF should wrap to the same page
self.adh = memory.read(pc & 0xFF00)
registers["PC"].inc()
else:
# Other memory should work the same as the normal AbsoluteIndirectAddressingMode
# TODO: Verify that the PC ends up in the correct place
# (it should still go to the next instruction)
# "technically" it doesn't matter, since this bug is exclusive to JMP
# instructions
# But for more accurate simulation and cycle-dependent stuff, it may matter
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address_to_address = memory.get_16bit_address(self.adl, self.adh)
self.adl = memory.read(address_to_address)
self.adh = memory.read(address_to_address + 1)
effective_address = memory.get_16bit_address(self.adl, self.adh)
return effective_address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
if address is not None:
return "(${0:04x})".format(address)
else:
return ""
@dataclass
class AbsoluteXAddressingMode(AddressingMode):
"""
Absolute,X addressing mode
Absolute addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The X Index is then added to this address
"""
adl: int = 0
"""
The low-order byte
This does not include the X offset
"""
adh: int = 0
"""
The high-order byte
This does not include the X offset
"""
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address = memory.get_16bit_address(self.adl, self.adh)
address += registers["X"].get()
# Wrap at end of memory
# address = address % 0xFFFF
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
# address = self.get_address(flags, registers, memory)
address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:04x},X".format(memory.get_16bit_address(self.adl, self.adh))
else:
return ""
@dataclass
class AbsoluteYAddressingMode(AddressingMode):
"""
Absolute,Y addressing mode
Absolute,Y addressing is a three-byte instruction
The address is encoded in the next two bytes after the opcode
The first byte contains the opcode
The second byte contains the low-order byte of the effective address
The effective address contains the data
The third byte contains the high-order byte of the effective address
The Y Index is then added to this address
"""
adl: int = 0
"""
The low-order byte
This does not include the X offset
"""
adh: int = 0
"""
The high-order byte
This does not include the X offset
"""
bytes: ClassVar[int] = 3
def get_address(self, flags, registers, memory):
self.adl = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
self.adh = memory.read(registers["PC"].get())
registers["PC"].inc()
address = memory.get_16bit_address(self.adl, self.adh)
address += registers["Y"].get()
# Wrap at end of memory
# address = address % 0xFFFF
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
# address = self.get_address(flags, registers, memory)
address = self.get_address(flags, registers, memory)
if address is not None:
return "${0:04x},Y".format(memory.get_16bit_address(self.adl, self.adh))
else:
return ""
@dataclass
class ImmediateAddressingMode(AddressingMode):
"""
Immediate addressing mode
The value is encoded as a constant in the next byte
"""
bytes: ClassVar[int] = 2
def get_value(self, flags, registers, memory):
byte = memory.read(registers["PC"].get())
# TODO: Maybe wrap the flag with bounds checking too, read expected
# behavior
registers["PC"].inc()
return (None, byte)
def get_inst_str(self, flags, registers, memory):
(address, value) = self.get_value(flags, registers, memory)
return "#${0:02x}".format(value)
@dataclass
class ImpliedAddressingMode(AddressingMode):
"""
Implied addressing mode
The address is encoded in the instruction
"""
bytes: ClassVar[int] = 1
def get_address(self, flags, registers, memory):
return None
def get_value(self, flags, registers, memory):
return (None, None)
@dataclass
class IndexedIndirectAddressingMode(AddressingMode):
"""
Indexed Indirect addressing mode
Get an address in zero page memory from the next byte and the X Index.
The X index is added to the base address before fetching the effective
address. This is different than Indirect Indexed, where the Index
is added after fetching the address from Zero Page.
Also called Indirect X
A1 80 LDA ($80,X)
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
zero_page_address = registers["PC"].get()
registers["PC"].inc()
# X register size is 8-bits
zero_page_address += registers["X"].get()
adl = zero_page_address
# TODO: Test for case we go beyond the page boundary
# Wraparound is assumed
# This behavior may still be incorrect
# TODO: Use one of the native functional test suites like
# https://github.com/Klaus2m5/6502_65C02_functional_tests.git
# after implementation
adh = (zero_page_address + 1) % 0x100
address = memory.get_16bit_value(adl, adh)
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_address(flags, registers, memory)
return "(${0:02x},X)".format(address)
@dataclass
class IndirectIndexedAddressingMode(AddressingMode):
"""
Indirect Indexed addressing mode.
Get an address in zero page memory from the next byte.
The address in zero page is two bytes long. The first byte is the
low-order byte. The second is the high-order byte.
It then adds the Y Index to this address.
Also called Indirect Y
B1 80 LDA ($80),Y
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
zero_page_address = registers["PC"].get()
registers["PC"].inc()
adl = zero_page_address
# TODO: Test for case we go beyond the page boundary
# Wrap-around is assumed
adh = zero_page_address + 1
address = memory.get_16bit_value(adl, adh)
address += registers["Y"].get()
# TODO: Verify wrapping is the correct behavior
address = address % 0x10000
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_address(flags, registers, memory)
return "(${0:02x}),Y".format(address)
@dataclass
class IndirectXAddressingMode(AddressingMode):
"""
Another name for Indexed Indirect Addressing
TODO: Maybe consolidate these
"""
bytes: ClassVar[int] = 2
def __post_init__(self):
self.am = IndexedIndirectAddressingMode()
def get_address(self, flags, registers, memory):
return self.am.get_address(flags, registers, memory)
def get_value(self, flags, registers, memory):
return (self, self.am.get_value(flags, registers, memory))
def get_inst_str(self, flags, registers, memory):
return self.am.get_inst_str(flags, registers, memory)
@dataclass
class IndirectYAddressingMode(AddressingMode):
"""
Another name for Indirect Indexed Addressing
TODO: Maybe consolidate these
"""
bytes: ClassVar[int] = 2
def __post_init__(self):
self.am = IndirectIndexedAddressingMode()
def get_address(self, flags, registers, memory):
return self.am.get_address(flags, registers, memory)
def get_value(self, flags, registers, memory):
return self.am.get_value(flags, registers, memory)
def get_inst_str(self, flags, registers, memory):
return self.am.get_inst_str(flags, registers, memory)
@dataclass
class ZeroPageAddressingMode(AddressingMode):
"""
Zero Page addressing mode
The address in Zero Page is encoded as a constant in the next byte
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
# TODO: Create exception API
assert address <= 0xFF
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
@dataclass
class ZeroPageXAddressingMode(AddressingMode):
"""
Zero Page X addressing mode
Compute the address by adding the value the PC points to
plus the X register value.
    Wraps if the value is greater than 0xFF.
"""
bytes: ClassVar[int] = 2
def get_address(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
address += registers["X"].get()
# wrap on values > 0xFF
address = address % 0x100
return address
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_value(flags, registers, memory)
return "${0:02x},X".format(address)
@dataclass
class ZeroPageYAddressingMode(AddressingMode):
"""
Zero Page Y addressing mode
Compute the address by adding the value the PC points to
plus the Y register value.
Wraps if the value is greater than 0x255.
"""
bytes: ClassVar[int] = 2
def get_value(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
registers["PC"].inc()
address += registers["Y"].get()
# wrap on values > 0xFF
address = address % 0x100
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
address = memory.read(registers["PC"].get())
self.get_value(flags, registers, memory)
return "${0:02x},Y".format(address)
@dataclass
class RelativeAddressingMode(AddressingMode):
"""
Relative addressing mode
The value in the next byte is added to the PC to find the effective
address.
The effective address is calculated from the PC after it has been
incremented reading the offset, not from the JMP opcode position.
This uses two's complement, and supports negative offsets.
"""
bytes: ClassVar[int] = 2
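    # Worked example: if the offset byte is 0xFB (-5 in two's complement) and the PC
    # reads 0x1002 after the increment, the effective address is 0x1002 - 5 = 0x0ffd.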
def get_address(self, flags, registers, memory):
"Get the effective address"
offset = memory.read(registers["PC"].get())
# Calculate two's complement to get negative value
offset = EightBitArch.twos_complement_to_signed_int(offset)
# TODO: update flags
registers["PC"].inc()
effective_address = registers["PC"].get() + offset
return effective_address % 0x10000
def get_value(self, flags, registers, memory):
address = self.get_address(flags, registers, memory)
return (address, memory.read(address))
def get_inst_str(self, flags, registers, memory):
"Return the address as an effective address"
(address, value) = self.get_value(flags, registers, memory)
if address is not None:
return "${0:04x}".format(address)
else:
return ""
| StarcoderdataPython |
3365276 | <reponame>cliisberg/cvrapi-python-client<filename>cvrapi_client/client.py
from functools import partial
from .api import CVRAPI
class CVRAPIClient(object):
METHODS = ['get', 'post']
def __init__(self, *args):
self.api = CVRAPI(*args)
def __getattr__(self, method):
return partial(getattr(self.api, 'perform'), method)
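# Example usage (constructor arguments are passed straight through to CVRAPI and are
# hypothetical here): any attribute lookup becomes a bound `perform` call, so
# `client.get(...)` dispatches to `CVRAPI.perform('get', ...)`.
#
#   client = CVRAPIClient('api-key', 'my-user-agent')
#   client.get(...)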
| StarcoderdataPython |
3338789 | from autofunc.find_similarities import find_similarities
import pandas as pd
import os.path
script_dir = os.path.dirname(__file__)
file_to_learn = os.path.join(script_dir, '../autofunc/assets/consumer_systems.csv')
train_data = pd.read_csv(file_to_learn)
## Make similarity dataframe
similarity_df = find_similarities(train_data)
## This can take a while but never changes for each dataset, so the result is saved to a csv for reuse
similarity_df.to_csv('consumer_similarity1.csv', index = True, index_label=False, header= True) | StarcoderdataPython |
1619439 | <filename>pdbtools/pdb_tidy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modifies the file to adhere (as much as possible) to the format specifications.
Expects a sorted file - REMARK/ATOM/HETATM/END - so use pdb_sort in case you are
not sure.
This includes:
- Adding TER statements after chain breaks/changes
- Truncating/Padding all lines to 80 characters
- Adds END statement at the end of the file
Will remove all original TER/END statements from the file.
Usage:
python pdb_tidy.py [-strict] <pdb file>
Example:
python pdb_tidy.py 1CTF.pdb
python pdb_tidy.py -strict 1CTF.pdb # does not add TER on chain breaks
This program is part of the `pdb-tools` suite of utilities and should not be
distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import os
import sys
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = False
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# One of two options: option & Pipe OR file & default option
if args[0] == '-strict':
option = True
if sys.stdin.isatty(): # ensure the PDB data is streamed in
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif len(args) == 2:
# Two options: option & File
if not args[0] == '-strict':
emsg = 'ERROR! First argument is not a valid option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not os.path.isfile(args[1]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
return (option, fh)
def tidy_pdbfile(fhandle, strict=False):
"""Adds TER/END statements and pads all lines to 80 characters.
If strict is True, does not add TER statements at intra-chain breaks.
"""
not_strict = not strict
def make_TER(prev_line):
"""Creates a TER statement based on the last ATOM/HETATM line.
"""
# Add last TER statement
serial = int(prev_line[6:11]) + 1
rname = prev_line[17:20]
chain = prev_line[21]
resid = prev_line[22:26]
icode = prev_line[26]
return fmt_TER.format(serial, rname, chain, resid, icode)
# TER 606 LEU A 75
    fmt_TER = "TER   {:>5d}      {:3s} {:1s}{:>4s}{:1s}" + " " * 53 + "\n"
records = ('ATOM', 'HETATM')
ignored = ('TER', 'END ', 'END\n', 'CONECT', 'MASTER')
# Iterate up to the first ATOM/HETATM line
prev_line = None
for line in fhandle:
if line.startswith(ignored): # to avoid matching END _and_ ENDMDL
continue
line = line.strip() # We will pad/add \n later to make uniform
# Check line length
line = "{:<80}\n".format(line)
yield line
if line.startswith(records):
prev_line = line
break
# Now go through all the remaining lines
atom_section = False
serial_offset = 0 # To offset after adding TER records
for line in fhandle:
if line.startswith(ignored):
continue
line = line.strip()
# Treat ATOM/HETATM differently
# - no TER in HETATM
if line.startswith('ATOM'):
is_gap = (int(line[22:26]) - int(prev_line[22:26])) > 1
if atom_section and (line[21] != prev_line[21] or (not_strict and is_gap)):
serial_offset += 1 # account for TER statement
yield make_TER(prev_line)
serial = int(line[6:11]) + serial_offset
line = line[:6] + str(serial).rjust(5) + line[11:]
prev_line = line
atom_section = True
elif line.startswith('HETATM'):
if atom_section:
atom_section = False
serial_offset += 1 # account for TER statement
yield make_TER(prev_line)
serial = int(line[6:11]) + serial_offset
line = line[:6] + str(serial).rjust(5) + line[11:]
prev_line = line
elif line.startswith('ANISOU'):
# Fix serial based on previous atom
# Avoids doing the offset again
serial = int(prev_line[6:11])
line = line[:6] + str(serial).rjust(5) + line[11:]
else:
if atom_section:
atom_section = False
yield make_TER(prev_line)
if line.startswith('MODEL'):
serial_offset = 0
if serial > 99999:
emsg = 'ERROR!! Structure contains more than 99.999 atoms.\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
# Check line length
line = "{:<80}\n".format(line)
yield line
else:
if atom_section:
# Add last TER statement
atom_section = False
yield make_TER(prev_line)
# Add END statement
yield "{:<80}\n".format("END")
def main():
# Check Input
strict, pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = tidy_pdbfile(pdbfh, strict)
try:
_buffer = []
_buffer_size = 5000 # write N lines at a time
for lineno, line in enumerate(new_pdb):
if not (lineno % _buffer_size):
sys.stdout.write(''.join(_buffer))
_buffer = []
_buffer.append(line)
sys.stdout.write(''.join(_buffer))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
| StarcoderdataPython |
96673 | #!/usr/bin/env python
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils
def _print_label_show(obj):
fields = ['uuid', 'host_uuid', 'label_key', 'label_value']
data = [(f, getattr(obj, f, '')) for f in fields]
utils.print_tuple_list(data)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host [REQUIRED]")
def do_host_label_list(cc, args):
"""List kubernetes labels assigned to a host."""
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
host_label = cc.label.list(ihost.uuid)
for i in host_label[:]:
setattr(i, 'hostname', ihost.hostname)
field_labels = ['hostname', 'label key', 'label value']
fields = ['hostname', 'label_key', 'label_value']
utils.print_list(host_label, fields, field_labels, sortby=1)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host [REQUIRED]")
@utils.arg('attributes',
metavar='<name=value>',
nargs='+',
action='append',
default=[],
help="List of Kubernetes labels")
def do_host_label_assign(cc, args):
"""Update the Kubernetes labels on a host."""
attributes = utils.extract_keypairs(args)
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
new_labels = cc.label.assign(ihost.uuid, attributes)
for p in new_labels.labels:
uuid = p['uuid']
if uuid is not None:
try:
label_obj = cc.label.get(uuid)
except exc.HTTPNotFound:
raise exc.CommandError('Host label not found: %s' % uuid)
_print_label_show(label_obj)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host [REQUIRED]")
@utils.arg('attributes',
metavar='<name>',
nargs='+',
action='append',
default=[],
help="List of Kubernetes label keys")
def do_host_label_remove(cc, args):
"""Remove Kubernetes label(s) from a host"""
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
for i in args.attributes[0]:
lbl = _find_host_label(cc, ihost, i)
if lbl:
cc.label.remove(lbl.uuid)
print('Deleted host label %s for host %s' % (i, ihost.hostname))
def _find_host_label(cc, host, label):
host_labels = cc.label.list(host.uuid)
for lbl in host_labels:
if lbl.host_uuid == host.uuid and lbl.label_key == label:
break
else:
lbl = None
print('Host label not found: host %s, label key %s ' %
(host.hostname, label))
return lbl
| StarcoderdataPython |
1654294 | <filename>datasets/GTSRB_SS.py
"""
Module for managing GTSRB_SS dataset download from https://www.towardsautonomy.com/perception/traffic_sign_classification
"""
import numpy as np
import os
import urllib
from .ImageDataset import ImageDataset
import pickle
import h5py
import sklearn
import sklearn.model_selection
from skimage import io
import os
import pandas as pd
from skimage import color, exposure, transform
import glob
import numpy as np
from .utils import normalize, min_max_data_np
NUM_CLASSES = 43
IMG_SIZE = 32
# IF WANT TO USE A DIFFERENT IMAGE SIZE OR WANT TO MAKE NEW TRAIN AND TEST SETS, YOU SOHULD FIRST RUN IT ON IRONMAN, AS IT'S THE ONLY WORKSTATION
# THAT HAS THE RAW IMAGES SAVED (/ssd_data/datasets/GTSRG/Final_Training/Images etc.)
class GTSRB_SS(ImageDataset):
"""
This class manage the dataset GTSRB, properties of the datasets are uniquely determined
by the params dictionary
"""
default_params = {
# "something" : param
}
def __init__(self, params):
super().__init__(params)
self._id = self.dataset_id(params)
default_data_dir = '/ssd_data/Traffic_signs_test/'
self.data_dir = self._params['data_dir'] if 'data_dir' in params else default_data_dir
self._train_set_x, self._train_set_y, \
self._validation_set_x, self._validation_set_y, \
self._test_set_x, self._test_set_y = self.load_data(self.data_dir)
@staticmethod
def dataset_id(params):
"""
        This method interprets the parameters and generates an id
"""
        GTSRB_SS.check_params_impl(params)
id = 'GTSRB_SS'
return id
@staticmethod
def load_data(gtsrb_dir):
try:
os.stat(gtsrb_dir)
except:
os.mkdir(gtsrb_dir)
# import pdb;pdb.set_trace()
#X_shuffled, Y_shuffled = load_train(gtsrb_dir)
#val_frac = 0.15
#X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_shuffled, Y_shuffled,
# test_size=val_frac, shuffle=False)
X_train, Y_train=import_data(gtsrb_dir, "train")
X_test, Y_test=import_data(gtsrb_dir, "test")
X_val, Y_val=import_data(gtsrb_dir, "valid")
#X_test, Y_test = load_test(gtsrb_dir)
# normalize data consistently (in case they would not already be)
all_min, all_max = min_max_data_np([X_train, X_val, X_test])
X_train = normalize(X_train, all_min, all_max)
X_val = normalize(X_val, all_min, all_max)
X_test = normalize(X_test, all_min, all_max)
return X_train, Y_train, X_val, Y_val, X_test, Y_test
# from https://chsasank.github.io/keras-tutorial.html
def get_class(img_path):
return int(img_path.split('/')[-2])
def preprocess_img(img):
# Histogram normalization in y
hsv = color.rgb2hsv(img)
hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2])
img = color.hsv2rgb(hsv)
# central scrop
min_side = min(img.shape[:-1])
centre = img.shape[0] // 2, img.shape[1] // 2
img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,
centre[1] - min_side // 2:centre[1] + min_side // 2,
:]
# rescale to standard size
img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
return img
def import_data(gtsrb_dir='/ssd_data/', data="train"):
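    # Load one pickled split ("train", "test" or "valid") stored as a dict with
    # 'features' (image array) and 'labels' entries.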
data_file = os.path.join(gtsrb_dir, '{}'.format(data)+'.p' )
with open(data_file, mode='rb') as f:
data_file_numpy = pickle.load(f)
X, Y = np.float32(data_file_numpy['features']), data_file_numpy['labels']
return X, Y
# def load_train(gtsrb_dir='/ssd_data/datasets/GTSRB'):
# h5filename = os.path.join(gtsrb_dir, 'GTSRB_Train_and_Validation_shuffled' + str(IMG_SIZE) + '.h5')
# try:
# with h5py.File(h5filename, 'r') as hf:
# X, Y = hf['imgs'][:], hf['labels'][:]
# print("Loaded images from {:}".format(h5filename))
# except (IOError, OSError, KeyError):
# print("Error in reading {:}. Processing all images...".format(h5filename))
# img_root_dir = os.path.join(gtsrb_dir, 'Final_Training/Images/')
# imgs = []
# labels = []
# all_img_paths = glob.glob(os.path.join(img_root_dir, '*/*.ppm'))
# np.random.shuffle(all_img_paths)
# for img_path in all_img_paths:
# try:
# img = preprocess_img(io.imread(img_path))
# label = get_class(img_path)
# imgs.append(img)
# labels.append(label)
# if len(imgs) % 1000 == 0: print("Processed {}/{}".format(len(imgs), len(all_img_paths)))
# except (IOError, OSError):
# print('missed', img_path)
# pass
# X = np.array(imgs, dtype='float32')
# Y = np.array(labels, dtype='int32')
# with h5py.File(h5filename, 'w') as hf:
# hf.create_dataset('imgs', data=X)
# hf.create_dataset('labels', data=Y)
# return X, Y
# def load_test(gtsrb_dir='/ssd_data/datasets/GTSRB'):
# h5filename = os.path.join(gtsrb_dir, 'GTSRB_Test' + str(IMG_SIZE) + '.h5')
# try:
# with h5py.File(h5filename, 'r') as hf:
# X, Y = hf['imgs'][:], hf['labels'][:]
# print("Loaded images from {:}".format(h5filename))
# except (IOError, OSError, KeyError):
# print("Error in reading {:}. Processing all images...".format(h5filename))
# img_root_dir = os.path.join(gtsrb_dir, 'Final_Test/Images/')
# csvfilename = os.path.join(img_root_dir, 'GT-final_test.csv')
# test = pd.read_csv(csvfilename, sep=';')
# # Load test dataset
# X = []
# Y = []
# for file_name, class_id in zip(list(test['Filename']), list(test['ClassId'])):
# img_path = os.path.join(img_root_dir, file_name)
# X.append(preprocess_img(io.imread(img_path)))
# Y.append(class_id)
# X = np.array(X, dtype='float32')
# Y = np.array(Y, dtype='int32')
# with h5py.File(h5filename, 'w') as hf:
# hf.create_dataset('imgs', data=X)
# hf.create_dataset('labels', data=Y)
# return X, Y
| StarcoderdataPython |
1644049 | #Creating Dictionary
d = {22:"ss",23:"ftp",53:"dns"}
print(d)
#Length
print(len(d))
#Deleting
del d[22]
print(d)
| StarcoderdataPython |
3320876 | from collections import OrderedDict
from pathlib import Path
import pytest
from Pegasus.yaml import dumps, loads
@pytest.mark.parametrize(
"s, expected",
[
("key: 1", 1),
("key: 2018-10-10", "2018-10-10"),
("key: yes", "yes"),
("key: true", True),
],
)
def test_loads(s, expected):
"""Test :meth:`Pegasus.yaml.loads`."""
rv = loads(s)
assert type(rv["key"]) == type(expected)
assert rv["key"] == expected
@pytest.mark.parametrize(
"obj, expected",
[
({"key": 1}, "key: 1\n"),
({"key": "2018-10-10"}, "key: '2018-10-10'\n"),
({"key": "yes"}, "key: 'yes'\n"),
({"key": True}, "key: true\n"),
({"key": Path("./aaa")}, "key: aaa\n"),
({"key": Path("../aaa")}, "key: ../aaa\n"),
(OrderedDict([(1, 1), (2, 2)]), "1: 1\n2: 2\n"),
(OrderedDict([(1, OrderedDict([(2, 2)])), (3, 3)]), "1:\n 2: 2\n3: 3\n"),
],
)
def test_dumps(obj, expected):
"""Test :meth:`Pegasus.yaml.dumps`."""
assert dumps(obj) == expected
| StarcoderdataPython |
4829956 | <reponame>forging2012/opencmdb-backend
from flask_security import roles_accepted
from webargs.flaskparser import use_args
from api.models import (Aggregation, Mould)
from api.utils.custom.error import error
from api.utils.custom.interface_tips import InterfaceTips
from api.utils.custom.validators import validate_valid_layer_id
from api.utils.custom.resource import BaseResource
from api.utils.custom.schema import (aggregations_schema, aggregation_schema, moulds_base_schema)
class AggregationsResource(BaseResource):
@roles_accepted('admin')
def get(self, layer_id):
if not validate_valid_layer_id(layer_id):
error(InterfaceTips.INVALID_LAYER_ID)
aggregations, total = Aggregation.pagination(layer_id=layer_id)
result = []
for aggregation in aggregations:
aggregation_data = aggregation_schema.dump(aggregation).data
moulds = Mould.fetch_all(aggregation=aggregation)
aggregation_data.update({'moulds': moulds_base_schema.dump(moulds).data})
result.append(aggregation_data)
return result
@use_args(aggregation_schema)
@roles_accepted('admin')
def post(self, args, layer_id):
if not validate_valid_layer_id(layer_id):
error(InterfaceTips.INVALID_LAYER_ID)
if Aggregation.existed_record(code=args.get('code'), name=args.get('name')):
error(InterfaceTips.RECORD_HAS_EXISTED)
args.update(layer_id=layer_id)
aggregation = Aggregation.create(**args)
return aggregation_schema.dump(aggregation).data, 201
class AggregationResource(BaseResource):
@roles_accepted('admin')
@BaseResource.check_record(Aggregation, 'aggregation_id', '集合')
def get(self, aggregation_id):
aggregation = self.record
return aggregation_schema.dump(aggregation).data
@use_args(aggregation_schema)
@roles_accepted('admin')
@BaseResource.check_record(Aggregation, 'aggregation_id', '集合')
def put(self, args, aggregation_id):
layer_id = args.get('layer_id', None)
if layer_id and not validate_valid_layer_id(layer_id):
error(InterfaceTips.INVALID_LAYER_ID)
aggregation = self.record
if Aggregation.existed_record(aggregation, code=args.get('code'), name=args.get('name')):
error(InterfaceTips.RECORD_HAS_EXISTED)
aggregation = aggregation.update(**args)
return aggregation_schema.dump(aggregation).data
@roles_accepted('admin')
@BaseResource.check_record(Aggregation, 'aggregation_id', '集合')
def delete(self, aggregation_id):
aggregation = self.record
aggregation.delete()
return {}, 204
| StarcoderdataPython |
3342850 | import pickle
from sklearn import datasets
iris=datasets.load_iris()
x=iris.data
y=iris.target
#labels for iris dataset
labels ={
0: "setosa",
1: "versicolor",
2: "virginica"
}
#split the data set
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.25)
#Using decision tree algorithm
from sklearn import tree
classifier=tree.DecisionTreeClassifier()
classifier.fit(x_train,y_train)
predictions=classifier.predict(x_test)
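#optional illustration (not part of the original script): accuracy on the held-out split
from sklearn.metrics import accuracy_score
print("Accuracy:", accuracy_score(y_test, predictions))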
#export the model
pickle.dump(classifier, open('model.pkl','wb'))
#load the model and test with a custom input
model = pickle.load( open('model.pkl','rb'))
x = [[6.7, 3.3, 5.7, 2.1]]
predict = model.predict(x)
print("Test Run")
print(labels[predict[0]]) | StarcoderdataPython |
3226491 | import pytest
import datetime
from pupa.scrape import Event
def event_obj():
e = Event(
name="get-together",
start_date=datetime.datetime.utcnow().isoformat().split('.')[0] + 'Z',
location_name="Joe's Place",
)
e.add_source(url='http://example.com/foobar')
return e
def test_basic_event():
e = event_obj()
e.validate()
def test_no_location():
e = Event(
name="get-together",
start_date=datetime.datetime.utcnow().isoformat().split('.')[0] + 'Z',
)
e.add_source(url='http://example.com/foobar')
e.validate()
def test_event_str():
e = event_obj()
assert e.name in str(e)
def test_bad_event():
e = event_obj()
e.start_date = 6
with pytest.raises(ValueError):
e.validate()
def test_basic_agenda():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
assert agenda['description'] == 'foo bar'
assert e.agenda[0] == agenda
e.validate()
def test_agenda_add_person():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
assert agenda['related_entities'] == []
agenda.add_person(person='<NAME>', note='chair')
assert len(e.agenda[0]['related_entities']) == 1
e.validate()
def test_agenda_add_vote_event():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
assert agenda['related_entities'] == []
agenda.add_vote_event(vote_event='Roll no. 12')
assert len(e.agenda[0]['related_entities']) == 1
e.validate()
def test_agenda_add_subject():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
agenda.add_subject('test')
assert e.agenda[0]['subjects'] == ['test']
agenda.add_subject('test2')
assert e.agenda[0]['subjects'] == ['test', 'test2']
e.validate()
def test_agenda_add_classification():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
agenda.add_classification('test')
assert e.agenda[0]['classification'] == ['test']
agenda.add_classification('test2')
assert e.agenda[0]['classification'] == ['test', 'test2']
e.validate()
def test_agenda_add_extra():
e = event_obj()
a = e.add_agenda_item('foo bar')
a['extras'] = dict(foo=1, bar=['baz'])
assert e.agenda[0]['extras'] == {'foo': 1, 'bar': ['baz']}
def test_add_committee():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
assert agenda['related_entities'] == []
agenda.add_committee(committee='Hello, World', note='host')
e.validate()
def test_add_bill():
e = event_obj()
agenda = e.add_agenda_item("foo bar")
assert agenda['related_entities'] == []
agenda.add_bill(bill='HB 101', note='consideration')
e.validate()
def test_add_document():
e = event_obj()
assert e.documents == []
e.add_document(note='hello', url='http://example.com', media_type="text/html")
assert len(e.documents) == 1
o = e.documents[0]
assert o['note'] == 'hello'
assert o['links'] == [{'url': 'http://example.com', 'media_type': 'text/html', 'text': ''}]
e.validate()
def test_participants():
e = event_obj()
e.add_participant('Committee of the Whole', type='committee', note='everyone')
assert len(e.participants) == 1
assert e.participants[0]['name'] == 'Committee of the Whole'
assert e.participants[0]['entity_type'] == 'committee'
assert e.participants[0]['note'] == 'everyone'
# and add_person, which is a shortcut
e.add_person('<NAME>')
assert len(e.participants) == 2
assert e.participants[1]['name'] == '<NAME>'
assert e.participants[1]['entity_type'] == 'person'
assert e.participants[1]['note'] == 'participant'
def test_set_location():
e = event_obj()
e.set_location('North Pole', note='it is cold here', url='https://www.northpole.com',
coordinates={'latitude': '90.0000', 'longitude': '0.0000'})
assert e.location.get('name') == 'North Pole'
assert e.location.get('note') == 'it is cold here'
assert e.location.get('url') == 'https://www.northpole.com'
assert e.location.get('coordinates').get('latitude') == '90.0000'
assert e.location.get('coordinates').get('longitude') == '0.0000'
e.validate()
def test_add_media():
e = event_obj()
name = "<NAME>"
a = e.add_agenda_item(description='foo')
a.add_media_link(note=name, url="http://pault.ag", media_type="text/html")
a.add_media_link(note=name, url="ftp://pault.ag", media_type="text/plain")
e.validate()
assert len(e.agenda[0]['media']) == 1
assert len(e.agenda[0]['media'][0]['links']) == 2
e.add_media_link(note=name, url="http://pault.ag", media_type="text/html")
e.add_media_link(note=name, url="ftp://pault.ag", media_type="text/plain")
e.validate()
assert len(e.media) == 1
assert len(e.media[0]['links']) == 2
| StarcoderdataPython |
113427 | <reponame>coderdq/vuetest<filename>WEB21-1-12/WEB2/power/migrations/0001_initial.py
# Generated by Django 2.2 on 2020-10-15 01:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='POWER',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fsvip', models.CharField(max_length=20)),
('fsvoffset', models.CharField(max_length=3)),
('zvlip', models.CharField(max_length=20)),
('zvloffset', models.CharField(max_length=3)),
('smbvip', models.CharField(max_length=20)),
('powerdir', models.CharField(max_length=255)),
('band', models.CharField(max_length=3)),
('zvlused', models.CharField(max_length=2)),
('dl', models.CharField(max_length=20)),
('lowtemp', models.SmallIntegerField(null=True)),
('normtemp', models.SmallIntegerField(null=True)),
('hightemp', models.SmallIntegerField(null=True)),
('port', models.SmallIntegerField(null=True)),
('period', models.SmallIntegerField(null=True)),
],
),
]
| StarcoderdataPython |
1648105 | import requests
from .builder import AmazonRequestBuilder
from .response import (
AmazonItemSearchResponse,
AmazonItemLookupResponse,
AmazonSimilarityLookupResponse,
)
class AmazonProductAPI(object):
def __init__(self, access_key, secret_key, associate_tag):
self.access_key = access_key
self.secret_key = secret_key
self.associate_tag = associate_tag
self.request_builder = AmazonRequestBuilder(access_key, secret_key, associate_tag)
def _base_params(self, search_index=None, response_groups=None, parameters=None):
if parameters is None:
parameters = {}
default_params = {
'Service': 'AWSECommerceService',
'AWSAccessKeyId': self.access_key,
'AssociateTag': self.associate_tag,
'ResponseGroup': 'Images,ItemAttributes',
'Version': '2013-08-01'
}
if response_groups:
default_params['ResponseGroup'] = ','.join(response_groups)
if search_index:
default_params['SearchIndex'] = search_index
parameters.update(default_params)
return parameters
def item_search(self, search_index, keywords=None, page=None, response_groups=None, parameters=None):
params = self._base_params(search_index, response_groups, parameters)
params['Operation'] = 'ItemSearch'
if keywords:
params['Keywords'] = ','.join(keywords)
if page:
params['ItemPage'] = page
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonItemSearchResponse)
return response
def item_lookup(self, item_id, id_type='ASIN', search_index=None, response_groups=None, parameters=None):
params = self._base_params(search_index, response_groups, parameters)
params['Operation'] = 'ItemLookup'
params['ItemId'] = item_id
params['IdType'] = id_type
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonItemLookupResponse)
return response
def similarity_lookup(self, asins, response_groups=None, parameters=None):
params = self._base_params(response_groups=response_groups, parameters=parameters)
if not isinstance(asins, (list,)):
asins = [asins]
params['Operation'] = 'SimilarityLookup'
params['ItemId'] = ','.join(asins)
req_url = self.request_builder.build_request_url(params)
response = self._make_get_request(req_url, AmazonSimilarityLookupResponse)
return response
def _make_get_request(self, req_url, response_class):
req = requests.get(req_url)
response = response_class(req)
return response
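# Hedged usage sketch (not part of the original module): the access key, secret key,
# associate tag and ASIN below are placeholders and must be replaced with real values.
if __name__ == "__main__":
    api = AmazonProductAPI(access_key="YOUR_ACCESS_KEY",      # placeholder
                           secret_key="YOUR_SECRET_KEY",      # placeholder
                           associate_tag="yourtag-20")        # placeholder
    # search the Books index for a couple of keywords, first page of results
    search_response = api.item_search("Books", keywords=["python", "api"], page=1)
    # look up a single item by its ASIN
    lookup_response = api.item_lookup("B000000000", id_type="ASIN")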
| StarcoderdataPython |
1788790 | <filename>05_Practice1/Step03/5_3_sang.py<gh_stars>0
a = sorted(map(int, input().split()))
print(a[1])
| StarcoderdataPython |
1643070 | """Tests using pytest_resilient_circuits"""
# -*- coding: utf-8 -*-
# Copyright © IBM Corporation 2010, 2019
from __future__ import print_function, unicode_literals
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from test_helper import TasksResilientMock
PACKAGE_NAME = "fn_task_utils"
FUNCTION_NAME = "task_utils_create"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = TasksResilientMock
def call_task_utils_create_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("task_utils_create", function_params)
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("task_utils_create_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestTskUtilsCreate:
""" Tests for the tsk_utils_create function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "New Task Name", {"type": "text", "content": '{\n"required": false\n}'}),
(123, "My New Task", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_success(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert(results["content"]["task"])
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "Η Θ Ι Κ Λ Μ Ν Ξ Ο Π Ρ", {"type": "text", "content": '{\n"required": false\n}'}),
(123, " Й К Л М Н О П Р С Т ", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_success_unicode(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert (results["content"]["task"])
assert function_params["task_name"] == results["content"]["task"]["name"]
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "text", {"type": "text", "content": '{\n"required": false\n}'}),
])
def test_owner_as_email_user(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert (results["content"]["task"])
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "text", {"type": "text", "content": '{\n"required": false\n}'}),
(123, "text", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_owner_as_user_id(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert results["content"]["task"]
| StarcoderdataPython |
178297 | from credmark.cmf.model import Model
@Model.describe(
slug='contrib.neilz',
display_name='An example of a contrib model',
    description="This model exists simply as an example of how and where to "
                "contribute a model to the Credmark framework",
version='1.0',
developer='neilz.eth',
output=dict
)
class MyModel(Model):
def run(self, input):
return {
"credmarkFounder": "Neil",
"message": "You are a modeler. Thank you modeler."
}
| StarcoderdataPython |
1616735 | # -*- coding=utf-8 -*-
from flask.ext.wtf import Form
from ..models import Category
from wtforms import StringField, SubmitField, PasswordField, TextAreaField
from wtforms.validators import Required, length, Regexp, EqualTo
from wtforms.ext.sqlalchemy.fields import QuerySelectField
class LoginForm(Form):
    username = StringField(u'Account', validators=[Required(), length(6, 64)])
    password = PasswordField(u'Password', validators=[Required()])
    submit = SubmitField(u'Log In')
class RegistrationForm(Form):
    username = StringField(u'Username', validators=[Required(), length(6, 18), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                                                                       u'Usernames may only contain letters, '
                                                                                       u'special characters are not allowed')])
    password = PasswordField(
        u'Password', validators=[Required(), EqualTo('password2', message=u'Passwords do not match')])
    password2 = PasswordField(u'<PASSWORD>', validators=[Required()])
    real_name = StringField(u'Nickname', validators=[Required()])
    registerkey = StringField(u'Registration code', validators=[Required()])
    submit = SubmitField(u'Register')
class PostArticleForm(Form):
    title = StringField(u'Title', validators=[Required(), length(6, 64)])
    body = TextAreaField(u'Content', validators=[Required()])
    category_id = QuerySelectField(u'Category', query_factory=lambda: Category.query.all(
    ), get_pk=lambda a: str(a.id), get_label=lambda a: a.name)
    submit = SubmitField(u'Publish')
class PostCategoryForm(Form):
    name = StringField(u'Category name', validators=[Required(), length(2, 64)])
    submit = SubmitField(u'Publish')
| StarcoderdataPython |
4838089 | from __future__ import annotations
from unittest import TestCase
from jsonclasses.exceptions import ValidationException
from tests.classes.simple_config_user import SimpleConfigUser
from tests.classes.simple_folder import SimpleFolder
from tests.classes.simple_node import SimpleNode
from tests.classes.simple_shape_setting import SimpleShapeSetting
from tests.classes.simple_shorthand_setting import SimpleShorthandSetting
from tests.classes.default_shape_value import DefaultShapeValue
class TestShape(TestCase):
def test_shape_accepts_shorthand_types(self):
setting = SimpleShapeSetting()
setting.email['auto_send'] = '5'
setting.email['receive_promotion'] = '5'
self.assertRaisesRegex(ValidationException,
"Value '5' at 'email.auto_send' should be "
"bool.",
setting.validate)
def test_shape_accepts_optional_shorthand_types(self):
setting = SimpleShorthandSetting()
setting.email['auto_send'] = '5'
setting.email['receive_promotion'] = '5'
self.assertRaisesRegex(ValidationException,
"Value '5' at 'email.auto_send' should be "
"bool.",
setting.validate)
setting = SimpleShorthandSetting()
setting.validate()
def test_shape_accepts_typed_dict_type(self):
value = DefaultShapeValue(
settings={'ios': '2', 'android': '2', 'name': '4'})
self.assertRaisesRegex(
ValidationException,
"Value '2' at 'settings.ios' should be bool.",
value.validate)
def test_shape_validates_inner_fields(self):
user = SimpleConfigUser(config={})
self.assertRaisesRegex(ValidationException,
'\'config\\.ios\' should not be None',
user.validate)
def test_shape_doesnt_raise_on_validation_if_inner_fields_are_ok(self):
user = SimpleConfigUser(config={'ios': True, 'android': True})
user.validate()
def test_shape_assigns_none_on_inner_fields_for_accessing(self):
user = SimpleConfigUser(config={})
self.assertEqual(user.config, {'ios': None, 'android': None})
# TODO: make shape work according to strict settings
def test_shape_sanitizes_input(self):
user = SimpleConfigUser(config={'haha': True, 'android': False})
self.assertEqual(user.config, {'ios': None, 'android': False})
def test_shape_underscores_keys_on_init(self):
folder = SimpleFolder(
config={'displaySize': True, 'displayDate': False})
self.assertEqual(folder.config,
{'display_size': True, 'display_date': False})
def test_shape_doesnt_underscore_keys_on_init_if_specified(self):
node = SimpleNode(
config={'displaySize': True, 'display_Date': False})
self.assertEqual(node.config,
{'display_size': None, 'display_date': None})
def test_shape_camelizes_keys_on_tojson(self):
folder = SimpleFolder(
config={'displaySize': True, 'displayDate': False})
self.assertEqual(folder.tojson()['config'],
{'displaySize': True, 'displayDate': False})
def test_shape_doesnt_camelize_keys_on_tojson(self):
node = SimpleNode(
config={'display_size': True, 'display_date': False})
self.assertEqual(node.tojson()['config'],
{'display_size': True, 'display_date': False})
def test_shape_produce_validation_error_message_for_one_item(self):
node = SimpleNode(config={})
with self.assertRaises(ValidationException) as context:
node.validate()
exception = context.exception
self.assertEqual(len(exception.keypath_messages), 1)
self.assertEqual(exception.keypath_messages['config.display_size'],
"Value at 'config.display_size' should not be None.")
def test_strict_shape_raises_if_key_is_not_allowed(self):
with self.assertRaisesRegex(ValidationException,
"Unallowed key 'a' at 'config'\\."):
SimpleFolder(config={'a': True, 'b': False})
| StarcoderdataPython |
193204 | import numpy as np
import pytest
from ansys.dpf import core as dpf
from ansys.dpf.core import examples
@pytest.fixture()
def local_server():
try :
for server in dpf._server_instances :
if server() != dpf.SERVER:
server().info #check that the server is responsive
return server()
return dpf.start_local_server(as_global = False)
except:
return dpf.start_local_server(as_global = False)
@pytest.fixture()
def static_models(local_server):
otherfile = dpf.upload_file_in_tmp_folder(examples.static_rst, server=local_server)
return (dpf.Model(examples.static_rst), dpf.Model(otherfile, server=local_server))
@pytest.fixture()
def transient_models(local_server):
otherfile = dpf.upload_file_in_tmp_folder(examples.msup_transient, server=local_server)
return (dpf.Model(examples.msup_transient), dpf.Model(otherfile, server=local_server))
@pytest.fixture()
def cyc_models(local_server):
otherfile = dpf.upload_file_in_tmp_folder(examples.simple_cyclic, server=local_server)
return (dpf.Model(examples.simple_cyclic), dpf.Model(otherfile, server=local_server))
@pytest.fixture()
def all_kind_of_complexity_models(local_server):
otherfile = dpf.upload_file_in_tmp_folder(examples.download_all_kinds_of_complexity(), server=local_server)
return (dpf.Model(examples.download_all_kinds_of_complexity()), dpf.Model(otherfile, server=local_server))
def test_different_multi_server(static_models):
assert static_models[0]._server != static_models[1]._server
assert not static_models[0]._server == static_models[1]._server
assert static_models[0]._server.port != static_models[1]._server.port
assert static_models[0].metadata.data_sources.result_files[0]!=static_models[1].metadata.data_sources.result_files[0]
def test_model_time_freq_multi_server(static_models):
tf = static_models[0].metadata.time_freq_support
tf2 = static_models[1].metadata.time_freq_support
assert tf.time_frequencies.shape == tf2.time_frequencies.shape
assert tf.time_frequencies.size == tf2.time_frequencies.size
assert np.allclose(tf.time_frequencies.data,tf2.time_frequencies.data)
assert np.allclose(tf.time_frequencies.scoping.ids,tf2.time_frequencies.scoping.ids)
assert tf.n_sets == tf2.n_sets
assert tf.get_frequency(0,0) == tf2.get_frequency(0,0)
assert tf.get_cumulative_index(0,0) == tf2.get_cumulative_index(0,0)
# make sure that after starting a first local server we are still using
# 2 different servers
def test_different_multi_server2(static_models):
assert static_models[0]._server != static_models[1]._server
assert not static_models[0]._server == static_models[1]._server
assert static_models[0]._server.port != static_models[1]._server.port
assert static_models[0].metadata.data_sources.result_files[0]!=static_models[1].metadata.data_sources.result_files[0]
def test_model_mesh_multi_server(static_models):
mesh = static_models[0].metadata.meshed_region
mesh2 = static_models[1].metadata.meshed_region
assert mesh.unit == mesh2.unit
assert mesh.available_named_selections == mesh2.available_named_selections
assert np.allclose(mesh.named_selection(mesh.available_named_selections[0]).ids, mesh2.named_selection(mesh2.available_named_selections[0]).ids)
elements = mesh.elements
elements2 = mesh2.elements
assert np.allclose(elements.scoping.ids, elements2.scoping.ids)
assert np.allclose(elements.element_types_field.data, elements2.element_types_field.data)
assert np.allclose(elements.connectivities_field.data, elements2.connectivities_field.data)
assert np.allclose(elements.materials_field.data, elements2.materials_field.data)
assert elements.n_elements == elements2.n_elements
assert elements.has_shell_elements == elements2.has_shell_elements
assert elements.has_solid_elements == elements2.has_solid_elements
assert elements.has_beam_elements == elements2.has_beam_elements
assert elements.has_point_elements == elements2.has_point_elements
nodes = mesh.nodes
nodes2 = mesh2.nodes
assert np.allclose(nodes.scoping.ids, nodes2.scoping.ids)
def test_model_result_info_multi_server(static_models):
result_info = static_models[0].metadata.result_info
result_info2 = static_models[1].metadata.result_info
assert result_info.analysis_type == result_info2.analysis_type
assert result_info.physics_type == result_info2.physics_type
assert result_info.unit_system == result_info2.unit_system
assert result_info.cyclic_symmetry_type == result_info2.cyclic_symmetry_type
assert result_info.has_cyclic == result_info2.has_cyclic
available_results = result_info.available_results
available_results2 = result_info2.available_results
for i,res in enumerate(available_results):
assert res.name == available_results2[i].name
assert res.n_components == available_results2[i].n_components
assert res.dimensionality == available_results2[i].dimensionality
assert res.homogeneity == available_results2[i].homogeneity
assert res.unit == available_results2[i].unit
assert res.operator_name == available_results2[i].operator_name
assert res.sub_results == available_results2[i].sub_results
def test_model_cyc_support_multi_server(cyc_models):
result_info = cyc_models[0].metadata.result_info
result_info2 = cyc_models[1].metadata.result_info
assert result_info.has_cyclic == result_info2.has_cyclic
assert result_info.cyclic_symmetry_type == result_info2.cyclic_symmetry_type
cyc_support = result_info.cyclic_support
cyc_support2 = result_info2.cyclic_support
assert cyc_support.num_stages == cyc_support2.num_stages
assert cyc_support.num_sectors() == cyc_support2.num_sectors()
assert cyc_support.base_nodes_scoping().ids == cyc_support2.base_nodes_scoping().ids
assert cyc_support.base_elements_scoping().ids == cyc_support2.base_elements_scoping().ids
assert cyc_support.sectors_set_for_expansion().ids == cyc_support2.sectors_set_for_expansion().ids
assert cyc_support.expand_node_id(1).ids == cyc_support2.expand_node_id(1).ids
assert cyc_support.expand_element_id(1).ids == cyc_support2.expand_element_id(1).ids
assert cyc_support.expand_node_id(1,cyc_support.sectors_set_for_expansion()).ids == cyc_support2.expand_node_id(1,cyc_support2.sectors_set_for_expansion()).ids
assert cyc_support.expand_element_id(1,cyc_support.sectors_set_for_expansion()).ids == cyc_support2.expand_element_id(1,cyc_support2.sectors_set_for_expansion()).ids
def test_model_displacement_multi_server(transient_models):
tf = transient_models[0].metadata.time_freq_support
time_scoping = range(1, len(tf.time_frequencies)+1)
disp = transient_models[0].results.displacement()
disp.inputs.time_scoping(time_scoping)
disp2 = transient_models[1].results.displacement()
disp2.inputs.time_scoping(time_scoping)
fc = disp.outputs.fields_container()
fc2 = disp2.outputs.fields_container()
for i,f in enumerate(fc):
assert fc.get_label_space(i)==fc2.get_label_space(i)
ftocheck = fc2[i].deep_copy()
iden = dpf.operators.logic.identical_fields(f,ftocheck)
assert iden.outputs.boolean()
assert np.allclose(f.data, fc2[i].data)
assert np.allclose(f.scoping.ids, fc2[i].scoping.ids)
assert np.allclose(f.data, ftocheck.data)
assert np.allclose(f.scoping.ids, ftocheck.scoping.ids)
def check_fc(fc,fc2):
for i,f in enumerate(fc):
assert fc.get_label_space(i)==fc2.get_label_space(i)
ftocheck = fc2[i].deep_copy()
iden = dpf.operators.logic.identical_fields(f,ftocheck)
assert iden.outputs.boolean()
assert np.allclose(f.data, fc2[i].data)
assert np.allclose(f.scoping.ids, fc2[i].scoping.ids)
assert np.allclose(f.data, ftocheck.data)
assert np.allclose(f.scoping.ids, ftocheck.scoping.ids)
idenfc = dpf.operators.logic.identical_fc(fc,fc2.deep_copy())
assert idenfc.outputs.boolean()
def test_model_stress_multi_server(transient_models):
tf = transient_models[0].metadata.time_freq_support
time_scoping = range(1, len(tf.time_frequencies)+1)
disp = transient_models[0].results.stress()
disp.inputs.time_scoping(time_scoping)
disp2 = transient_models[1].results.stress()
disp2.inputs.time_scoping(time_scoping)
fc = disp.outputs.fields_container()
fc2 = disp2.outputs.fields_container()
check_fc(fc,fc2)
idenfc = dpf.operators.logic.identical_fc(fc.deep_copy(fc2._server),fc2, server =fc2._server )
assert idenfc.outputs.boolean()
def test_model_different_results_big_multi_server(all_kind_of_complexity_models):
tf = all_kind_of_complexity_models[0].metadata.time_freq_support
time_scoping = len(tf.time_frequencies)
results = all_kind_of_complexity_models[0].results
results2 = all_kind_of_complexity_models[1].results
op = results.displacement()
op.inputs.time_scoping(time_scoping)
op2 = results2.displacement()
op2.inputs.time_scoping(time_scoping)
fc = op.outputs.fields_container()
fc2 = op2.outputs.fields_container()
check_fc(fc,fc2)
op = results.stress()
op.inputs.time_scoping(time_scoping)
op2 = results2.stress()
op2.inputs.time_scoping(time_scoping)
fc = op.outputs.fields_container()
fc2 = op2.outputs.fields_container()
check_fc(fc,fc2)
op = results.elastic_strain()
op.inputs.time_scoping(time_scoping)
op2 = results2.elastic_strain()
op2.inputs.time_scoping(time_scoping)
fc = op.outputs.fields_container()
fc2 = op2.outputs.fields_container()
check_fc(fc,fc2)
op = results.elemental_volume()
op.inputs.time_scoping(time_scoping)
op2 = results2.elemental_volume()
op2.inputs.time_scoping(time_scoping)
fc = op.outputs.fields_container()
fc2 = op2.outputs.fields_container()
check_fc(fc,fc2)
| StarcoderdataPython |
1755055 | <filename>denoising.py
import numpy as np
import pywt
from statsmodels.robust import mad
import matplotlib.pyplot as plt
def waveletSmooth( x, wavelet="db4", level=1, title=None ):
# calculate the wavelet coefficients
# returns tuple [cA,cDn,...,cD1] one approx. and details coefficients
# use mode "periodization", or 'per', wavelet decompose(as for wavedec) the vector. len(cA)=len(cD) = len(x)/(2**n)
coeff = pywt.wavedec( x, wavelet, mode="per" )
# calculate a threshold
# mean absolute deviation of cD_level
sigma = mad( coeff[-level] )
#threshold = sigma*sqrt(2*log(n)/n)
    uthresh = sigma * np.sqrt(2 * np.log(len(x)))
#apply soft threshold to cD_n
coeff[1:] = ( pywt.threshold( i, value=uthresh, mode="soft" ) for i in coeff[1:] )
# reconstruct the signal using the thresholded coefficients
y = pywt.waverec( coeff, wavelet, mode="per" )
# f, ax = plt.subplots()
# plt.plot( x, color="b", alpha=0.1 )
# plt.plot( y, color="b" )
# if title:
# ax.set_title(title)
# ax.set_xlim((0,len(y)))
return y[:-1]
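# Hedged usage sketch (not from the original script): denoise a synthetic noisy sine
# wave; the signal length, frequency and noise level are arbitrary illustrative values.
if __name__ == "__main__":
    t = np.linspace(0, 1, 1024)
    noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
    smoothed = waveletSmooth(noisy, wavelet="db4", level=1)
    # note: the function returns one sample less than its input because of y[:-1]
    print(len(noisy), len(smoothed))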
# denoised = waveletSmooth(rawdata)
# wavelet = pywt.Wavelet('haar')
# levels = (np.floor(np.log(rawdata))).astype(int)
# WaveletCoeffs = pywt.wavedec2( rawdata, wavelet, level=levels)
# denoise = pywt.threshold(rawdata,1,'soft')
# plt.plot(denoise)
| StarcoderdataPython |
1624089 | <reponame>krish8484/ITF1788<gh_stars>0
#
# ITF1788
#
# Interval Test Framework for IEEE 1788 Standard for Interval Arithmetic
#
#
# Copyright 2014
#
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# Department of Computer Science
# University of Wuerzburg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import dslparser
from . import testAST
from . import discovery
from . import lang
import os
import optparse
import re
from ast import literal_eval as makeTuple
import time
import ntpath
import textwrap
# inherit from OptionParser to overwrite format_epilog method
# this allows properly formatted multiline strings in help messages
class ConsoleParser(optparse.OptionParser):
'''Handles the arguments passed to the program.'''
def __init__(self):
'''
Define the valid options.
'''
super(ConsoleParser, self).__init__()
self.add_option("-s", "--sourceDirectory", dest="sourceDir",
# TODO: check for operating systems other than linux
default='itl',
help="Directory with DSL tests")
self.add_option("-f", "--fileRegex", dest="fileRegex",
default='.*',
help="Regex for names of the desired DSL tests")
self.add_option("-c", "--configurations", dest="configurations",
help="""Specify the plugin configurations.""")
self.add_option("-o", "--outputDirectory", dest="outDir",
default="output",
help="Output directory for generated files")
self.add_option("-v", "--verbose", action="store_true",
dest="verbose")
def processConsoleParameters(self):
'''
Read the passed arguments and process them.
'''
(options, args) = self.parse_args()
self._checkSrcDir(options)
self._checkFileRegex(options)
self.specList = self._buildSpecList(options)
self.testFiles = self._buildTestFileList(options)
self.outDir = options.outDir
self.verbose = options.verbose
def _buildSpecList(self, options):
'''
Use the passed arguments and build a list of he configurations that
shall be generated.
'''
# if no specific configurations are given, generate tests for all
# configurations
if options.configurations is None or options.configurations == '*':
return discovery.getSpecList()
# regexes that are used to interpret the 'configurations'
# console parameter
identRegex = r"'[a-zA-Z](\s?[a-zA-Z0-9_])*'"
wildcardRegex = r"'\*'"
specListRegex = r"\s*\(\s*.+\s*,\s*.+\s*,\s*.+\s*\)\s*" + \
r"(\s*;\s*\(\s*.+\s*,\s*.+\s*,\s*.+\s*\)\s*)*"
LTARegex = r"\s*\(\s*" + identRegex + r"\s*,\s*" + identRegex + \
r"\s*,\s*" + identRegex + r"\s*\)\s*"
LTWRegex = r"\s*\(\s*" + identRegex + r"\s*,\s*" + identRegex + \
r"\s*,\s*" + wildcardRegex + r"\s*\)\s*"
LWARegex = r"\s*\(\s*" + identRegex + r"\s*,\s*" + wildcardRegex + \
r"\s*,\s*" + identRegex + r"\s*\)\s*"
LWWRegex = r"\s*\(\s*" + identRegex + r"\s*,\s*" + wildcardRegex + \
r"\s*,\s*" + wildcardRegex + r"\s*\)\s*"
if re.match(specListRegex, options.configurations):
# use configurations list parameter
tmp = options.configurations.split(';')
# enclose tuple values in quotation marks, required for
# the makeTuple method
for i in range(0, len(tmp)):
conf = tmp[i].strip()
# remove parentheses and leading/trailing whitespace
conf = conf[1:][:-1].strip()
# get tuple values and remove leading/trailing whitespace
vals = [v.strip() for v in conf.split(',')]
# add quotation marks and concat again
conf = '(' + ','.join(["'" + v + "'" for v in vals]) + ')'
# replace original config
tmp[i] = conf
try:
tmp = [makeTuple(el.strip()) for el in tmp]
valid = True
except:
valid = False
if not valid:
                raise IOError('Invalid specification list: ' +
                              options.configurations +
                              '\nRun "python3 -m itf1788 --help" to see the '
                              'correct syntax.')
specList = []
for spec in tmp:
if re.match(LTARegex, str(spec)):
specList += [spec]
elif re.match(LTWRegex, str(spec)):
specList += \
discovery.getSpecListByLanguageAndTestLibrary(spec[0],
spec[1])
elif re.match(LWARegex, str(spec)):
specList += \
discovery.getSpecListByLanguageAndArithmeticLibrary(spec[0],
spec[2])
elif re.match(LWWRegex, str(spec)):
specList += discovery.getSpecListByLanguage(spec[0])
else:
raise IOError('Invalid configurations specification: ' +
options.configurations)
return specList
def _buildTestFileList(self, options):
'''
Assemble a list of all ITL tests corresponding to the passed
parameter
'''
testFiles = [os.path.join(options.sourceDir, f)
for f in os.listdir(options.sourceDir)
if os.path.isfile(os.path.join(options.sourceDir,
f))
if re.match(options.fileRegex, f)]
testFiles = list(filter(lambda f: f.endswith('.itl'), testFiles))
return testFiles
def _checkSrcDir(self, options):
'''
Check if the path to the source directory is well formed
'''
if not os.path.isdir(options.sourceDir):
raise IOError('Invalid source directory: ' +\
options.sourceDir)
def _checkFileRegex(self, options):
'''
Check if the file regex is valid
'''
try:
re.compile(options.fileRegex)
valid = True
except:
valid = False
if not valid:
raise IOError('Invalid file regex: ' + options.fileRegex)
def format_epilog(self, formatter):
'''
Help message.
'''
return textwrap.dedent("""
Examples:
-- print help
python3 -m itf1788 -h
-- run ITF1788 with default source and output folder
python3 -m itf1788
-- use verbose output
python3 -m itf1788 -v
-- generate tests for all source files in "itl" and
all configurations
python3 -m itf1788 -s "itl"
-- like above, but use only test files whose name starts
with 'test'
python3 -m itf1788 -s "itl" -f "test.*"
-- use output directory "output"
python3 -m itf1788 -s "itl" -o "output"
In the following examples, a wildcard denotes that every
available option for that position shall be used.
-- generate tests for C++ only
python3 -m itf1788 -s "itl" -c "(cpp, *, *)"
-- generate tests for C++ and BOOST Test Library only
python3 -m itf1788 -s "itl" -c "(cpp, BOOST, *)"
            -- generate tests for C++ and libieeep1788 only
            python3 -m itf1788 -s "itl" -c "(cpp, *, libieeep1788)"
            -- generate tests for C++, BOOST Test Library and libieeep1788
            python3 -m itf1788 -s "itl" -c "(cpp, BOOST, libieeep1788)"
            -- generate tests for C++ and Octave only
python3 -m itf1788 -s "itl" -c "(cpp, *, *); (octave, *, *)"
""")
def main():
# measure run time
    startTime = time.perf_counter()
# parse console parameters
optParser = ConsoleParser()
optParser.processConsoleParameters()
specList = optParser.specList
testFiles = optParser.testFiles
outDir = optParser.outDir
# Assemble source files
for testfile in testFiles:
# parse the current ITL file and build an abstract syntax tree
ast = dslparser.parse(testfile)
# iterate over configurations
for language, testlib, arithlib in specList:
spec = discovery.getSpecification(language, testlib, arithlib)
out = lang.OutputSpecification(spec[0], spec[1], spec[2])
cbPath = discovery.getCbPath(language)
# paths to output directory and output file
writeDir = '/'.join([outDir, language, testlib, arithlib])
writeFile = '.'.join(ntpath.basename(testfile).split('.')[:-1]) + \
out.lang_extension
if optParser.verbose:
print('Generating', writeFile, 'for specification',
str((language, testlib, arithlib)), '...')
# generate output content by visiting the AST
v = testAST.ASTVisitor(out, cbPath)
(content, warnings) = ast.accept(v)
# create output directory if it does not exist
if not os.path.exists(writeDir):
os.makedirs(writeDir)
# write content
open(writeDir + '/' + writeFile, 'w+').write(content)
if optParser.verbose:
for warn in warnings:
print(warn)
print('Done.\n')
    endTime = time.perf_counter()
if optParser.verbose:
print('-'*80)
print('Generated output for', len(testFiles), 'testfiles in',
"%.2f" % round(endTime - startTime, 2), 'seconds.')
# Run main method if this script is called directly
if __name__ == '__main__':
main()
| StarcoderdataPython |
3375198 | <filename>jupyterlab_extension/services.py
# -*- coding: utf-8 -*-
import json
import os
import re
import requests
from unicodedata import normalize
DATASETS_ENDPOINT = os.getenv("DATASETS_ENDPOINT", "http://datasets.platiagro:8080")
PROJECTS_ENDPOINT = os.getenv("PROJECTS_ENDPOINT", "http://projects.platiagro:8080")
def find_task_by_name(os_path):
"""
Get the task name from the notebook using the PlatIAgro Projects API.
Parameters
----------
os_path : str
Returns
-------
dict
The task_id.
Raises
------
ConnectionError
When the request did not succeed.
HTTPError
When the request did not succeed.
"""
# only do this for Experiment.ipynb
match = re.search(r"/tasks/(.*?)/Experiment.ipynb", os_path)
if match:
task_name = match.group(1)
params = {'name': task_name}
r = requests.get(f"{PROJECTS_ENDPOINT}/tasks", params=params)
r.raise_for_status()
tasks = r.json()['tasks']
if len(tasks) > 0:
for task in tasks:
if task['name'] == task_name:
return task["uuid"]
def update_task(task_id, parameters=None) -> dict:
"""
Updates a task from notebook using PlatIAgro Projects API.
Parameters
----------
task_id : str
parameters : dict
Returns
-------
dict
The task details.
Raises
------
ConnectionError
When the request did not succeed.
HTTPError
When the request did not succeed.
"""
json = {}
if parameters:
json["parameters"] = parameters
r = requests.patch(f"{PROJECTS_ENDPOINT}/tasks/{task_id}", json=json)
r.raise_for_status()
return r.json()
def create_dataset(file: bytes, filename: str = "file") -> dict:
"""Creates a dataset from a CSV file using PlatIAgro Datasets API.
Args:
file (bytes): file object.
filename (str, optional): filename. Defaults to "file".
Returns:
dict: The dataset details: name, columns, and filename.
Raises:
ConnectionError: When the request did not succeed.
HTTPError: When the request did not succeed.
"""
files = {"file": (filename, file)}
r = requests.post(f"{DATASETS_ENDPOINT}/datasets", files=files)
r.raise_for_status()
return r.json()
def generate_name(filename: str, attempt: int = 1, path: str = "/tmp/data") -> str:
"""Generates a dataset name from a given filename.
Args:
filename (str): source filename.
        attempt (int, optional): the current attempt at generating a new name.
        path (str, optional): directory checked for an existing file with the same name.
Returns:
str: new generated dataset name.
"""
name = normalize('NFKD', filename) \
.encode('ASCII', 'ignore') \
.replace(b' ', b'-') \
.decode()
if attempt > 1:
name, extension = os.path.splitext(name)
name = f"{name}-{attempt}{extension}"
try:
os.makedirs("/tmp/data", exist_ok=True)
open(f"{path}/{name}")
except FileNotFoundError:
return name
return generate_name(filename, attempt + 1)
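# Illustrative behaviour of generate_name (assumed examples, not from the original file):
#   generate_name("é data.csv") -> "e-data.csv"  when no file of that name exists in /tmp/data
#   generate_name("data.csv")   -> "data-2.csv"  when "/tmp/data/data.csv" already exists
#                                                (and "data-2.csv" does not)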
def create_dataset_locally(file: bytes, filename: str = "file", name: str = "file", path: str = "/tmp/data"):
"""Creates a dataset from a CSV and writes locally.
Args:
file (bytes): file object content.
filename (str, optional): original file name. Defaults to "file".
name (str, optional): new file name generated by datasets API. Defaults to "file".
        path (str, optional): path to be written. Defaults to "/tmp/data".
    Note:
        An OSError raised while writing is caught and printed; None is returned in that case.
"""
try:
os.makedirs("/tmp/data", exist_ok=True)
with open(f"{path}/{name}", 'wb') as csv_file:
csv_file.write(file)
return {"filename": filename, "name": name}
except OSError as e:
print(e)
def parse_parameters(experiment_notebook):
"""
Parses and returns the parameters declared in a notebook.
Parameters
----------
experiment_notebook : dict
Returns
-------
list:
A list of parameters (name, default, type, label, description).
"""
parameters = []
cells = experiment_notebook.get("cells", [])
for cell in cells:
cell_type = cell["cell_type"]
tags = cell["metadata"].get("tags", [])
if cell_type == "code" and "parameters" in tags:
source = cell["source"]
parameters.extend(read_parameters_from_source(source))
return parameters
def read_parameters_from_source(source):
"""
Lists the parameters declared in source code.
Parameters
----------
source : list
Source code lines.
Returns
-------
list:
A list of parameters (name, default, type, label, description).
"""
parameters = []
# Regex to capture a parameter declaration
# Inspired by Google Colaboratory Forms
# Example of a parameter declaration:
# name = "value" #@param ["1st option", "2nd option"] {type:"string", label:"Foo Bar", description:"Foo Bar"}
pattern = re.compile(r"^(\w+)\s*=\s*(.+)\s+#@param(?:(\s+\[.*\]))?(\s+\{.*\})")
for line in source:
match = pattern.search(line)
if match:
try:
name = match.group(1)
default = match.group(2)
options = match.group(3)
metadata = match.group(4)
parameter = {"name": name}
if default and default != "None":
if default in ["True", "False"]:
default = default.lower()
parameter["default"] = json.loads(default)
if options:
parameter["options"] = json.loads(options)
# adds quotes to metadata keys
metadata = re.sub(r"(\w+):", r'"\1":', metadata)
parameter.update(json.loads(metadata))
parameters.append(parameter)
except json.JSONDecodeError:
pass
return parameters
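# Minimal sketch (assumption: the module is run directly for a quick check); the variable
# name and metadata in the sample line below are made up for illustration.
if __name__ == "__main__":
    sample_source = [
        'alpha = 0.5 #@param ["0.1", "0.5"] {type:"number", label:"Alpha"}\n',
    ]
    print(read_parameters_from_source(sample_source))
    # expected: [{'name': 'alpha', 'default': 0.5, 'options': ['0.1', '0.5'],
    #             'type': 'number', 'label': 'Alpha'}]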
| StarcoderdataPython |
3856 | _base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_stages=4,
#frozen_stages=4
),
roi_head=dict(
bbox_head=dict(
num_classes=3
)
)
)
dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
train=dict(
img_prefix='raubtierv2a/train/',
classes=classes,
ann_file='raubtierv2a/train/_annotations.coco.json'),
val=dict(
img_prefix='raubtierv2a/valid/',
classes=classes,
ann_file='raubtierv2a/valid/_annotations.coco.json'),
test=dict(
img_prefix='raubtierv2a/test/',
classes=classes,
ann_file='raubtierv2a/test/_annotations.coco.json'))
#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #original (8x2=16)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) #(4x2=8) 4 GPUs
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x2=2)
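# Assumed convention (consistent with the commented values above): the learning rate
# follows the linear scaling rule lr = 0.00125 * total_batch_size, so 16 -> 0.02,
# 8 -> 0.01 and 2 -> 0.0025.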
total_epochs=24
evaluation = dict(classwise=True, interval=1, metric='bbox')
work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu'
#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
| StarcoderdataPython |
1736612 | <filename>human_agent.py
from agent import Agent
class HumanAgent(Agent):
# overriding abstract method
def reinforce_owned_territory(self, state):
territory_name = input('Reinforce owned territory: ').strip()
return state.board.territories[territory_name]
# overriding abstract method
def reinforce_neutral_territory(self, state):
territory_name = input('Reinforce neutral territory: ').strip()
return state.board.territories[territory_name]
# overriding abstract method
def defend_territory(self, state, attacked_territory):
troop_count = int(input('Troops committed to defend ' + attacked_territory + ': '))
return troop_count
# overriding abstract method
def wants_to_attack(self, state):
wants_to_attack = input('Attack this turn (Y-Yes, other-No): ').strip()
if wants_to_attack == 'Y':
return True
else:
return False
# overriding abstract method
def select_attack_source(self, state):
territory_name = input('Attack an enemy territory from: ').strip()
return state.board.territories[territory_name]
# overriding abstract method
def select_attack_target(self, state, source):
territory_name = input('Attack enemy territory: ').strip()
return state.board.territories[territory_name]
# overriding abstract method
def select_attack_count(self, state, source):
troop_count = int(input('Troops committed to attack: '))
return troop_count
# overriding abstract method
def wants_to_fortify(self, state):
wants_to_fortify = input('Fortify this turn (Y-Yes, other-No): ').strip()
if wants_to_fortify == 'Y':
return True
else:
return False
# overriding abstract method
def select_fortify_source(self, state, target):
from_territory_name = input(f'Fortify {target} from territory: ').strip()
return state.board.territories[from_territory_name]
# overriding abstract method
def select_fortify_target(self, state):
to_territory_name = input('Fortify target territory: ').strip()
return state.board.territories[to_territory_name]
# overriding abstract method
def select_fortify_count(self, state, source):
troop_count = int(input('Move this many troops: '))
return troop_count
| StarcoderdataPython |
4808163 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
def automl_create_model_for_tables(
gcp_project_id: str,
gcp_region: str,
dataset_display_name: str,
api_endpoint: str = None,
model_display_name: str = None,
model_prefix: str = 'bwmodel',
optimization_objective: str = None,
include_column_spec_names: list = None,
exclude_column_spec_names: list = None,
train_budget_milli_node_hours: int = 1000,
) -> NamedTuple('Outputs', [('model_display_name', str), ('model_name', str), ('model_id', str)]):
import subprocess
import sys
# we could build a base image that includes these libraries if we don't want to do
# the dynamic installation when the step runs.
subprocess.run([sys.executable, '-m', 'pip', 'install', 'googleapis-common-protos==1.6.0', '--no-warn-script-location'],
env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
subprocess.run([sys.executable, '-m', 'pip', 'install', 'google-cloud-automl==0.9.0', '--quiet', '--no-warn-script-location'],
env={'PIP_DISABLE_PIP_VERSION_CHECK': '1'}, check=True)
import google
import logging
from google.api_core.client_options import ClientOptions
from google.cloud import automl_v1beta1 as automl
import time
logging.getLogger().setLevel(logging.INFO) # TODO: make level configurable
# TODO: we could instead check for region 'eu' and use 'eu-automl.googleapis.com:443'endpoint
# in that case, instead of requiring endpoint to be specified.
if api_endpoint:
client_options = ClientOptions(api_endpoint=api_endpoint)
client = automl.TablesClient(project=gcp_project_id, region=gcp_region,
client_options=client_options)
else:
client = automl.TablesClient(project=gcp_project_id, region=gcp_region)
if not model_display_name:
model_display_name = '{}_{}'.format(model_prefix, str(int(time.time())))
logging.info('Training model {}...'.format(model_display_name))
response = client.create_model(
model_display_name,
train_budget_milli_node_hours=train_budget_milli_node_hours,
dataset_display_name=dataset_display_name,
optimization_objective=optimization_objective,
include_column_spec_names=include_column_spec_names,
exclude_column_spec_names=exclude_column_spec_names,
)
logging.info("Training operation: {}".format(response.operation))
logging.info("Training operation name: {}".format(response.operation.name))
logging.info("Training in progress. This operation may take multiple hours to complete.")
# block termination of the op until training is finished.
result = response.result()
logging.info("Training completed: {}".format(result))
model_name = result.name
model_id = model_name.rsplit('/', 1)[-1]
print('model name: {}, model id: {}'.format(model_name, model_id))
return (model_display_name, model_name, model_id)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(automl_create_model_for_tables,
output_component_file='tables_component.yaml',
base_image='python:3.7')
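# Hypothetical follow-up sketch (not part of the original file): once the component YAML
# has been generated, it could be wired into a Kubeflow pipeline; the project, region and
# dataset values below are placeholders.
#
# create_model_op = kfp.components.load_component_from_file('tables_component.yaml')
#
# @kfp.dsl.pipeline(name='automl-tables-train')
# def train_pipeline(gcp_project_id='my-project', gcp_region='us-central1',
#                    dataset_display_name='my_dataset'):
#     create_model_op(gcp_project_id, gcp_region, dataset_display_name)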
| StarcoderdataPython |
1611215 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsAssociationParameter(object):
"""
LogAnalyticsAssociationParameter
"""
#: A constant which can be used with the status property of a LogAnalyticsAssociationParameter.
#: This constant has a value of "SUCCEEDED"
STATUS_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the status property of a LogAnalyticsAssociationParameter.
#: This constant has a value of "FAILED"
STATUS_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new LogAnalyticsAssociationParameter object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsAssociationParameter.
:type agent_id: str
:param entity_type:
The value to assign to the entity_type property of this LogAnalyticsAssociationParameter.
:type entity_type: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsAssociationParameter.
:type entity_id: str
:param source_id:
The value to assign to the source_id property of this LogAnalyticsAssociationParameter.
:type source_id: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsAssociationParameter.
:type source_display_name: str
:param source_type:
The value to assign to the source_type property of this LogAnalyticsAssociationParameter.
:type source_type: str
:param status:
The value to assign to the status property of this LogAnalyticsAssociationParameter.
Allowed values for this property are: "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param missing_properties:
The value to assign to the missing_properties property of this LogAnalyticsAssociationParameter.
:type missing_properties: list[str]
:param required_properties:
The value to assign to the required_properties property of this LogAnalyticsAssociationParameter.
:type required_properties: list[str]
"""
self.swagger_types = {
'agent_id': 'str',
'entity_type': 'str',
'entity_id': 'str',
'source_id': 'str',
'source_display_name': 'str',
'source_type': 'str',
'status': 'str',
'missing_properties': 'list[str]',
'required_properties': 'list[str]'
}
self.attribute_map = {
'agent_id': 'agentId',
'entity_type': 'entityType',
'entity_id': 'entityId',
'source_id': 'sourceId',
'source_display_name': 'sourceDisplayName',
'source_type': 'sourceType',
'status': 'status',
'missing_properties': 'missingProperties',
'required_properties': 'requiredProperties'
}
self._agent_id = None
self._entity_type = None
self._entity_id = None
self._source_id = None
self._source_display_name = None
self._source_type = None
self._status = None
self._missing_properties = None
self._required_properties = None
@property
def agent_id(self):
"""
Gets the agent_id of this LogAnalyticsAssociationParameter.
The agent unique identifier.
:return: The agent_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""
Sets the agent_id of this LogAnalyticsAssociationParameter.
The agent unique identifier.
:param agent_id: The agent_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._agent_id = agent_id
@property
def entity_type(self):
"""
Gets the entity_type of this LogAnalyticsAssociationParameter.
The entity type.
:return: The entity_type of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""
Sets the entity_type of this LogAnalyticsAssociationParameter.
The entity type.
:param entity_type: The entity_type of this LogAnalyticsAssociationParameter.
:type: str
"""
self._entity_type = entity_type
@property
def entity_id(self):
"""
Gets the entity_id of this LogAnalyticsAssociationParameter.
The entity unique identifier.
:return: The entity_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this LogAnalyticsAssociationParameter.
The entity unique identifier.
:param entity_id: The entity_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._entity_id = entity_id
@property
def source_id(self):
"""
Gets the source_id of this LogAnalyticsAssociationParameter.
The source name.
:return: The source_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_id
@source_id.setter
def source_id(self, source_id):
"""
Sets the source_id of this LogAnalyticsAssociationParameter.
The source name.
:param source_id: The source_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_id = source_id
@property
def source_display_name(self):
"""
Gets the source_display_name of this LogAnalyticsAssociationParameter.
The source display name.
:return: The source_display_name of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_display_name
@source_display_name.setter
def source_display_name(self, source_display_name):
"""
Sets the source_display_name of this LogAnalyticsAssociationParameter.
The source display name.
:param source_display_name: The source_display_name of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_display_name = source_display_name
@property
def source_type(self):
"""
Gets the source_type of this LogAnalyticsAssociationParameter.
The source type.
:return: The source_type of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_type
@source_type.setter
def source_type(self, source_type):
"""
Sets the source_type of this LogAnalyticsAssociationParameter.
The source type.
:param source_type: The source_type of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_type = source_type
@property
def status(self):
"""
Gets the status of this LogAnalyticsAssociationParameter.
The status. Either FAILED or SUCCEEDED.
Allowed values for this property are: "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this LogAnalyticsAssociationParameter.
The status. Either FAILED or SUCCEEDED.
:param status: The status of this LogAnalyticsAssociationParameter.
:type: str
"""
allowed_values = ["SUCCEEDED", "FAILED"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def missing_properties(self):
"""
Gets the missing_properties of this LogAnalyticsAssociationParameter.
A list of missing properties.
:return: The missing_properties of this LogAnalyticsAssociationParameter.
:rtype: list[str]
"""
return self._missing_properties
@missing_properties.setter
def missing_properties(self, missing_properties):
"""
Sets the missing_properties of this LogAnalyticsAssociationParameter.
A list of missing properties.
:param missing_properties: The missing_properties of this LogAnalyticsAssociationParameter.
:type: list[str]
"""
self._missing_properties = missing_properties
@property
def required_properties(self):
"""
Gets the required_properties of this LogAnalyticsAssociationParameter.
        A list of required properties.
:return: The required_properties of this LogAnalyticsAssociationParameter.
:rtype: list[str]
"""
return self._required_properties
@required_properties.setter
def required_properties(self, required_properties):
"""
Sets the required_properties of this LogAnalyticsAssociationParameter.
        A list of required properties.
:param required_properties: The required_properties of this LogAnalyticsAssociationParameter.
:type: list[str]
"""
self._required_properties = required_properties
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
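# Illustrative construction sketch (assumption; not part of the generated SDK source).
# The attribute values below are placeholders:
#
#   param = LogAnalyticsAssociationParameter(
#       agent_id='example-agent-ocid',
#       entity_type='Host (Linux)',
#       status='SUCCEEDED')
#   print(param)  # __repr__ renders the model via formatted_flat_dict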
| StarcoderdataPython |
49408 | <gh_stars>10-100
#
# Module with functions for
# Gruenbichler and Longstaff (1996) model
#
# (c) Dr. <NAME>
# Listed Volatility and Variance Derivatives
#
import math
import numpy as np
import scipy.stats as scs
def futures_price(v0, kappa, theta, zeta, T):
''' Futures pricing formula in GL96 model.
Parameters
==========
v0: float (positive)
current volatility level
kappa: float (positive)
mean-reversion factor
theta: float (positive)
long-run mean of volatility
zeta: float (positive)
volatility risk premium
T: float (positive)
time-to-maturity
Returns
=======
future: float
price of a future
'''
alpha = kappa * theta
beta = kappa + zeta
future = (alpha / beta * (1 - math.exp(-beta * T))
+ math.exp(-beta * T) * v0)
return future
def cx(K, gamma, nu, lamb):
''' Complementary distribution function of non-central chi-squared density.
Parameters
==========
K: float (positive)
strike price
gamma: float (positive)
as defined in the GL96 model
nu: float (positive)
degrees of freedom
lamb: float (positive)
non-centrality parameter
Returns
=======
complementary distribution of nc cs density
'''
return 1 - scs.ncx2.cdf(gamma * K, nu, lamb)
def call_price(v0, kappa, theta, sigma, zeta, T, r, K):
''' Call option pricing formula in GL96 Model
Parameters
==========
v0: float (positive)
current volatility level
kappa: float (positive)
mean-reversion factor
theta: float (positive)
long-run mean of volatility
sigma: float (positive)
volatility of volatility
zeta: float (positive)
volatility risk premium
T: float (positive)
time-to-maturity
r: float (positive)
risk-free short rate
K: float(positive)
strike price of the option
Returns
=======
call: float
present value of European call option
'''
D = math.exp(-r * T) # discount factor
alpha = kappa * theta
beta = kappa + zeta
gamma = 4 * beta / (sigma ** 2 * (1 - math.exp(-beta * T)))
nu = 4 * alpha / sigma ** 2
lamb = gamma * math.exp(-beta * T) * v0
# the pricing formula
call = (D * math.exp(-beta * T) * v0 * cx(K, gamma, nu + 4, lamb)
+ D * (alpha / beta) * (1 - math.exp(-beta * T))
* cx(K, gamma, nu + 2, lamb)
- D * K * cx(K, gamma, nu, lamb))
return call
def generate_paths(x0, kappa, theta, sigma, T, M, I):
''' Simulation of square-root diffusion with exact discretization
Parameters
==========
x0: float (positive)
starting value
kappa: float (positive)
mean-reversion factor
theta: float (positive)
long-run mean
sigma: float (positive)
volatility (of volatility)
T: float (positive)
time-to-maturity
M: int
number of time intervals
I: int
number of simulation paths
Returns
=======
x: NumPy ndarray object
simulated paths
'''
dt = float(T) / M
    x = np.zeros((M + 1, I), dtype=np.float64)
x[0, :] = x0
# matrix filled with standard normal distributed rv
ran = np.random.standard_normal((M + 1, I))
d = 4 * kappa * theta / sigma ** 2
# constant factor in the integrated process of x
c = (sigma ** 2 * (1 - math.exp(-kappa * dt))) / (4 * kappa)
if d > 1:
for t in range(1, M + 1):
# non-centrality parameter
l = x[t - 1, :] * math.exp(-kappa * dt) / c
# matrix with chi-squared distributed rv
chi = np.random.chisquare(d - 1, I)
x[t, :] = c * ((ran[t] + np.sqrt(l)) ** 2 + chi)
else:
for t in range(1, M + 1):
l = x[t - 1, :] * math.exp(-kappa * dt) / c
N = np.random.poisson(l / 2, I)
chi = np.random.chisquare(d + 2 * N, I)
x[t, :] = c * chi
return x
def call_estimator(v0, kappa, theta, sigma, T, r, K, M, I):
''' Estimation of European call option price in GL96 Model
via Monte Carlo simulation
Parameters
==========
v0: float (positive)
current volatility level
kappa: float (positive)
mean-reversion factor
theta: float (positive)
long-run mean of volatility
sigma: float (positive)
volatility of volatility
T: float (positive)
time-to-maturity
r: float (positive)
risk-free short rate
K: float (positive)
strike price of the option
M: int
number of time intervals
I: int
number of simulation paths
Returns
=======
callvalue: float
MCS estimator for European call option
'''
V = generate_paths(v0, kappa, theta, sigma, T, M, I)
callvalue = math.exp(-r * T) * np.sum(np.maximum(V[-1] - K, 0)) / I
return callvalue
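# --- Illustrative usage (added sketch, not part of the original module) ---
# The parameter values below are hypothetical and only meant to show how the
# functions above fit together; the Monte Carlo estimate of the call should
# land close to the analytical price.
if __name__ == '__main__':
    v0, kappa, theta, sigma, zeta = 20.0, 2.0, 20.0, 1.0, 0.0
    T, r, K = 1.0, 0.02, 22.0
    print('future value :', futures_price(v0, kappa, theta, zeta, T))
    print('call (exact) :', call_price(v0, kappa, theta, sigma, zeta, T, r, K))
    print('call (MCS)   :', call_estimator(v0, kappa, theta, sigma, T, r, K, M=50, I=50000))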
| StarcoderdataPython |
3356790 | <gh_stars>0
import requests
from bs4 import BeautifulSoup
import time
import random
HOST = "http://www.ecvv.com{}"
def open_company_file(start):
with open("file_cs-uri-stem_product_W3SVC.txt", 'r') as r:
ret = r.readlines()[start + 1:]
return ret
def open_products_file(start):
with open("file_cs-uri-stem_product_W3SVC.txt", 'r') as r:
ret = r.readlines()[start + 1:]
return ret
def restart():
with open("product_title.txt", "r") as r:
line_numbers = r.readlines().__len__()
return line_numbers
def get_title(url):
wb_data = requests.get(url)
soup = BeautifulSoup(wb_data.text, 'lxml')
if "404.0" in soup.text:
return "404"
else:
return soup.select("title")[0].text.strip()
def parse_line(line):
return line.split(',')[1]
if __name__ == '__main__':
with open('product_title.txt', 'a') as w:
line_numbers = restart()
print(line_numbers)
url_list = open_products_file(line_numbers)
for i in url_list:
if "\"" in i:
w.writelines("url wrong\r\n")
else:
url = HOST.format(parse_line(i))
title = get_title(url)
print(url,title)
w.writelines(title + "\r\n")
time.sleep(0.5+random.random()*5)
| StarcoderdataPython |
52900 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name='t3cpo',
packages=find_packages(exclude=['tests', '.github']),
version='0.1',
license='MIT',
description='Python wrapper for the several 3Commas api endpoints',
author='mmeijden',
author_email='<EMAIL>', # Type in your E-Mail
url='https://github.com/mMeijden/three3cpo',
keywords=['api', 'wrapper', '3c', '3Commas', 'crypto', 'bitcoin', 'altcoin', 'bots', 'exchange', 'trading'],
install_requires=[
'requests'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| StarcoderdataPython |
1742390 | <filename>tests/utils/test_ghash.py
# tests/utils/test_ghash.py
# =========================
#
# Copying
# -------
#
# Copyright (c) 2018 kado authors.
#
# This file is part of the *kado* project.
#
# kado is a free software project. You can redistribute it and/or
# modify if under the terms of the MIT License.
#
# This software project is distributed *as is*, WITHOUT WARRANTY OF ANY
# KIND; including but not limited to the WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE and NONINFRINGEMENT.
#
# You should have received a copy of the MIT License along with kado.
# If not, see <http://opensource.org/licenses/MIT>.
#
import unittest
import pkg_resources
import hashlib
from kado import constants as c
from kado.utils import ghash
from tests.lib import constants as tc
class TestGHash(unittest.TestCase):
"""Test case for :func:`kado.utils.ghash.ghash`."""
def test_ghash_sequence(self):
"""Basic ``ghash`` test on a sequence of characters."""
INIT_HASH = 0
TEST_DATA = b'0123456789abcdefghijklmnopqrstuvwxyz'
HASH_DATA = [
0x192340397c988b6f, 0xefc5bc5fa4408630, 0xfb5bc99628d59ad4,
0xe53ec3ad106555d0, 0xacd2e5528d1c8edb, 0xded83bd80e3f6a80,
0xd4f999f0069cd46d, 0xd4583576d6033deb, 0xdc6db7c69e83e859,
0x59a1c8d2f3490df6, 0x9d17e41d97809999, 0x88c6cbdd1a3c6ce9,
0x962aa19bd5b0cb1e, 0x5e57e8667fde63e8, 0x6b9eafafa32707ed,
0x1b20adeb05623a92, 0xf1a640886be62cf3, 0x05f655d4ebc1e835,
0x177e24ea167a3da7, 0x0da18f53577cb417, 0x074828ada929b1ec,
0x7d41698b0752fa52, 0xb71d01536265804d, 0x3f4f7448de07abf9,
0x2dd1488ddc7c6e30, 0x2fe3573da7b77cc9, 0x55b9f117d610c05b,
0x4db621a35cbf59ee, 0x96a2af56523d1d2f, 0xf4a744db21c8eedb,
0xfaf92425521e6566, 0x5e1c09452e85986d, 0x9ee7e422fd0042d1,
0x3e98973ebf9498d2, 0x84b60bd37abb6f17, 0x6e24b64e2acb1d81,
]
for idx, ch in enumerate(TEST_DATA):
with self.subTest(index=idx):
h = ghash.ghash(INIT_HASH, ch)
self.assertEqual(h, HASH_DATA[idx])
def test_ghash_chained(self):
"""Basic ``ghash`` test on a chained character sequence."""
INIT_HASH = 0
TEST_DATA = b'0123456789abcdefghijklmnopqrstuvwxyz'
HASH_DATA = [
0x192340397c988b6f, 0x220c3cd29d719d0e, 0x3f74433b63b8d4f0,
0x64274a23d7d6ffb0, 0x7521799a3cca8e3b, 0xc91b2f0c87d486f6,
0x672ff8091645e259, 0xa2b82589028f029d, 0x21de02d8a3a1ed93,
0x9d5dce843a8ce91c, 0xd7d381260c9a6bd1, 0x386dce293371448b,
0x07063dee3c935434, 0x6c646442f9050c50, 0x446778359531208d,
0xa3ef9e562fc47bac, 0x39857d34cb6f244b, 0x7901503e82a030cb,
0x0980c5671bba9f3d, 0x20a31a218ef1f291, 0x488e5cf0c70d970e,
0x0e5e236c956e286e, 0xd3d9482c8d41d129, 0xe70204a1f88b4e4b,
0xfbd551d1cd930ac6, 0x278dfae142dd9255, 0xa4d5e6da5bcbe505,
0x9761ef58145723f8, 0xc5668e067aeb651f, 0x7f7460e8179fb919,
0xf9e1e5f5815dd798, 0x51dfd5303141479d, 0x42a78e835f82d20b,
0xc3e7b4457e9a3ce8, 0x0c85745e77efe8e7, 0x872f9f0b1aaaef4f,
]
h = INIT_HASH
for idx, ch in enumerate(TEST_DATA):
with self.subTest(index=idx):
h = ghash.ghash(h, ch)
self.assertEqual(h, HASH_DATA[idx])
class TestCut(unittest.TestCase):
"""Test case for :func:`kado.utils.ghash.cut`."""
def test_cut_chunk_min_size_b1(self):
"""Data length lower or equal to ``GHASH_CHUNK_LO`` should not cut."""
TEST_DATA = b'1' * c.GHASH_CHUNK_LO
idx = ghash.cut(TEST_DATA)
self.assertEqual(idx, c.GHASH_CHUNK_LO)
def test_cut_chunk_max_size_b1(self):
"""Maximum chunk size should not be over ``GHASH_CHUNK_HI``."""
TEST_DATA = b'1' * (c.GHASH_CHUNK_HI + 1)
idx = ghash.cut(TEST_DATA)
self.assertEqual(idx, c.GHASH_CHUNK_HI)
def test_cut_data(self):
"""Test cutting point of known data files."""
for name, chunks in tc.DATA_CHUNKS_SHA256.items():
ct_point = chunks[0][1]
with self.subTest(file=name):
with pkg_resources.resource_stream('tests.lib', name) as fp:
self.assertEqual(ghash.cut(fp.read()), ct_point)
class TestChop(unittest.TestCase):
"""Test case for :func:`kado.utils.ghash.chop`."""
def test_chop_data_len(self):
"""Last chunk index should match the data length."""
for name in tc.DATA_CHUNKS_SHA256:
with pkg_resources.resource_stream('tests.lib', name) as fp:
content = fp.read()
last = 0
for _, end, _ in ghash.chop(content):
last = end
with self.subTest(file=name):
self.assertEqual(last, len(content))
def test_chop_data(self):
"""Chop known data files."""
for name, chunks in tc.DATA_CHUNKS_SHA256.items():
with pkg_resources.resource_stream('tests.lib', name) as fp:
for idx, ck in enumerate(ghash.chop(fp.read())):
with self.subTest(file=name, index=idx):
self.assertEqual(
chunks[idx],
(ck[0], ck[1], hashlib.sha256(ck[2]).hexdigest())
)
class TestRead(unittest.TestCase):
"""Test case for :func:`kado.utils.ghash.read`."""
def test_read_data_len(self):
"""Last chunk index should match the data length."""
for name in tc.DATA_CHUNKS_SHA256:
with pkg_resources.resource_stream('tests.lib', name) as fp:
l_content = len(fp.read())
last = 0
for _, end, _ in ghash.read(
pkg_resources.resource_filename('tests.lib', name)
):
last = end
with self.subTest(file=name):
self.assertEqual(last, l_content)
def test_read_data(self):
"""Read and chop known data files."""
for name, chunks in tc.DATA_CHUNKS_SHA256.items():
for idx, ck in enumerate(ghash.read(
pkg_resources.resource_filename('tests.lib', name)
)):
with self.subTest(file=name, index=idx):
self.assertEqual(
chunks[idx],
(ck[0], ck[1], hashlib.sha256(ck[2]).hexdigest())
)
def test_read_eq_chop(self):
"""``read`` and ``chop`` should return the exact same data."""
for name in tc.DATA_CHUNKS_SHA256:
with pkg_resources.resource_stream('tests.lib', name) as fp:
chop = [
(start, end, hashlib.sha256(data).hexdigest())
for start, end, data in ghash.chop(fp.read())
]
read = [
(start, end, hashlib.sha256(data).hexdigest())
for start, end, data in ghash.read(
pkg_resources.resource_filename('tests.lib', name)
)
]
with self.subTest(file=name):
self.assertEqual(chop, read)
| StarcoderdataPython |
1745893 | class 문석쌤튜플(tuple):
def __add__(self, other):
        # We overload the '+' operator (override it with a function we define ourselves)
assert len(self) == len(other)
        # Assume self and other (the two tuples being added) have the same length
return tuple([x + y for x, y in zip(self, other)])
        # ...and just add them element-wise
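# Illustrative usage (added example): element-wise addition of two tuples.
#   t1 = 문석쌤튜플((1, 2, 3)); t2 = 문석쌤튜플((10, 20, 30))
#   t1 + t2  ->  (11, 22, 33)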
| StarcoderdataPython |
3339588 | # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# lambdafcns contains symbolic links to lambda functions in boss-tools/lambda.
# Since lambda is a reserved word, this allows importing from that folder
# without updating scripts responsible for deploying the lambda code.
import boto3
from lambdafcns import ingest_queue_upload_volumetric_lambda as iqu
import json
import math
import unittest
from unittest.mock import patch, MagicMock
@patch('boto3.resource')
class TestIngestQueueUploadLambda(unittest.TestCase):
def tile_count(self, kwargs):
x = math.ceil((kwargs["x_stop"] - kwargs["x_start"]) / kwargs["x_tile_size"])
y = math.ceil((kwargs["y_stop"] - kwargs["y_start"]) / kwargs["y_tile_size"])
z = math.ceil((kwargs["z_stop"] - kwargs["z_start"]) / kwargs["z_chunk_size"])
t = math.ceil((kwargs["t_stop"] - kwargs["t_start"]) / kwargs["t_tile_size"])
return x * y * z * t
def test_all_messages_are_there(self, fake_resource):
"""
        This test shows that when items are generated by multiple lambdas, the union of those items
        is exactly the same set that a single lambda would generate on its own.
        The test first walks create_messages() as a single lambda would, populating a dictionary
        with every value returned.
        It then runs create_messages() several more times, each time with the appropriate
        items_to_skip and MAX_NUM_ITEMS_PER_LAMBDA, and pulls each generated tile key out of the
        dictionary to verify that every item is accounted for. At the end there should be no items
        left in the dictionary.
        The test can be run with many different tile sizes, start/stop values, and values of num_lambdas.
Args:
fake_resource:
Returns:
"""
args = {
"upload_sfn": "IngestUpload",
"x_start": 0,
"x_stop": 2048,
"y_start": 0,
"y_stop": 2048,
"z_start": 0,
"z_stop": 128,
"t_start": 0,
"t_stop": 1,
"project_info": [
"1",
"2",
"3"
],
"ingest_queue": "https://queue.amazonaws.com/...",
"job_id": 11,
"upload_queue": "https://queue.amazonaws.com/...",
"x_tile_size": 1024,
"y_tile_size": 1024,
"t_tile_size": 1,
"z_tile_size": 1,
"resolution": 0,
"items_to_skip": 0,
'MAX_NUM_ITEMS_PER_LAMBDA': 500000,
'z_chunk_size': 64
}
# Walk create_messages generator as a single lambda would and populate the dictionary with all Keys like this
# "Chunk --- items".
dict = {}
msgs = iqu.create_messages(args)
for msg_json in msgs:
ct_key = self.generate_chunk_tile_key(msg_json)
print(ct_key)
if ct_key not in dict:
dict[ct_key] = 1
else:
self.fail("Dictionary already contains key: ".format(ct_key))
# Verify correct count of items in dictionary
dict_length = len(dict.keys())
item_count = self.tile_count(args)
print("Item Count: {}".format(item_count))
self.assertEqual(dict_length, item_count)
# loop through create_messages() num_lambda times pulling out each tile from the dictionary.
num_lambdas = 2
args["MAX_NUM_ITEMS_PER_LAMBDA"] = math.ceil(dict_length / num_lambdas)
for skip in range(0, dict_length, args["MAX_NUM_ITEMS_PER_LAMBDA"]):
args["items_to_skip"] = skip
#print("Skip: " + str(skip))
msgs = iqu.create_messages(args)
for msg_json in msgs:
ct_key = self.generate_chunk_tile_key(msg_json)
if ct_key in dict:
del dict[ct_key]
else:
self.fail("Dictionary does not contains key: ".format(ct_key))
# Verify Dictionary has no left over items.
self.assertEqual(len(dict), 0)
def generate_chunk_tile_key(self, msg_json):
"""
Generate a key to track messages for testing.
Args:
msg_json (str): JSON message encoded as string intended for the upload queue.
Returns:
(str): Unique key identifying message.
"""
msg = json.loads(msg_json)
return msg["chunk_key"]
| StarcoderdataPython |
186821 | # -*- coding: utf-8 -*-
from datetime import datetime
import tensorflow as tf
import tensornet as tn
import numpy as np
def read_dataset(data_path, days, match_pattern, batch_size, parse_func, num_parallel_calls = 12):
ds_data_files = tn.data.list_files(data_path, days=days, match_pattern=match_pattern)
dataset = ds_data_files.shard(num_shards=tn.core.shard_num(), index=tn.core.self_shard_id())
dataset = dataset.interleave(lambda f: tf.data.TFRecordDataset(f, buffer_size=1024 * 100),
cycle_length=4, block_length=8,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size)
dataset = dataset.map(map_func=lambda example_proto: parse_func(example_proto),
num_parallel_calls=num_parallel_calls)
dataset = tn.data.BalanceDataset(dataset)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def trained_delta_days(cur_dt, model_dir):
last_train_dt = tn.model.read_last_train_dt(model_dir)
if not last_train_dt:
return 1
last_train_dt = datetime.fromisoformat(last_train_dt)
cur_dt = datetime.fromisoformat(cur_dt)
return (cur_dt - last_train_dt).days
def dump_predict(result, path):
result = np.concatenate(result, axis=1)
content = ""
for y, y_pred in result:
content += "{}\t{}\n".format(y, y_pred)
filename = "{}/part-{:05d}".format(path, tn.core.self_shard_id())
tf.io.write_file(filename, content)
return
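# Illustrative usage (added sketch): `parse_example` below is a hypothetical
# parse function turning a batch of serialized tf.Examples into (features, labels);
# the HDFS path, days and match pattern are placeholders as well.
#   train_ds = read_dataset("hdfs://.../train", days=["2020-01-01"],
#                           match_pattern="part-*", batch_size=256,
#                           parse_func=parse_example)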
| StarcoderdataPython |
3205727 | <filename>sunnysouth/marketplace/serializers/addresses.py
# Django REST Framework
from rest_framework import serializers
# Models
from sunnysouth.marketplace.models import Address
class AddressModelSerializer(serializers.ModelSerializer):
class Meta:
model = Address
fields = '__all__'
read_only_fields = ['addressable_object_id', 'addressable_content_type']
| StarcoderdataPython |
453 | """empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
| StarcoderdataPython |
3233950 | <gh_stars>1-10
from typing import Tuple, List
from timeatlas.abstract import AbstractBaseMetadataType
class Unit(AbstractBaseMetadataType):
""" Defines a physical unit of measurement, like Celsius."""
def __init__(self, name: str, symbol: str, data_type: str):
self.name = name
self.symbol = symbol
self.data_type = data_type
def items(self) -> List[Tuple]:
"""Creating dict.iterable
Imitating the dict iterable
for k, v in dict.items()
Returns:
List[Tuple]
"""
return [("unit", self)]
| StarcoderdataPython |
3263354 | <gh_stars>10-100
from django.contrib.messages import constants as messages
from path.base import STATIC_DIR, TEMPLATES_DIR
from django.core.urlresolvers import reverse_lazy
from datetime import timedelta
SECRET_KEY = '<secret_key>'
ADMINS = (<admins_tuple>)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'easy_thumbnails',
'rest_framework',
'rest_framework_extensions',
'registration',
'debug_toolbar',
'crispy_forms',
'ygo_core',
'ygo_variables',
'ygo_cards',
'ygo_api',
'ygo_import'
)
MIDDLEWARE_CLASSES = (
'htmlmin.middleware.HtmlMinifyMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'ygo_core.urls'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = False
WSGI_APPLICATION = 'settings.wsgi.application'
TEMPLATE_DIRS = (
TEMPLATES_DIR,
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'ygo_core.context_processors.settings'
)
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
)
STATICFILES_DIRS = (STATIC_DIR,)
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
MESSAGE_TAGS = {
messages.ERROR: 'danger',
messages.SUCCESS: 'info'
}
LOGIN_URL = reverse_lazy('auth_login')
LOGOUT_URL = reverse_lazy('auth_logout')
LOGIN_REDIRECT_URL = reverse_lazy('collection')
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
ACCOUNT_ACTIVATION_DAYS = 30
REGISTRATION_AUTO_LOGIN = True
EMAIL_HOST = '<email_host>'
EMAIL_HOST_USER = '<email_user>'
EMAIL_HOST_PASSWORD = '<<PASSWORD>>'
EMAIL_USE_SSL = True
EMAIL_PORT = <email_ssl_port>
DEFAULT_FROM_EMAIL = '<email_from>'
SERVER_EMAIL = '<server_email>'
INTERNAL_IPS = ['<internal_ip>',]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
HTML_MINIFY = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYBEAT_SCHEDULE = {
'fetch-cards': {
'task': 'ygo_cards.tasks.cards.fetch_cards',
'schedule': timedelta(hours=4),
},
'fetch-sets': {
'task': 'ygo_cards.tasks.sets.fetch_sets',
'schedule': timedelta(hours=1),
},
}
CELERY_TIMEZONE = 'UTC'
| StarcoderdataPython |
1652589 | <gh_stars>1-10
#Ref: <NAME>
"""
How do we know how many clusters?
Use AIC/BIC
Bayesian information criterion (BIC) can be helpful to pick the right number of parameters.
BIC estimates the quality of a model using penalty terms for # parameters.
If we fit data with 100 gaussians we will be overfitting.
BIC helps find the optimal number of components when we do not know it.
Looks asymptotic, pick the one at the elbow.
"""
import numpy as np
import cv2
img = cv2.imread("images/Alloy.jpg")
img2 = img.reshape((-1,3))
from sklearn.mixture import GaussianMixture as GMM
#####################
#This part calculates BIC
n = 2
gmm_model = GMM(n, covariance_type='tied').fit(img2)
#print(gmm_model)
bic_values = gmm_model.bic(img2)
print(bic_values)
n_components = np.arange(1,10)
gmm_models = [GMM(n, covariance_type='tied').fit(img2) for n in n_components]
#print(gmm_models)
print([m.bic(img2) for m in gmm_models])  # BIC for each candidate model
from matplotlib import pyplot as plt
plt.plot(n_components, [m.bic(img2) for m in gmm_models], label='BIC')
plt.xlabel('n_components')
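# Added sketch: finish the plot and read off a component count. Taking the BIC
# minimum is one simple heuristic; the note above suggests picking the elbow of
# the curve instead, which may favour a smaller n.
plt.ylabel('BIC')
plt.legend()
plt.show()
n_best = n_components[np.argmin([m.bic(img2) for m in gmm_models])]
print("n_components with the lowest BIC:", n_best)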
| StarcoderdataPython |
1673156 | <reponame>Autodesk/py-cloud-compute-cannon<filename>pyccc/_native.py
# Copyright 2016-2018 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a helper module for 2/3 compatibility - it gives access to the current interpreter's
native builtins, instead of those from the futures module
"""
import sys
__all__ = ['native']
if sys.version_info[0] == 2:
import __builtin__ as native
else:
assert sys.version_info[0] >= 3
import builtins as native
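# Illustrative usage (added example): grab the interpreter's own builtins even
# when the `future`/`builtins` backports are importable, e.g.
#   from pyccc._native import native
#   is_native_str = isinstance(value, native.str)   # py2: __builtin__.str, py3: builtins.str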
| StarcoderdataPython |
1768830 | <filename>yt/frontends/tipsy/io.py
"""
Tipsy data-file handling function
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2014, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import glob
import numpy as np
import os
import struct
from yt.geometry.oct_container import \
_ORDER_MAX
from yt.utilities.io_handler import \
BaseIOHandler
from yt.utilities.lib.geometry_utils import \
compute_morton
from yt.utilities.logger import ytLogger as \
mylog
CHUNKSIZE = 10000000
class IOHandlerTipsyBinary(BaseIOHandler):
_dataset_type = "tipsy"
_vector_fields = ("Coordinates", "Velocity", "Velocities")
_pdtypes = None # dtypes, to be filled in later
_ptypes = ( "Gas",
"DarkMatter",
"Stars" )
_chunksize = 64*64*64
_aux_fields = None
_fields = ( ("Gas", "Mass"),
("Gas", "Coordinates"),
("Gas", "Velocities"),
("Gas", "Density"),
("Gas", "Temperature"),
("Gas", "Epsilon"),
("Gas", "Metals"),
("Gas", "Phi"),
("DarkMatter", "Mass"),
("DarkMatter", "Coordinates"),
("DarkMatter", "Velocities"),
("DarkMatter", "Epsilon"),
("DarkMatter", "Phi"),
("Stars", "Mass"),
("Stars", "Coordinates"),
("Stars", "Velocities"),
("Stars", "Metals"),
("Stars", "FormationTime"),
("Stars", "Epsilon"),
("Stars", "Phi")
)
def __init__(self, *args, **kwargs):
self._aux_fields = []
super(IOHandlerTipsyBinary, self).__init__(*args, **kwargs)
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
def _read_aux_fields(self, field, mask, data_file):
"""
Read in auxiliary files from gasoline/pkdgrav.
This method will automatically detect the format of the file.
"""
filename = data_file.filename+'.'+field
dtype = None
# We need to do some fairly ugly detection to see what format the auxiliary
# files are in. They can be either ascii or binary, and the binary files can be
# either floats, ints, or doubles. We're going to use a try-catch cascade to
# determine the format.
try:#ASCII
auxdata = np.genfromtxt(filename, skip_header=1)
if auxdata.size != np.sum(data_file.total_particles.values()):
print("Error reading auxiliary tipsy file")
raise RuntimeError
except ValueError:#binary/xdr
f = open(filename, 'rb')
l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
if l != np.sum(data_file.total_particles.values()):
print("Error reading auxiliary tipsy file")
raise RuntimeError
dtype = 'd'
if field in ('iord', 'igasorder', 'grp'):#These fields are integers
dtype = 'i'
try:# If we try loading doubles by default, we can catch an exception and try floats next
auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
except struct.error:
f.seek(4)
dtype = 'f'
try:
auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
except struct.error: # None of the binary attempts to read succeeded
print("Error reading auxiliary tipsy file")
raise RuntimeError
# Use the mask to slice out the appropriate particle type data
if mask.size == data_file.total_particles['Gas']:
return auxdata[:data_file.total_particles['Gas']]
elif mask.size == data_file.total_particles['DarkMatter']:
return auxdata[data_file.total_particles['Gas']:-data_file.total_particles['DarkMatter']]
else:
return auxdata[-data_file.total_particles['Stars']:]
def _fill_fields(self, fields, vals, mask, data_file):
if mask is None:
size = 0
else:
size = mask.sum()
rv = {}
for field in fields:
mylog.debug("Allocating %s values for %s", size, field)
if field in self._aux_fields: #Read each of the auxiliary fields
rv[field] = self._read_aux_fields(field, mask, data_file)
elif field in self._vector_fields:
rv[field] = np.empty((size, 3), dtype="float64")
if size == 0: continue
rv[field][:,0] = vals[field]['x'][mask]
rv[field][:,1] = vals[field]['y'][mask]
rv[field][:,2] = vals[field]['z'][mask]
else:
rv[field] = np.empty(size, dtype="float64")
if size == 0: continue
rv[field][:] = vals[field][mask]
if field == "Coordinates":
eps = np.finfo(rv[field].dtype).eps
for i in range(3):
rv[field][:,i] = np.clip(rv[field][:,i],
self.domain_left_edge[i] + eps,
self.domain_right_edge[i] - eps)
return rv
def _read_particle_coords(self, chunks, ptf):
data_files = set([])
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
for data_file in sorted(data_files):
poff = data_file.field_offsets
tp = data_file.total_particles
f = open(data_file.filename, "rb")
for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
f.seek(poff[ptype], os.SEEK_SET)
total = 0
while total < tp[ptype]:
p = np.fromfile(f, self._pdtypes[ptype],
count=min(self._chunksize, tp[ptype] - total))
total += p.size
d = [p["Coordinates"][ax].astype("float64") for ax in 'xyz']
del p
yield ptype, d
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
data_files = set([])
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
for data_file in sorted(data_files):
poff = data_file.field_offsets
tp = data_file.total_particles
f = open(data_file.filename, "rb")
for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
f.seek(poff[ptype], os.SEEK_SET)
total = 0
while total < tp[ptype]:
p = np.fromfile(f, self._pdtypes[ptype],
count=min(self._chunksize, tp[ptype] - total))
total += p.size
mask = selector.select_points(
p["Coordinates"]['x'].astype("float64"),
p["Coordinates"]['y'].astype("float64"),
p["Coordinates"]['z'].astype("float64"), 0.0)
if mask is None: continue
tf = self._fill_fields(field_list, p, mask, data_file)
for field in field_list:
yield (ptype, field), tf.pop(field)
f.close()
def _update_domain(self, data_file):
'''
This method is used to determine the size needed for a box that will
bound the particles. It simply finds the largest position of the
whole set of particles, and sets the domain to +/- that value.
'''
ds = data_file.ds
ind = 0
# Check to make sure that the domain hasn't already been set
# by the parameter file
if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
return
with open(data_file.filename, "rb") as f:
ds.domain_left_edge = 0
ds.domain_right_edge = 0
f.seek(ds._header_offset)
mi = np.array([1e30, 1e30, 1e30], dtype="float64")
ma = -np.array([1e30, 1e30, 1e30], dtype="float64")
for iptype, ptype in enumerate(self._ptypes):
# We'll just add the individual types separately
count = data_file.total_particles[ptype]
if count == 0: continue
start, stop = ind, ind + count
while ind < stop:
c = min(CHUNKSIZE, stop - ind)
pp = np.fromfile(f, dtype = self._pdtypes[ptype],
count = c)
eps = np.finfo(pp["Coordinates"]["x"].dtype).eps
np.minimum(mi, [pp["Coordinates"]["x"].min(),
pp["Coordinates"]["y"].min(),
pp["Coordinates"]["z"].min()], mi)
np.maximum(ma, [pp["Coordinates"]["x"].max(),
pp["Coordinates"]["y"].max(),
pp["Coordinates"]["z"].max()], ma)
ind += c
# We extend by 1%.
DW = ma - mi
mi -= 0.01 * DW
ma += 0.01 * DW
ds.domain_left_edge = ds.arr(mi, 'code_length')
ds.domain_right_edge = ds.arr(ma, 'code_length')
ds.domain_width = DW = ds.domain_right_edge - ds.domain_left_edge
ds.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
DW.units.dimensions)
def _initialize_index(self, data_file, regions):
ds = data_file.ds
morton = np.empty(sum(data_file.total_particles.values()),
dtype="uint64")
ind = 0
DLE, DRE = ds.domain_left_edge, ds.domain_right_edge
dx = (DRE - DLE) / (2**_ORDER_MAX)
self.domain_left_edge = DLE.in_units("code_length").ndarray_view()
self.domain_right_edge = DRE.in_units("code_length").ndarray_view()
with open(data_file.filename, "rb") as f:
f.seek(ds._header_offset)
for iptype, ptype in enumerate(self._ptypes):
# We'll just add the individual types separately
count = data_file.total_particles[ptype]
if count == 0: continue
start, stop = ind, ind + count
while ind < stop:
c = min(CHUNKSIZE, stop - ind)
pp = np.fromfile(f, dtype = self._pdtypes[ptype],
count = c)
mis = np.empty(3, dtype="float64")
mas = np.empty(3, dtype="float64")
for axi, ax in enumerate('xyz'):
mi = pp["Coordinates"][ax].min()
ma = pp["Coordinates"][ax].max()
mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
mis[axi] = mi
mas[axi] = ma
pos = np.empty((pp.size, 3), dtype="float64")
for i, ax in enumerate("xyz"):
eps = np.finfo(pp["Coordinates"][ax].dtype).eps
pos[:,i] = pp["Coordinates"][ax]
regions.add_data_file(pos, data_file.file_id,
data_file.ds.filter_bbox)
morton[ind:ind+c] = compute_morton(
pos[:,0], pos[:,1], pos[:,2],
DLE, DRE, data_file.ds.filter_bbox)
ind += c
mylog.info("Adding %0.3e particles", morton.size)
return morton
def _count_particles(self, data_file):
npart = {
"Gas": data_file.ds.parameters['nsph'],
"Stars": data_file.ds.parameters['nstar'],
"DarkMatter": data_file.ds.parameters['ndark']
}
return npart
@classmethod
def _compute_dtypes(cls, field_dtypes, endian = "<"):
pds = {}
for ptype, field in cls._fields:
dtbase = field_dtypes.get(field, 'f')
ff = "%s%s" % (endian, dtbase)
if field in cls._vector_fields:
dt = (field, [('x', ff), ('y', ff), ('z', ff)])
else:
dt = (field, ff)
pds.setdefault(ptype, []).append(dt)
pdtypes = {}
for ptype in pds:
pdtypes[ptype] = np.dtype(pds[ptype])
return pdtypes
def _create_dtypes(self, data_file):
# We can just look at the particle counts.
self._header_offset = data_file.ds._header_offset
self._pdtypes = {}
pds = {}
field_list = []
tp = data_file.total_particles
aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
self._pdtypes = self._compute_dtypes(data_file.ds._field_dtypes,
data_file.ds.endian)
for ptype, field in self._fields:
if tp[ptype] == 0:
# We do not want out _pdtypes to have empty particles.
self._pdtypes.pop(ptype, None)
continue
field_list.append((ptype, field))
if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
field_list += [("Gas",a) for a in self._aux_fields]
if any(["DarkMatter"==f[0] for f in field_list]):
field_list += [("DarkMatter",a) for a in self._aux_fields]
if any(["Stars"==f[0] for f in field_list]):
field_list += [("Stars",a) for a in self._aux_fields]
self._field_list = field_list
return self._field_list
def _identify_fields(self, data_file):
return self._field_list, {}
def _calculate_particle_offsets(self, data_file):
field_offsets = {}
pos = data_file.ds._header_offset
for ptype in self._ptypes:
field_offsets[ptype] = pos
if data_file.total_particles[ptype] == 0: continue
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
| StarcoderdataPython |
1796231 | from numpy.core.defchararray import zfill
import taichi as ti
import numpy as np
from .camera import *
from .shading import *
from .renderer_utils import ray_aabb_intersection, intersect_sphere, ray_plane_intersect, reflect, refract
inf = 1e8
eps = 1e-4
@ti.data_oriented
class ParticleRenderer:
padding = 3 # extra padding to avoid cropping some of the projected sphere
def __init__(self, system, radius=0.025, main_res=512):
self.system = system
system.renderer = self
self.main_res = main_res
self.radius = radius
self.epsilon = 20.0 * self.radius
''' directional light '''
self.camera_main = Camera(res=(main_res, main_res), pos=[0, 0.5, 2.5], target=[0, 0, 0])
self.camera_main.add_buffer("pos", dim=3, dtype=float)
self.camera_main.add_buffer("zbuf", dim=0, dtype=float)
self.camera_main.add_buffer("normal", dim=3, dtype=float)
self.main_img = self.camera_main.img
light_y_pos = 2.0 - eps
light_x_min_pos = -0.15
light_x_range = 0.3
light_z_min_pos = 1.0
light_z_range = 0.3
self.light_area = light_x_range * light_z_range
self.light_vertices = [
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos]),
ti.Vector([light_x_min_pos, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos + light_z_range]),
ti.Vector([light_x_min_pos + light_x_range, light_y_pos, light_z_min_pos]),
]
self.left_wall = [ti.Vector([-1.1, 0.0, 0.0]), ti.Vector([-1.1, 0.0, 2.0]), ti.Vector([-1.1, 2.0, 2.0]), ti.Vector([-1.1, 2.0, 0.0])]
self.color_left = ti.Vector([0.65, 0.05, 0.05])
self.right_wall = [ti.Vector([1.1, 0.0, 0.0]), ti.Vector([1.1, 2.0, 0.0]), ti.Vector([1.1, 2.0, 2.0]), ti.Vector([1.1, 0.0, 2.0])]
self.color_right = ti.Vector([0.12, 0.45, 0.15])
self.light_min_pos = self.light_vertices[0]
self.light_max_pos = self.light_vertices[2]
self.light_normal = ti.Vector([0.0, -1.0, 0.0])
self.light_color = ti.Vector([0.9, 0.85, 0.7])
self.light_intensity = 200
self.camera_shadow = Camera(res=(2048, 2048), mainimg=False,
pos=[light_x_min_pos + light_x_range / 2, light_y_pos + light_x_range / 2, light_z_min_pos + light_z_range / 2],
target=[light_x_min_pos + light_x_range / 2, 0.0, light_z_min_pos + light_z_range / 2],
up=[0, 0, 1],
fov=45)
self.camera_shadow.add_buffer("zbuf", dim=0, dtype=float)
'''
Clear camera
'''
@ti.kernel
def clear_camera(self, camera: ti.template()):
for I in ti.grouped(camera.img):
camera.zbuf[I] = 0
camera.img[I].fill(0)
camera.normal[I].fill(0)
camera.pos[I].fill(0)
'''
Calculates G-buffer
'''
@ti.kernel
def calculate_buffers(self, camera: ti.template()):
camera.W2V[None] = camera.L2W[None].inverse()
# first pass: visibility splatting
for i in range(self.system.num_particles_max):
if i >= self.system.num_particles[None]:
continue
# particle center coordinate transfer
# particle position view space 4d homogeneous coord [x, y, z, 1]
pos_view = ti.Vector.zero(float, 3)
pos_view = xyz(camera.W2V @ position(self.system.pos[i]))
pos_img = camera.uncook(pos_view) # 2d image space position (x, y) in pixel unit
# find the projected radius in image space
ref_view_space = ti.Vector([pos_view[0] + self.radius, pos_view[1], pos_view[2]])
ref_img_space = camera.uncook(ref_view_space)
r_projected = abs(ref_img_space[0] - pos_img[0]) + self.padding # projected radius in pixel unit
# fragment ranges to render
xmin = int(min(max(0, pos_img[0] - r_projected), camera.res[0]))
xmax = int(min(max(0, pos_img[0] + r_projected), camera.res[0]))
ymin = int(min(max(0, pos_img[1] - r_projected), camera.res[1]))
ymax = int(min(max(0, pos_img[1] + r_projected), camera.res[1]))
if pos_view.z > 0 and 0 <= xmin < xmax < camera.res[0] and 0 <= ymin < ymax < camera.res[1]:
# process projected fragments and compute depth
for row in range(xmin, xmax):
for column in range(ymin, ymax):
# discard fragment if its distance to particle center > projected radius
frag_view_space = ti.Vector([row, column, pos_view[2]]).cast(float)
frag_view_space = camera.cook(frag_view_space) # 3d position in view space
dis_projected = (frag_view_space - pos_view).norm()
if dis_projected <= self.radius:
# compute depth value for valid fragment
depth = pos_view[2] - ti.sqrt(self.radius ** 2 - dis_projected ** 2)
z = camera.depth(depth)
# overwrite if closer
if z >= ti.atomic_max(camera.zbuf[row, column], z):
if ti.static(hasattr(camera, "normal")):
frag_surface = ti.Vector([frag_view_space[0], frag_view_space[1], depth])
normal = (frag_surface - pos_view).normalized()
normal_world = xyz(camera.L2W @ direction(normal))
pos_world = xyz(camera.L2W @ position(frag_surface))
camera.img[row, column] = self.system.col[i] # diffuse
camera.normal[row, column] = normal_world
camera.pos[row, column] = pos_world
@ti.func
def intersect_light(self, pos, d, tmax):
hit, t, _ = ray_aabb_intersection(self.light_min_pos, self.light_max_pos, pos, d)
if hit and 0 < t < tmax:
hit = 1
else:
hit = 0
t = inf
return hit, t
'''
Wall intersection from Cornell Box example
'''
@ti.func
def intersect_scene(self, pos, ray_dir):
closest, normal = inf, ti.Vector.zero(ti.f32, 3)
c = ti.Vector.zero(ti.f32, 3)
# left
pnorm = ti.Vector([1.0, 0.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([-1.1, 0.0,
0.0]), pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = self.color_left
# right
pnorm = ti.Vector([-1.0, 0.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([1.1, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = self.color_right
# bottom
gray = ti.Vector([0.93, 0.93, 0.93])
pnorm = ti.Vector([0.0, 1.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# top
pnorm = ti.Vector([0.0, -1.0, 0.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 2.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# far
pnorm = ti.Vector([0.0, 0.0, 1.0])
cur_dist, _ = ray_plane_intersect(pos, ray_dir, ti.Vector([0.0, 0.0, 0.0]),
pnorm)
if 0 < cur_dist < closest:
closest = cur_dist
normal = pnorm
c = gray
# light
hit_l, cur_dist = self.intersect_light(pos, ray_dir, closest)
if hit_l and 0 < cur_dist < closest:
# technically speaking, no need to check the second term
closest = cur_dist
normal = self.light_normal
c = self.light_color
return closest, normal, c
'''
Shadow map functions
'''
@ti.func
def shadowmap_soft(self, pos):
bias = eps
light_size = 16
n_sample = 64
n_ring = 10
radius = 1 / n_sample
radius_step = radius
angle = ti.random() * 2 * math.pi
angle_step = 2 * math.pi * n_ring / n_sample
pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
zbuf_UV = self.camera_shadow.uncook(pos_shadow)
z_shadow = self.camera_shadow.depth(pos_shadow.z)
visibility = 0.0
for _ in range(n_sample):
delta_UV = ti.Vector([ti.cos(angle), ti.sin(angle)]) * (radius ** 0.75) * light_size
angle += angle_step
radius += radius_step
#print(zbuf_UV, delta_UV)
shadow_depth = texture(self.camera_shadow.zbuf, zbuf_UV + delta_UV)
if 0 <= shadow_depth < z_shadow - bias:
visibility += 1.0
return visibility / n_sample
@ti.func
def shadowmap(self, pos):
pos_shadow = xyz(self.camera_shadow.W2V @ position(pos))
zbuf_UV = self.camera_shadow.uncook(pos_shadow)
z_shadow = self.camera_shadow.depth(pos_shadow.z)
bias = eps
visibility = 1.0
if texture(self.camera_shadow.zbuf, zbuf_UV) > z_shadow + bias:
visibility = 0.0
return visibility
@ti.func
def ssao(self, pos):
ao_radius = self.radius * 15
n_sample = 64
sample = 0
visible = 0.0
while sample < n_sample:
rand_vec = ti.Vector([ti.random(), ti.random(), ti.random()]) * 2 - 1.0
if (rand_vec ** 2).sum() <= 1.0:
sample += 1
pos_test = pos + rand_vec * ao_radius
pos_test_view = xyz(self.camera_main.W2V @ position(pos_test))
pos_test_UV = self.camera_main.uncook(pos_test_view)
z_test = self.camera_main.depth(pos_test_view.z)
if z_test >= texture(self.camera_main.zbuf, pos_test_UV):
visible += 1.0
return min(1.0, visible / n_sample * 2)
'''
Shading
'''
@ti.kernel
def shade_particles(self):
camera = self.camera_main
# third pass: shading
for I in ti.grouped(camera.img):
rayorig, viewdir = camera.pixel_ray(I)
closest, normal, color = self.intersect_scene(rayorig, viewdir)
pos_world = rayorig + viewdir * closest
pos_view = xyz(camera.W2V @ position(pos_world))
z = camera.depth(pos_view.z)
if z < camera.zbuf[I]:
normal = camera.normal[I]
color = camera.img[I]
pos_world = camera.pos[I]
# ambient
ao = self.ssao(pos_world)
color = color * 0.2 * ao
# diffuse shadowed
visibility = self.shadowmap_soft(pos_world)
color += visibility * shade_area_diffuse(pos_world, normal, color,
-self.light_normal, self.light_vertices, self.light_color, self.light_intensity)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([1.0, 0.0, 0.0]), self.left_wall, self.color_left, self.light_intensity * 0.02)
color += shade_area_diffuse(pos_world, normal, color,
ti.Vector([-1.0, 0.0, 0.0]), self.right_wall, self.color_right, self.light_intensity * 0.02)
#camera.img[I] = ti.Vector([1.0, 1.0, 1.0]) * ao * visibility
# reflection
#refldir = viewdir - 2 * viewdir.dot(normal) * normal
# tone mapping
#camera.img[I] = camera.img[I] * 1.6 / (1.0 + camera.img[I])
# gamma correction
camera.img[I] = color ** (1 / 2.2)
def render_main(self):
self.clear_camera(self.camera_main)
self.camera_shadow.zbuf.fill(0)
self.calculate_buffers(self.camera_shadow)
self.calculate_buffers(self.camera_main)
self.shade_particles()
'''
Main render function which renders to the GUI.
'''
def render(self, gui):
gui.clear()
self.camera_main.from_mouse(gui)
self.render_main()
gui.set_image(self.main_img)
#gui.set_image(self.camera_shadow.zbuf)
| StarcoderdataPython |
1765682 | from firedrake import *
from MS_system import MS_constraint
import numpy as np
from ReducedFunctionalSafe import ReducedFunctionalSafe
from firedrake_adjoint import *
def parameter_optimization(target):
# Create the Augmented system
problem = MS_constraint()
# Compute the initial guess
problem.compute_guess()
# Solve the forward problem
problem.solve_MS()
# Get parameters from the problem
u = problem.u
V = problem.V
    # Minimize over the target parameter mu (the period)
mu_target = target
d = project(Constant(mu_target), V)
J = assemble(inner(u[3]-d, u[3]-d)*dx) / mu_target**2
# Minimize
controla = Control(problem.a)
# Print functional value
def eval_cb(j, m):
print("\n #### Functional = %g #### \n" % (j))
rf = ReducedFunctionalSafe(J, controla)
# ROL optimization parameters
params_dict = {
'Status Test':{'Gradient Tolerance':1e-6,
'Step Tolerance':1e-12,
'Iteration Limit':100},
'Step':{'Type':'Trust Region',
'Trust Region':{'Initial Radius': -1, #1e-3 #determine initial radius with heuristics: -1
'Maximum Radius':1e8,
'Subproblem Solver':'Dogleg',
'Radius Growing Rate':2.5,
'Step Acceptance Threshold':0.05,
'Radius Shrinking Threshold':0.05,
'Radius Growing Threshold':0.9,
'Radius Shrinking Rate (Negative rho)':0.0625,
'Radius Shrinking Rate (Positive rho)':0.25,
'Radius Growing Rate':2.5,
'Sufficient Decrease Parameter':1e-4,
'Safeguard Size':100.0,
}
},
'General':{'Print Verbosity':0, #set to any number >0 for increased verbosity
'Secant':{'Type':'Limited-Memory BFGS', #BFGS-based Hessian-update in trust-region model
'Maximum Storage':10
}
}
}
# Solve the minimization problem
MinProblem = MinimizationProblem(rf)
inner_product = "L2"
solver = ROLSolver(MinProblem, params_dict, inner_product=inner_product)
sol = solver.solve()
# Get the control value
a = sol.dat.data[0]
print("a = %f" % a)
# Solve the forward problem
problem.a.assign(a)
problem.solve_MS()
if __name__ == "__main__":
"""
Set the target frequency.
The default settings correspond to reproducing Fig 1 of the paper.
"""
# Select target bifurcation parameter
mu_target = 0.0157
# Run the optimization
parameter_optimization(mu_target) | StarcoderdataPython |
1734006 | from enum import Enum, unique
@unique
class StackStatus(Enum):
IN_PROGRESS = "in progress"
QUEUED = "queued"
DONE = "done"
READY = "ready"
| StarcoderdataPython |
3370467 | from dataclasses import dataclass
from typing import Dict, List, Optional
@dataclass
class HttpTransactionData:
"""Dataclass for HTTP Transaction objects.
See also: https://github.com/ssllabs/ssllabs-scan/blob/master/ssllabs-api-docs-v3.md#httptransaction
"""
requestUrl: str
"""Request URL"""
statusCode: Optional[int]
"""Response status code"""
requestLine: Optional[str]
"""The entire request line as a single field"""
requestHeaders: List[str]
"""An array of request HTTP headers, each with name and value"""
responseLine: Optional[str]
"""The entire response line as a single field"""
responseHeadersRaw: List[str]
"""All response headers as a single field (useful if the headers are malformed)"""
responseHeaders: List[Dict]
"""An array of response HTTP headers, each with name and value"""
fragileServer: bool
"""True if the server crashes when inspected by SSL Labs (in which case the full test is refused)"""
| StarcoderdataPython |
1744958 | from django.http import HttpRequest
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from workflows.services import WorkflowServices, WorkflowActions
class Workflows(APIView):
"""
Class to get and post workflows
"""
def get(self, request, offset: int):
"""Gets all workflows"""
res = WorkflowServices.getWorkflows(offset)
return Response(res.json())
def post(self, request):
data = request.data
name = data.get("name", "")
scheduleId = data.get("scheduleId", "")
triggerWorkflowId = data.get("triggerWorkflowId", "")
triggerWorkflowStatus = data.get("triggerWorkflowStatus", "")
notebookIds = data.get("notebookIds", [])
if 'id' in data and data['id']:
res = WorkflowServices.updateWorkflow(data['id'], name, scheduleId, triggerWorkflowId, triggerWorkflowStatus, notebookIds)
else:
res = WorkflowServices.createWorkflow(name, scheduleId, triggerWorkflowId, triggerWorkflowStatus, notebookIds)
return Response(res.json())
class Workflow(APIView):
"""
    Class to delete a workflow
"""
def delete(self, request, workflowId: int):
res = WorkflowServices.deleteWorkflow(workflowId)
return Response(res.json())
class WorkflowRun(APIView):
"""
Class to get and post WorkflowRun
"""
def get(self, request, workflowId: int, offset: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowServices.getWorkflowRuns(workflowId, offset)
return Response(res.json())
class RunWorkflow(APIView):
"""
Class to manually run workflows
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowActions.runWorkflow(workflowId)
return Response(res.json())
class StopWorkflow(APIView):
"""
Class to manually stop workflows
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowActions.stopWorkflow(workflowId)
return Response(res.json())
class WorkflowRunLog(APIView):
"""
Class to get and post WorkflowRun
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowServices.getWorkflowRunLogs(workflowId)
return Response(res.json())
class UpdateTriggerWorkflow(APIView):
"""
Class to update trigger workflow associated with workflow
"""
def post(self, request, workflowId: int):
"""
Updated trigger workflow
"""
data = request.data
triggerWorkflowId = data.get("triggerWorkflowId", "")
triggerWorkflowStatus = data.get("triggerWorkflowStatus", "")
res = WorkflowServices.updateTriggerWorkflow(workflowId, triggerWorkflowId, triggerWorkflowStatus)
return Response(res.json())
class UpdateSchedule(APIView):
"""
Class to update schedule associated with workflow
"""
def post(self, request, workflowId: int):
"""
Updated trigger workflow
"""
data = request.data
scheduleId = data.get("scheduleId", "")
res = WorkflowServices.updateSchedule(workflowId, scheduleId)
return Response(res.json())
| StarcoderdataPython |
73224 | <reponame>ttungbmt/BecaGIS_GeoPortal<gh_stars>0
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from pinax.ratings.models import OverallRating
from dialogos.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.db.models import Avg
from haystack import indexes
from geonode.documents.models import Document
class DocumentIndex(indexes.SearchIndex, indexes.Indexable):
id = indexes.IntegerField(model_attr='id')
abstract = indexes.CharField(model_attr="abstract", boost=1.5)
category__gn_description = indexes.CharField(model_attr="category__gn_description", null=True)
csw_type = indexes.CharField(model_attr="csw_type")
csw_wkt_geometry = indexes.CharField(model_attr="csw_wkt_geometry")
detail_url = indexes.CharField(model_attr="get_absolute_url")
owner__username = indexes.CharField(model_attr="owner", faceted=True, null=True)
srid = indexes.CharField(model_attr="srid")
supplemental_information = indexes.CharField(model_attr="supplemental_information", null=True)
thumbnail_url = indexes.CharField(model_attr="thumbnail_url", null=True)
uuid = indexes.CharField(model_attr="uuid")
title = indexes.CharField(model_attr="title", boost=2)
date = indexes.DateTimeField(model_attr="date")
text = indexes.EdgeNgramField(document=True, use_template=True, stored=False)
type = indexes.CharField(faceted=True)
title_sortable = indexes.CharField(indexed=False, stored=False) # Necessary for sorting
category = indexes.CharField(
model_attr="category__identifier",
faceted=True,
null=True,
stored=True)
bbox_left = indexes.FloatField(model_attr="bbox_x0", null=True, stored=False)
bbox_right = indexes.FloatField(model_attr="bbox_x1", null=True, stored=False)
bbox_bottom = indexes.FloatField(model_attr="bbox_y0", null=True, stored=False)
bbox_top = indexes.FloatField(model_attr="bbox_y1", null=True, stored=False)
temporal_extent_start = indexes.DateTimeField(
model_attr="temporal_extent_start",
null=True,
stored=False)
temporal_extent_end = indexes.DateTimeField(
model_attr="temporal_extent_end",
null=True,
stored=False)
keywords = indexes.MultiValueField(
model_attr="keyword_slug_list",
null=True,
faceted=True,
stored=True)
regions = indexes.MultiValueField(
model_attr="region_name_list",
null=True,
faceted=True,
stored=True)
popular_count = indexes.IntegerField(
model_attr="popular_count",
default=0,
boost=20)
share_count = indexes.IntegerField(model_attr="share_count", default=0)
rating = indexes.IntegerField(null=True)
num_ratings = indexes.IntegerField(stored=False)
num_comments = indexes.IntegerField(stored=False)
def get_model(self):
return Document
def prepare_type(self, obj):
return "document"
def prepare_rating(self, obj):
ct = ContentType.objects.get_for_model(obj)
try:
rating = OverallRating.objects.filter(
object_id=obj.pk,
content_type=ct
).aggregate(r=Avg("rating"))["r"]
return float(str(rating or "0"))
except OverallRating.DoesNotExist:
return 0.0
def prepare_num_ratings(self, obj):
ct = ContentType.objects.get_for_model(obj)
try:
return OverallRating.objects.filter(
object_id=obj.pk,
content_type=ct
).all().count()
except OverallRating.DoesNotExist:
return 0
def prepare_num_comments(self, obj):
try:
return Comment.objects.filter(
object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)
).all().count()
except Exception:
return 0
def prepare_title_sortable(self, obj):
return obj.title.lower().lstrip()
| StarcoderdataPython |
54221 |
def foo(a, b, *, bar=True):
print(bar)
# Calling it with three positional arguments raises a TypeError (bar is keyword-only)
foo(1, 2, 3)
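# Added note: because bar is declared after '*', it can only be passed by keyword,
# so the call above fails; the correct form is e.g. foo(1, 2, bar=False).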
| StarcoderdataPython |
20860 | # Copyright 2015 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from magnum.conductor.handlers.common.cert_manager import create_client_files
class KubernetesAPI:
"""
Simple Kubernetes API client using requests.
This API wrapper allows for a set of very simple operations to be
performed on a Kubernetes cluster using the `requests` library. The
reason behind it is that the native `kubernetes` library does not
seem to be quite thread-safe at the moment.
Also, our interactions with the Kubernetes API are happening inside
Greenthreads so we don't need to use connection pooling on top of it,
in addition to pools not being something that you can disable with
the native Kubernetes API.
"""
def __init__(self, context, cluster):
self.context = context
self.cluster = cluster
# Load certificates for cluster
(self.ca_file, self.key_file, self.cert_file) = create_client_files(
self.cluster, self.context
)
def _request(self, method, url, json=True):
response = requests.request(
method,
url,
verify=self.ca_file.name,
cert=(self.cert_file.name, self.key_file.name)
)
response.raise_for_status()
if json:
return response.json()
else:
return response.text
def get_healthz(self):
"""
Get the health of the cluster from API
"""
return self._request(
'GET',
f"{self.cluster.api_address}/healthz",
json=False
)
def list_node(self):
"""
List all nodes in the cluster.
:return: List of nodes.
"""
return self._request(
'GET',
f"{self.cluster.api_address}/api/v1/nodes"
)
def list_namespaced_pod(self, namespace):
"""
List all pods in the given namespace.
:param namespace: Namespace to list pods from.
:return: List of pods.
"""
return self._request(
'GET',
f"{self.cluster.api_address}/api/v1/namespaces/{namespace}/pods"
)
def __del__(self):
"""
        Close all of the file descriptors for the certificates, since they
are left open by `create_client_files`.
TODO(mnaser): Use a context manager and avoid having these here.
"""
if hasattr(self, 'ca_file'):
self.ca_file.close()
if hasattr(self, 'cert_file'):
self.cert_file.close()
if hasattr(self, 'key_file'):
self.key_file.close()
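# Illustrative usage (added sketch): `context` and `cluster` come from Magnum's
# conductor layer, so the names below are placeholders for demonstration only.
#   k8s = KubernetesAPI(context, cluster)
#   print(k8s.get_healthz())                    # "ok" on a healthy cluster
#   for node in k8s.list_node().get("items", []):
#       print(node["metadata"]["name"])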
| StarcoderdataPython |
103665 | <gh_stars>0
# Hash tables - lookups often take O(1) time because the hash function points straight at the approximate cell where the key (value) we need should live
from hashlib import md5 , sha1
data = [22,40,102,105,23,31,6,5]
hash_table = [None] *15
tbllen = len(hash_table)
def hash_function(value,table_size):
return value % table_size
for value in data:
hash_table[hash_function(value,tbllen)] = value
print(hash_table)
def hash_f(element,i,lenght):
"""Функция для создания множетсва хещ-функция"""
h1 = int(md5(element.encode('ascii')).hexdigest(),16)
h2 = int(sha1(element.encode('ascii')).hexdigest(),16)
return (h1+i*h2) %lenght
print (hash_f("CAT",1,10**5))
print (hash_f("CAT",2,10**5))
| StarcoderdataPython |
26645 | <reponame>hongren798911/haha
num1 = 100
num2 = 200
num3 = 300
num4 = 400
num5 = 500
num6 = 600
num7 = 700
num8 = 800
| StarcoderdataPython |
3250936 | import socket
from threading import Thread  # Lets functions run at the same time (async)
import time
class reseau:
"""
Class reseau demandant 1 paramètre :
- sock : par default : socket.socket(socket.AF_INET, socket.SOCK_STREAM)
"""
def __init__(self, sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM), pseudo="User"):
self.sock = sock
self.serveurstart = False
self.listclient = []
self.pseudo = pseudo
self.waitmessage = []
self.chat = {0:0}
self.ip = ""
self.port = 0
def __bind(self,Host,Port, Cons=False):
"""
Ouvre une connextion en tant que HOTE de celle-ci.
- Host : ip souhaité
- Port : port ouvert afin de communiquer
"""
self.ip = Host
self.port = Port
self.sock.bind((Host, Port))
self.sock.listen(5)
if Cons: print(f"Ouverture Hote : > {Host} PORT {Port}")
def __ConnexionMessagerie(self, Host, Port, Cons=False):
"""
Connexion pour un client
"""
self.sock.connect((Host,Port))
if Cons: print(f"Connexion à l'Hote : > {Host} PORT {Port}")
def __RequestClient(self, Cons=False):
"""
Ouvre une boucle sur intervalle 4.
Cela attend la connexion d'un nouveau client afin de l'ajouter à la liste des clients.
"""
while self.serveurstart:
new_client, ip = self.sock.accept()
self.listclient.append(new_client)
if Cons: print(f"Connexion : > {ip}")
if Cons: print("Fermeture port")
self.CloseBind()
def CloseBind(self):
"""
Ferme la discussion en question en tant que hote.
[ATTENTION] Ne pas lancer la fonction en tant que client (non hote)
"""
self.SendMessage("*WARNING* | L'hote vient de fermer la session.")
time.sleep(1)
self.serveurstart = False
time.sleep(1)
for client in range(len(self.listclient)):
self.listclient[client].close()
self.listclient.pop(client)
self.sock.close()
def CloseClient(self):
msg = ("§STOPCLIENT§")
codemsg = msg.encode("utf-8")
self.sock.send(codemsg)
self.SendMessage(f"--> {self.pseudo} s'est déconnecté")
time.sleep(1)
self.serveurstart = False
def __SendMessageByHote(self):
while self.serveurstart:
if not self.waitmessage:
time.sleep(1)
continue
message = self.waitmessage[0]
if message[0] != '/':
msg = f"{self.pseudo}§{message}§"
codemsg = msg.encode("utf-8")
self.waitmessage.pop(0)
for client in self.listclient:
client.send(codemsg)
else:
self.waitmessage.pop(0)
                if len(message) > 3 and message[0:3] == '/ip':
pass
def __SendMessageByClient(self):
while self.serveurstart:
if not self.waitmessage:
time.sleep(1)
continue
msg = f"{self.pseudo}§{self.waitmessage[0]}§"
codemsg = msg.encode("utf-8")
self.waitmessage.pop(0)
self.sock.send(codemsg)
def SendMessage(self,message,Cons=False):
"""
Fonction qui permet d'envoyer un message.
Args :
- message : (str)
"""
self.waitmessage.append(str(message))
def __ConsoleUseSend(self):
while self.serveurstart:
envoie = input(f"--------- SEND >> ")
if envoie == "/stop host":
self.CloseBind()
elif envoie == "/stop client":
self.CloseClient()
print("stop client envoye")
else:
self.SendMessage(envoie,True)
def __GetMessageByClient(self,Cons=False):
while self.serveurstart:
requete_server = self.sock.recv(500)
requete_server = requete_server.decode("utf-8")
a = requete_server.split("§")
pseudoList = [a[i] for i in range(0, len(a), 2)]
msgList = [a[i] for i in range(1, len(a), 2)]
for pseudo, message in zip(pseudoList, msgList):
ID = self.chat[0]+1
TIME = time.strftime('%H:%M', time.localtime())
self.chat[0] = ID
self.chat[ID] = {"pseudo":pseudo,"time":TIME,"content":message, 'distant': True}
if Cons: print(f"{pseudo} >> {message}")
def __GetMessageOfClient(self,client,Cons=False):
        requete_client = client.recv(500)  # Retrieve the messages
        requete_client_decode = requete_client.decode('utf-8')  # Decode to UTF-8
        # Store the message
a = requete_client_decode.split("§")
pseudoList = [a[i] for i in range(0, len(a), 2)]
msgList = [a[i] for i in range(1, len(a), 2)]
for pseudo, message in zip(pseudoList, msgList):
if message == "STOPCLIENT":
for infoclient in range(len(self.listclient)):
if self.listclient[infoclient]==client:
self.listclient.pop(infoclient)
elif requete_client_decode == "GETIP":
pass
else:
ID = self.chat[0]+1
TIME = time.strftime('%H:%M', time.localtime())
self.chat[0] = ID
self.chat[ID] = {"pseudo":pseudo,"time":TIME,"content":message, 'distant': True}
if Cons: print(f"{pseudo} >> {message}")
        # Forward the message to the other clients!
for newclient in self.listclient:
if newclient != client:
newclient.send(requete_client)
def __GetAllMessageByServer(self,Cons=False):
while self.serveurstart:
time.sleep(1)
for client in self.listclient:
GetMessage = Thread(target=self.__GetMessageOfClient,args=(client,Cons))
GetMessage.start()
def HostMessagerie(self, Host='localhost', Port=6300, Cons=False):
self.serveurstart = True
        self.__bind(Host,Port,Cons)  # Open the session
request = Thread(target=self.__RequestClient,args=[Cons])
send = Thread(target=self.__SendMessageByHote)
get = Thread(target=self.__GetAllMessageByServer,args=[Cons])
request.start()
send.start()
get.start()
        if Cons:  # Used when we want to run the chat from the console
console = Thread(target=self.__ConsoleUseSend())
console.start()
def ClientMessagerie(self, Host='localhost', Port=6300, Cons=False):
self.serveurstart = True
        self.__ConnexionMessagerie(Host,Port,Cons)  # Open the connection to the host
        # Send an identity card to the HOST
# carte = "§CARTEID§|"
# codemsg = carte.encode("utf-8")
# self.sock.send(codemsg)
get = Thread(target=self.__GetMessageByClient,args=[Cons])
send = Thread(target=self.__SendMessageByClient)
get.start()
send.start()
        if Cons:  # Used when we want to run the chat from the console
console = Thread(target=self.__ConsoleUseSend())
console.start()
def FetchMessage(self):
"""
Retourne un dictionnaire contenant tout les messages reçus.
- dico[0] = total de message
- dico[1 à dico[0]] = {'pseudo':''User, 'time': 'hh:mm', 'content' : 'Hey'}
[INFO] : La fonction ne retourne pas les propres message de l'utilisateur
"""
return self.chat
def ChangePseudo(self,pseudo):
"""
Permet de modifier le pseudo.
La fonction retourne aussi l'ancien pseudo.
"""
old = self.pseudo
self.pseudo = pseudo
return old
def GetPseudo(self):
"""
Retourne le pseudo de l'utilisateur.
"""
return self.pseudo
def GetInformationConnexion(self):
"""
Retourne l'ip et le port sous forme de tuple (self.ip,self.port)
(str | int)
"""
return(self.ip,self.port)
if __name__ == "__main__":
pseudo = input("Votre pseudo : ")
test = reseau(pseudo=pseudo)
etat = input("Voulez vous etre Host ou Client (H/C)")
if etat == "H":
test.HostMessagerie("172.20.10.2",5415,True)
else:
test.ClientMessagerie("172.20.10.6",6300,True)
| StarcoderdataPython |
1613169 | from gensim.models import Word2Vec
from ..lib import node2vec
'''
TODO: migrate to https://github.com/eliorc/node2vec
'''
def process(nx_g, args, gene_vec_conv=lambda x: x):
"""Generates node2vec representation of genes for given pathway network
Parameters
----------
nx_g: :networkx.classes.graph.Graph:
pathway graph
args: Namespace
node2vec arguments see node2vec
gene_vec_conv: function
gene vector converter function
"""
# generate model
for v1, v2 in nx_g.edges(): nx_g[v1][v2]['weight'] = 1
G = node2vec.Graph(nx_g, is_directed=args.is_directed, p=args.p, q=args.q)
G.preprocess_transition_probs()
walks_sim = G.simulate_walks(num_walks=10, walk_length=80)
walks = [list(map(str, walk)) for walk in walks_sim]
# size=dimension, window=context_size, workers=num_of_parallel_workers, iter=num_epochs_in_sgd
model = Word2Vec(walks, size=args.n2v_size, window=10, min_count=0, sg=1, workers=4, iter=1)
# wv has:
# * index2entity: array of result index to node id map size N
# * vectors: array of result vectors size N x v2v_size
# so convert them to map G (gene vector map)
gene_vectors = {}
for (eid, gene_vec) in zip(model.wv.index2entity, model.wv.vectors):
gene_vectors[int(eid)] = gene_vec_conv(gene_vec)
return gene_vectors
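# Rough usage sketch (argument names inferred from the function body; a real `args`
# object normally comes from the project's own CLI parser):
#
#   import networkx as nx
#   from types import SimpleNamespace
#   g = nx.karate_club_graph()
#   args = SimpleNamespace(is_directed=False, p=1, q=1, n2v_size=64)
#   vectors = process(g, args)   # dict: node id -> 64-dimensional embedding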
| StarcoderdataPython |
3258545 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""装饰器"""
import functools
# ===============================
# The most basic decorator
# ===============================
def log(func):
    @functools.wraps(func)  # Copy the original function's __name__ and other attributes onto wrapper()
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
@log
def now():
print('2018-3-25')
# The @log above is equivalent to: now = log(now)
now()
print(now.__name__)
print('=' * 50)
# ===============================
# A three-level nested decorator (decorator with arguments)
# ===============================
def log_1(text):
def decorator(func):
        @functools.wraps(func)  # Copy the original function's __name__ and other attributes onto wrapper()
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
@log_1('execute')
def today():
print('2018-3-25')
today()
print(today.__name__)
# ===============================
# Class-based decorator
# ===============================
class Foo(object):
def __init__(self, func):
self._func = func
def __call__(self, *args, **kwargs):
        print('class decorator start running')
self._func()
print('class decorator ending')
@Foo
def bar():
print('bar')
bar()
# print(bar.__name__)
# The code above is equivalent to:
# def bar():
# print('bar')
# obj = Foo(bar)
# obj.__call__() | StarcoderdataPython |
107332 | from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/string/error_after_multiline.lox
TEST_SRC = dedent(
"""\
// Tests that we correctly track the line info across multiline strings.
var a = "1
2
3
";
err; // // expect runtime error: Undefined variable 'err'.
"""
)
EXPECTED_STDOUTS = ["7:1: LoxRuntimeError: Undefined variable 'err'."]
def test_error_after_multiline(capsys: pytest.CaptureFixture) -> None:
interpreter = Lox()
interpreter.run(TEST_SRC)
assert interpreter.had_error
assert interpreter.had_runtime_error
all_out = capsys.readouterr().out.splitlines()
assert all_out == EXPECTED_STDOUTS
| StarcoderdataPython |
1725116 |
import sys
import os
import re
import mxnet as mx
import collections
import math
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from config import config
########################################################################
############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############
########################################################################
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
'num_classes', 'width_coefficient', 'depth_coefficient',
'depth_divisor', 'min_depth', 'drop_connect_rate',])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
def round_filters(filters, global_params):
""" Calculate and round number of filters based on depth multiplier. """
multiplier = global_params.width_coefficient
if not multiplier:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
def round_repeats(repeats, global_params):
""" Round number of filters based on depth multiplier. """
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
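# Worked example (using the efficientnet-b4 row from efficientnet_params below:
# width_coefficient=1.4, depth_coefficient=1.8, depth_divisor=8):
#   round_filters(32, gp)  -> 32 * 1.4 = 44.8 -> snapped to a multiple of 8 -> 48
#   round_repeats(3, gp)   -> ceil(3 * 1.8) = 6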
def Conv2dSamePadding(data, out_channels=1, kernel_size=(1,1), stride=(1,1), dilation=1, groups=1, bias=True, pad=(0,0), name=""):
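    # Note: the ``bias`` argument is forwarded directly to MXNet's ``no_bias`` flag here,
    # so bias=False keeps a bias term and the default bias=True disables it.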
return mx.sym.Convolution(data=data, num_filter=out_channels, kernel=kernel_size, num_group=groups, stride=stride, pad=pad, no_bias=bias, name=name)
"""
ih, iw = x.size()[-2:]
kh, kw = weight.size()[-2:]
sh, sw = stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
"""
def relu_fn(x, name=""):
return x * mx.symbol.Activation(data=x, act_type='sigmoid', name=name)
#return x * mx.sym.sigmoid(x, name=name)
def drop_connect(inputs, p, training):
if not training: return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
random_tensor += mx.nd.random.normal(shape=(batch_size, 1, 1, 1), dtype = inputs.dtype)
binary_tensor = random_tensor.floor()
output = inputs / keep_prob * binary_tensor
return output
def MBConvBlock(data, block_args, global_params, drop_connect_rate=None, name=""):
_bn_mom = 1 - global_params.batch_norm_momentum
_bn_eps = global_params.batch_norm_epsilon
has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)
id_skip = block_args.id_skip # skip connection and drop connect
# Expansion phase
inp = block_args.input_filters
oup = block_args.input_filters * block_args.expand_ratio # number of output channels
# Depthwise convolution phase
k = block_args.kernel_size
s = block_args.stride
# Squeeze and Excitation layer, if desired
if has_se:
num_squeezed_channels = max(1, int(block_args.input_filters * block_args.se_ratio))
# Output phase
final_oup = block_args.output_filters
# Expansion and Depthwise Convolution
x = data;
if block_args.expand_ratio != 1:
x = Conv2dSamePadding(data=x, out_channels=oup, kernel_size=(1,1), bias=False, name="expand_conv-"+name)
x = mx.sym.BatchNorm(data=x, name='bn0-'+name, momentum=_bn_mom, eps=_bn_eps)
x = relu_fn(x, name="relu_fn-expand-"+name)
if isinstance(s, int):
stride = (s,s)
else:
if len(s) == 1:
stride = (s[0],s[0])
else:
stride = (s[0],s[1])
x = Conv2dSamePadding(data=x, out_channels=oup, groups=oup, #groups makes it depthwise
kernel_size=(k,k), stride=stride, pad=((k-1)//2, (k-1)//2), bias=False, name="depthwise_conv-"+name)
x=mx.sym.BatchNorm(data=x, name="bn1-"+name, momentum=_bn_mom, eps=_bn_eps)
x = relu_fn(x, name="relu_fn-"+name)
# Squeeze and Excitation
if has_se:
x_squeezed = mx.sym.Pooling(data=x, pool_type='avg', kernel=(1,1), global_pool=True, name="avg_pool-"+name)
x_squeezed = Conv2dSamePadding(data=x_squeezed, out_channels=num_squeezed_channels, kernel_size=(1,1), name="se_reduce-"+name)
x_squeezed = relu_fn(x_squeezed, name="relu_fn-se-"+name)
x_squeezed = Conv2dSamePadding(data=x_squeezed, out_channels=oup, kernel_size=(1,1), name="se_expand-"+name)
x_squeezed = mx.symbol.Activation(data=x_squeezed, act_type='sigmoid', name="se_sigmoid-"+name)
x= mx.symbol.broadcast_mul(x, x_squeezed)
#x = mx.sym.sigmoid(data=x_squeezed, name="sogmoid") * x
x = Conv2dSamePadding(data=x, out_channels=final_oup, kernel_size=(1,1), bias=False, name='project_conv-'+name)
x=mx.sym.BatchNorm(data=x, name="bn2-"+name, momentum=_bn_mom, eps=_bn_eps)
# Skip connection and drop connect
input_filters, output_filters = block_args.input_filters, block_args.output_filters
if id_skip and block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=is_train, name="drop_connect-"+name)
x = x + data
return x
def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
}
return params_dict[model_name]
class BlockDecoder(object):
""" Block Decoder for readability, straight from the official TensorFlow repository """
@staticmethod
def _decode_block_string(block_string):
""" Gets a block through a string notation of arguments. """
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
assert (('s' in options and len(options['s']) == 1) or
(len(options['s']) == 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se']) if 'se' in options else None,
stride=[int(options['s'][0])])
@staticmethod
def _encode_block_string(block):
"""Encodes a block to a string."""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
's%d%d' % (block.strides[0], block.strides[1]),
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
"""
Decodes a list of string notations to specify blocks inside the network.
:param string_list: a list of strings, each string is a notation of block
:return: a list of BlockArgs namedtuples of block args
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
"""
Encodes a list of BlockArgs to a list of strings.
:param blocks_args: a list of BlockArgs namedtuples of block args
:return: a list of strings, each string is a notation of block
"""
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None,
dropout_rate=0.2, drop_connect_rate=0.2):
""" Creates a efficientnet model. """
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
drop_connect_rate=drop_connect_rate,
# data_format='channels_last', # removed, this is always true in PyTorch
num_classes=1000,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None
)
return blocks_args, global_params
def get_model_params(model_name, override_params):
""" Get the block args and global params for a given model """
#if model_name.startswith('efficientnet'):
if model_name.startswith('efficientnet'):
w, d, _, p = efficientnet_params(model_name)
# note: all models have drop connect rate = 0.2
blocks_args, global_params = efficientnet(width_coefficient=w, depth_coefficient=d, dropout_rate=p)
else:
raise NotImplementedError('model name is not pre-defined: %s' % model_name)
if override_params:
# ValueError will be raised here if override_params has fields not included in global_params.
global_params = global_params._replace(**override_params)
return blocks_args, global_params
def get_symbol():
arch=config.arch
blocks_args, global_params = get_model_params(arch, None)
data = mx.symbol.Variable(name="data") # 224
data = data-127.5
data = data*0.0078125
bn_mom = 1 - global_params.batch_norm_momentum
bn_eps = global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
out_channels = round_filters(32, global_params) # number of output channels
#x = Conv2dSamePadding(data=data, out_channels=out_channels, kernel_size=(3,3), stride=(2,2), pad=(1,1), bias=False)
x = Conv2dSamePadding(data=data, out_channels=out_channels, kernel_size=(3,3), stride=(1,1), pad=(1,1), bias=False)
x = mx.sym.BatchNorm(data=x, name="bn0", momentum=bn_mom, eps=bn_eps)
x = relu_fn(x)
for idx,block_args in enumerate(blocks_args):
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, global_params),
output_filters=round_filters(block_args.output_filters, global_params),
num_repeat=round_repeats(block_args.num_repeat, global_params)
)
# The first block needs to take care of stride and filter size increase.
x = MBConvBlock(x, block_args, global_params, name="block"+str(idx))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for rep in range(block_args.num_repeat - 1):
x = MBConvBlock(x, block_args, global_params, name="block"+str(idx)+"_repeat"+str(rep))
# save network
#mx.viz.plot_network(x, shape={"data":(1, 3, 112, 112)}).view()
#mx.viz.print_summary(x, shape={"data":(1, 3, 112, 112)}).view()
# Head
in_channels = block_args.output_filters # output of final block
# ori efficientnet
"""
out_channels = round_filters(1280, global_params)
x = Conv2dSamePadding(data=x, out_channels=out_channels, kernel_size=(1,1), bias=False)
x = mx.sym.BatchNorm(data=x, momentum=bn_mom, eps=bn_eps)
x = relu_fn(x)
x = mx.sym.Pooling(data=x, pool_type='avg', global_pool=True, kernel=(1,1), name="avg_pool")
if global_params.dropout_rate:
x = mx.sym.Dropout(data=x, p=global_params.dropout_rate)
fc1 = mx.sym.FullyConnected(data=x, num_hidden=config.emb_size, name='fc1')
"""
out_channels = config.emb_size
x = Conv2dSamePadding(data=x, out_channels=out_channels, kernel_size=(1,1), bias=False)
x = mx.sym.BatchNorm(data=x, momentum=bn_mom, eps=bn_eps)
x = relu_fn(x)
prefc1 = mx.sym.Pooling(data=x, pool_type='avg', global_pool=True, kernel=(1,1), name="prefc1")
fc1 = mx.sym.Flatten(data=prefc1, name="fc1")
    return fc1
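# Quick shape-check sketch (assumes config.arch and config.emb_size are set in the
# project's config module):
#   sym = get_symbol()
#   mx.viz.print_summary(sym, shape={"data": (1, 3, 112, 112)})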
| StarcoderdataPython |
1619058 | """ Cisco_IOS_XR_ip_iarm_v6_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ip\-iarm\-v6 package operational data.
This module contains definitions
for the following management objects\:
ipv6arm\: IPv6 Address Repository Manager (IPv6 ARM)
operational data
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ipv6arm(Entity):
"""
IPv6 Address Repository Manager (IPv6 ARM)
operational data
.. attribute:: addresses
IPv6 ARM address database information
**type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses>`
.. attribute:: summary
IPv6 ARM summary information
**type**\: :py:class:`Summary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Summary>`
.. attribute:: vrf_summaries
IPv6 ARM VRFs summary information
**type**\: :py:class:`VrfSummaries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.VrfSummaries>`
.. attribute:: multicast_host_interface
Default multicast host interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm, self).__init__()
self._top_entity = None
self.yang_name = "ipv6arm"
self.yang_parent_name = "Cisco-IOS-XR-ip-iarm-v6-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("addresses", ("addresses", Ipv6arm.Addresses)), ("summary", ("summary", Ipv6arm.Summary)), ("vrf-summaries", ("vrf_summaries", Ipv6arm.VrfSummaries))])
self._leafs = OrderedDict([
('multicast_host_interface', (YLeaf(YType.str, 'multicast-host-interface'), ['str'])),
])
self.multicast_host_interface = None
self.addresses = Ipv6arm.Addresses()
self.addresses.parent = self
self._children_name_map["addresses"] = "addresses"
self.summary = Ipv6arm.Summary()
self.summary.parent = self
self._children_name_map["summary"] = "summary"
self.vrf_summaries = Ipv6arm.VrfSummaries()
self.vrf_summaries.parent = self
self._children_name_map["vrf_summaries"] = "vrf-summaries"
self._segment_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm, ['multicast_host_interface'], name, value)
class Addresses(Entity):
"""
IPv6 ARM address database information
.. attribute:: vrfs
IPv6 ARM address database information per VRF
**type**\: :py:class:`Vrfs <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses, self).__init__()
self.yang_name = "addresses"
self.yang_parent_name = "ipv6arm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vrfs", ("vrfs", Ipv6arm.Addresses.Vrfs))])
self._leafs = OrderedDict()
self.vrfs = Ipv6arm.Addresses.Vrfs()
self.vrfs.parent = self
self._children_name_map["vrfs"] = "vrfs"
self._segment_path = lambda: "addresses"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses, [], name, value)
class Vrfs(Entity):
"""
IPv6 ARM address database information per VRF
.. attribute:: vrf
IPv6 ARM address database information in a VRF
**type**\: list of :py:class:`Vrf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs, self).__init__()
self.yang_name = "vrfs"
self.yang_parent_name = "addresses"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vrf", ("vrf", Ipv6arm.Addresses.Vrfs.Vrf))])
self._leafs = OrderedDict()
self.vrf = YList(self)
self._segment_path = lambda: "vrfs"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/addresses/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs, [], name, value)
class Vrf(Entity):
"""
IPv6 ARM address database information in a VRF
.. attribute:: vrf_name (key)
VRF name
**type**\: str
.. attribute:: networks
IPv6 ARM address database information by network
**type**\: :py:class:`Networks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Networks>`
.. attribute:: interfaces
IPv6 ARM address database information by interface
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Interfaces>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf, self).__init__()
self.yang_name = "vrf"
self.yang_parent_name = "vrfs"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_classes = OrderedDict([("networks", ("networks", Ipv6arm.Addresses.Vrfs.Vrf.Networks)), ("interfaces", ("interfaces", Ipv6arm.Addresses.Vrfs.Vrf.Interfaces))])
self._leafs = OrderedDict([
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
])
self.vrf_name = None
self.networks = Ipv6arm.Addresses.Vrfs.Vrf.Networks()
self.networks.parent = self
self._children_name_map["networks"] = "networks"
self.interfaces = Ipv6arm.Addresses.Vrfs.Vrf.Interfaces()
self.interfaces.parent = self
self._children_name_map["interfaces"] = "interfaces"
self._segment_path = lambda: "vrf" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/addresses/vrfs/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf, ['vrf_name'], name, value)
class Networks(Entity):
"""
IPv6 ARM address database information by
network
.. attribute:: network
An IPv6 Address in IPv6 ARM
**type**\: list of :py:class:`Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Networks, self).__init__()
self.yang_name = "networks"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("network", ("network", Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network))])
self._leafs = OrderedDict()
self.network = YList(self)
self._segment_path = lambda: "networks"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Networks, [], name, value)
class Network(Entity):
"""
An IPv6 Address in IPv6 ARM
.. attribute:: address
Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: prefix_length
Prefix Length
**type**\: int
**range:** 0..128
.. attribute:: handle
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: address_xr
Address info
**type**\: :py:class:`AddressXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr>`
.. attribute:: interface_name
Interface name
**type**\: str
.. attribute:: referenced_interface
Referenced Interface \- only valid for an unnumbered interface
**type**\: str
.. attribute:: vrf_name
VRF Name
**type**\: str
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network, self).__init__()
self.yang_name = "network"
self.yang_parent_name = "networks"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("address-xr", ("address_xr", Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr))])
self._leafs = OrderedDict([
('address', (YLeaf(YType.str, 'address'), ['str'])),
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('handle', (YLeaf(YType.str, 'handle'), ['str'])),
('interface_name', (YLeaf(YType.str, 'interface-name'), ['str'])),
('referenced_interface', (YLeaf(YType.str, 'referenced-interface'), ['str'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
])
self.address = None
self.prefix_length = None
self.handle = None
self.interface_name = None
self.referenced_interface = None
self.vrf_name = None
self.address_xr = Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr()
self.address_xr.parent = self
self._children_name_map["address_xr"] = "address-xr"
self._segment_path = lambda: "network"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network, ['address', 'prefix_length', 'handle', 'interface_name', 'referenced_interface', 'vrf_name'], name, value)
class AddressXr(Entity):
"""
Address info
.. attribute:: address
Address
**type**\: :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr.Address>`
.. attribute:: prefix_length
Prefix length
**type**\: int
**range:** 0..4294967295
.. attribute:: route_tag
Route Tag of the address
**type**\: int
**range:** 0..4294967295
.. attribute:: is_primary
Is address primary \- valid only for IPv4 addresses
**type**\: bool
.. attribute:: is_tentative
Is address valid/tentative \- valid only for IPV6 addresses
**type**\: bool
.. attribute:: is_prefix_sid
Is prefix\_sid valid \- valid only for IPV6 addresses
**type**\: bool
.. attribute:: producer
Producer Name
**type**\: str
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr, self).__init__()
self.yang_name = "address-xr"
self.yang_parent_name = "network"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("address", ("address", Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr.Address))])
self._leafs = OrderedDict([
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('route_tag', (YLeaf(YType.uint32, 'route-tag'), ['int'])),
('is_primary', (YLeaf(YType.boolean, 'is-primary'), ['bool'])),
('is_tentative', (YLeaf(YType.boolean, 'is-tentative'), ['bool'])),
('is_prefix_sid', (YLeaf(YType.boolean, 'is-prefix-sid'), ['bool'])),
('producer', (YLeaf(YType.str, 'producer'), ['str'])),
])
self.prefix_length = None
self.route_tag = None
self.is_primary = None
self.is_tentative = None
self.is_prefix_sid = None
self.producer = None
self.address = Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr.Address()
self.address.parent = self
self._children_name_map["address"] = "address"
self._segment_path = lambda: "address-xr"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr, ['prefix_length', 'route_tag', 'is_primary', 'is_tentative', 'is_prefix_sid', 'producer'], name, value)
class Address(Entity):
"""
Address
.. attribute:: afi
AFI
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: ipv4_address
IPV4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6_address
IPV6 Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr.Address, self).__init__()
self.yang_name = "address"
self.yang_parent_name = "address-xr"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('afi', (YLeaf(YType.int32, 'afi'), ['int'])),
('ipv4_address', (YLeaf(YType.str, 'ipv4-address'), ['str'])),
('ipv6_address', (YLeaf(YType.str, 'ipv6-address'), ['str'])),
])
self.afi = None
self.ipv4_address = None
self.ipv6_address = None
self._segment_path = lambda: "address"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Networks.Network.AddressXr.Address, ['afi', 'ipv4_address', 'ipv6_address'], name, value)
class Interfaces(Entity):
"""
IPv6 ARM address database information by
interface
.. attribute:: interface
An IPv6 address in IPv6 ARM
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces, self).__init__()
self.yang_name = "interfaces"
self.yang_parent_name = "vrf"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("interface", ("interface", Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface))])
self._leafs = OrderedDict()
self.interface = YList(self)
self._segment_path = lambda: "interfaces"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces, [], name, value)
class Interface(Entity):
"""
An IPv6 address in IPv6 ARM
.. attribute:: interface (key)
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: referenced_interface
Referenced Interface \- only valid for an unnumbered interface
**type**\: str
.. attribute:: vrf_name
VRF Name
**type**\: str
.. attribute:: address
Address info
**type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface, self).__init__()
self.yang_name = "interface"
self.yang_parent_name = "interfaces"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['interface']
self._child_classes = OrderedDict([("address", ("address", Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address))])
self._leafs = OrderedDict([
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('referenced_interface', (YLeaf(YType.str, 'referenced-interface'), ['str'])),
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
])
self.interface = None
self.referenced_interface = None
self.vrf_name = None
self.address = YList(self)
self._segment_path = lambda: "interface" + "[interface='" + str(self.interface) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface, ['interface', 'referenced_interface', 'vrf_name'], name, value)
class Address(Entity):
"""
Address info
.. attribute:: address
Address
**type**\: :py:class:`Address_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address.Address_>`
.. attribute:: prefix_length
Prefix length
**type**\: int
**range:** 0..4294967295
.. attribute:: route_tag
Route Tag of the address
**type**\: int
**range:** 0..4294967295
.. attribute:: is_primary
Is address primary \- valid only for IPv4 addresses
**type**\: bool
.. attribute:: is_tentative
Is address valid/tentative \- valid only for IPV6 addresses
**type**\: bool
.. attribute:: is_prefix_sid
Is prefix\_sid valid \- valid only for IPV6 addresses
**type**\: bool
.. attribute:: producer
Producer Name
**type**\: str
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address, self).__init__()
self.yang_name = "address"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("address", ("address", Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address.Address_))])
self._leafs = OrderedDict([
('prefix_length', (YLeaf(YType.uint32, 'prefix-length'), ['int'])),
('route_tag', (YLeaf(YType.uint32, 'route-tag'), ['int'])),
('is_primary', (YLeaf(YType.boolean, 'is-primary'), ['bool'])),
('is_tentative', (YLeaf(YType.boolean, 'is-tentative'), ['bool'])),
('is_prefix_sid', (YLeaf(YType.boolean, 'is-prefix-sid'), ['bool'])),
('producer', (YLeaf(YType.str, 'producer'), ['str'])),
])
self.prefix_length = None
self.route_tag = None
self.is_primary = None
self.is_tentative = None
self.is_prefix_sid = None
self.producer = None
self.address = Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address.Address_()
self.address.parent = self
self._children_name_map["address"] = "address"
self._segment_path = lambda: "address"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address, ['prefix_length', 'route_tag', 'is_primary', 'is_tentative', 'is_prefix_sid', 'producer'], name, value)
class Address_(Entity):
"""
Address
.. attribute:: afi
AFI
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: ipv4_address
IPV4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: ipv6_address
IPV6 Address
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address.Address_, self).__init__()
self.yang_name = "address"
self.yang_parent_name = "address"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('afi', (YLeaf(YType.int32, 'afi'), ['int'])),
('ipv4_address', (YLeaf(YType.str, 'ipv4-address'), ['str'])),
('ipv6_address', (YLeaf(YType.str, 'ipv6-address'), ['str'])),
])
self.afi = None
self.ipv4_address = None
self.ipv6_address = None
self._segment_path = lambda: "address"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Addresses.Vrfs.Vrf.Interfaces.Interface.Address.Address_, ['afi', 'ipv4_address', 'ipv6_address'], name, value)
class Summary(Entity):
"""
IPv6 ARM summary information
.. attribute:: producer_count
Number of producers
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: address_conflict_count
Number of address conflicts
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: unnumbered_conflict_count
Number of unnumbered interface conflicts
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: db_master_version
IP\-ARM DB master version
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_count
Number of known VRFs
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.Summary, self).__init__()
self.yang_name = "summary"
self.yang_parent_name = "ipv6arm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('producer_count', (YLeaf(YType.int32, 'producer-count'), ['int'])),
('address_conflict_count', (YLeaf(YType.int32, 'address-conflict-count'), ['int'])),
('unnumbered_conflict_count', (YLeaf(YType.int32, 'unnumbered-conflict-count'), ['int'])),
('db_master_version', (YLeaf(YType.uint32, 'db-master-version'), ['int'])),
('vrf_count', (YLeaf(YType.int32, 'vrf-count'), ['int'])),
])
self.producer_count = None
self.address_conflict_count = None
self.unnumbered_conflict_count = None
self.db_master_version = None
self.vrf_count = None
self._segment_path = lambda: "summary"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.Summary, ['producer_count', 'address_conflict_count', 'unnumbered_conflict_count', 'db_master_version', 'vrf_count'], name, value)
class VrfSummaries(Entity):
"""
IPv6 ARM VRFs summary information
.. attribute:: vrf_summary
IPv6 ARM VRF summary information
**type**\: list of :py:class:`VrfSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_iarm_v6_oper.Ipv6arm.VrfSummaries.VrfSummary>`
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.VrfSummaries, self).__init__()
self.yang_name = "vrf-summaries"
self.yang_parent_name = "ipv6arm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("vrf-summary", ("vrf_summary", Ipv6arm.VrfSummaries.VrfSummary))])
self._leafs = OrderedDict()
self.vrf_summary = YList(self)
self._segment_path = lambda: "vrf-summaries"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.VrfSummaries, [], name, value)
class VrfSummary(Entity):
"""
IPv6 ARM VRF summary information
.. attribute:: vrf_name (key)
VRF name
**type**\: str
.. attribute:: vrf_id
VRF ID
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_name_xr
VRF Name
**type**\: str
"""
_prefix = 'ip-iarm-v6-oper'
_revision = '2017-05-01'
def __init__(self):
super(Ipv6arm.VrfSummaries.VrfSummary, self).__init__()
self.yang_name = "vrf-summary"
self.yang_parent_name = "vrf-summaries"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['vrf_name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('vrf_name', (YLeaf(YType.str, 'vrf-name'), ['str'])),
('vrf_id', (YLeaf(YType.uint32, 'vrf-id'), ['int'])),
('vrf_name_xr', (YLeaf(YType.str, 'vrf-name-xr'), ['str'])),
])
self.vrf_name = None
self.vrf_id = None
self.vrf_name_xr = None
self._segment_path = lambda: "vrf-summary" + "[vrf-name='" + str(self.vrf_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-ip-iarm-v6-oper:ipv6arm/vrf-summaries/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Ipv6arm.VrfSummaries.VrfSummary, ['vrf_name', 'vrf_id', 'vrf_name_xr'], name, value)
def clone_ptr(self):
self._top_entity = Ipv6arm()
return self._top_entity
| StarcoderdataPython |
3259261 | <filename>predict-social-media-ad-purchased.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[3]:
data=pd.read_csv('data.csv')
# In[4]:
data.isnull().sum()
# In[5]:
data.head()
# In[10]:
data = data.drop('User ID', axis=1)
# In[11]:
data.head()
# In[13]:
from sklearn.preprocessing import LabelEncoder
# In[14]:
le= LabelEncoder()
# In[15]:
data['Gender']=le.fit_transform(data['Gender'])
# In[16]:
data.head()
# In[17]:
import matplotlib.pyplot as plt
import seaborn as sns
# In[19]:
sns.pairplot(data)
# In[20]:
x=data.drop('Purchased',axis=1).values
y=data['Purchased'].values
# In[21]:
x.shape,y.shape
# In[22]:
from sklearn.model_selection import train_test_split
# In[23]:
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,shuffle=True)
# In[24]:
x_train.shape,y_train.shape,x_test.shape,y_test.shape
# In[25]:
from sklearn.linear_model import LogisticRegression
# In[26]:
log=LogisticRegression()
# In[30]:
log.fit(x_train,y_train)
# In[31]:
pred = log.predict(x_test)
# In[32]:
from sklearn.metrics import accuracy_score
# In[33]:
accuracy_score(y_test,pred)
# In[34]:
plt.plot(pred, '--r')
plt.plot(y_test, '-b')
plt.title('Pur vs Act')
plt.show()
# In[ ]:
| StarcoderdataPython |
3360106 | <gh_stars>1000+
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
import numpy as np
from ...algorithm import Algorithm
from ...algorithm_selector import COMPRESSION_ALGORITHMS
from ....graph import model_utils as mu
from ....graph import node_utils as nu
from ....samplers.creator import create_sampler
from ....statistics.statistics import TensorStatistic
from ....statistics.functions import activations as asf
from ....statistics.functions import aggregation as agf
from ....utils.logger import get_logger
logger = get_logger(__name__)
@COMPRESSION_ALGORITHMS.register('ActivationChannelAlignment')
class ActivationChannelAlignment(Algorithm):
name = 'ActivationChannelAlignment'
def __init__(self, config, engine):
super().__init__(config, engine)
stat_subset_size = min(
self._config.get(
'stat_subset_size', len(self._engine.data_loader)),
len(self._engine.data_loader))
self.total_exec_steps = stat_subset_size
shuffle_data = self._config.get('shuffle_data', False)
seed = self._config.get('seed', 0)
self._sampler = create_sampler(engine, stat_subset_size, shuffle_data, seed)
@property
def change_original_model(self):
return True
def run(self, model):
""" this function applies activation range alignment procedure
:param model: model to apply the algo on
:return range-corrected model
"""
activations_statistics = self._stats_collector.get_statistics_for_algorithm(self.name)
node_pairs_list = self.find_node_pairs(model)
stats = dict()
for node_name, stats_list in activations_statistics.items():
stats[node_name] = dict()
for stats_name, stats_values in stats_list.items():
stats[node_name][stats_name] = agf.median(stats_values)
for node_pair_data in node_pairs_list:
node_in, weight_in, bias_in, node_out, weight_out, bias_out = node_pair_data
self.align_ranges(node_in, weight_in, bias_in, weight_out, bias_out, stats)
node_out['need_rescale'] = True
return model
def register_statistics(self, model, stats_collector):
model = deepcopy(model)
activation_statistics_layout = self.get_activations_statistics_layout(model)
stats_collector.register(self.name, activation_statistics_layout, self._sampler)
self._stats_collector = stats_collector
def get_activations_statistics_layout(self, model):
node_pairs_list = self.find_node_pairs(model)
stats_layout = {}
for node_pair_data in node_pairs_list:
node_in, *_ = node_pair_data
# Step over bias Add node
if nu.get_bias_for_node(node_in):
node_in = nu.get_node_output(node_in, 0)[0]
name = node_in.name
stats_layout[name] = {'channel_range_min': TensorStatistic(asf.quantile_per_channel, q=1e-4),
'channel_range_max': TensorStatistic(asf.quantile_per_channel, q=1-1e-4)}
logger.debug('Collecting output statistics for nodes {}'.format(stats_layout.keys()))
return stats_layout
def align_ranges(self, node_in, weight_in, bias_in, weight_out, bias_out, stats):
# Step over bias Add node
if nu.get_bias_for_node(node_in):
node_in = nu.get_node_output(node_in, 0)[0]
name = node_in.name
amin = stats[name]['channel_range_min']
amax = stats[name]['channel_range_max']
ascale, amean = amax - amin, (amin + amax) * 0.5
self.align_means(amean, bias_in, bias_out, weight_out)
self.align_scales(ascale, bias_in, weight_in, weight_out)
@staticmethod
def align_scales(ascale, bias_in, weight_in, weight_out):
if np.all(ascale <= 0):
return
scale_factor = ascale / np.median(ascale[ascale > 0])
scale_factor[ascale <= 0] = 1
scale_factor = np.clip(scale_factor, 1e-2, 1e2)
# scale producer convolution weights
weight_in_value = nu.get_node_value(weight_in)
weight_in_type = weight_in_value.dtype
weight_in_shape = weight_in_value.shape
if weight_in_shape[0] == scale_factor.shape[0]:
scale_in_shape = np.ones(len(weight_in_shape), dtype=np.int)
scale_in_shape[0] = scale_factor.shape[0]
weight_in_value = weight_in_value / scale_factor.reshape(scale_in_shape)
nu.set_node_value(weight_in, weight_in_value.astype(weight_in_type))
if bias_in is not None:
# set new bias
old_bias_in_val = nu.get_node_value(bias_in)
old_bias_in_type = old_bias_in_val.dtype
bias_in_value = old_bias_in_val / scale_factor.reshape(old_bias_in_val.shape)
nu.set_node_value(bias_in, bias_in_value.astype(old_bias_in_type))
weight_out_value = nu.get_node_value(weight_out)
weight_out_type = weight_out_value.dtype
scale_out_shape = np.ones(len(weight_out_value.shape), dtype=np.int)
scale_out_shape[1] = scale_factor.shape[0]
weight_out_value = weight_out_value * scale_factor.reshape(scale_out_shape)
nu.set_node_value(weight_out, weight_out_value.astype(weight_out_type))
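    # Worked illustration (hypothetical numbers): if the per-channel activation ranges are
    # ascale = [2, 4, 8], the median of the positive entries is 4, so scale_factor =
    # [0.5, 1.0, 2.0] (then clipped to [1e-2, 1e2]). The producer's weights/bias for those
    # channels are divided by the factors and the consumer's input-channel weights are
    # multiplied by them, so the network output is unchanged while the intermediate
    # activation ranges become comparable across channels.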
@staticmethod
def align_means(amean, bias_in, bias_out, weight_out):
# shift biases to get 0 mean activations in each channel
if bias_in and bias_out:
old_bias_in_value = nu.get_node_value(bias_in)
old_bias_in_type = old_bias_in_value.dtype
bias_in_value = old_bias_in_value - amean.reshape(old_bias_in_value.shape)
nu.set_node_value(bias_in, bias_in_value.astype(old_bias_in_type))
weight_out_value = nu.get_node_value(weight_out)
weight_dims = len(weight_out_value.shape)
if weight_dims > 2:
weight_out_value = np.sum(weight_out_value,
axis=tuple(range(2, weight_dims)))
shift = weight_out_value.dot(amean)
del weight_out_value
old_bias_out_value = nu.get_node_value(bias_out)
old_bias_out_type = old_bias_out_value.dtype
bias_out_value = old_bias_out_value + shift.reshape(old_bias_out_value.shape)
nu.set_node_value(bias_out, bias_out_value.astype(old_bias_out_type))
def find_node_pairs(self, model):
node_pairs_list = []
nodes = sorted([(n.name, n) for n in mu.get_nodes_by_type(model, ['Convolution'])])
for _, node_out in nodes:
if not self.check_conv_node(node_out):
continue
node_in = nu.get_node_input(node_out, 0)
# Conv -> Add -> Conv
if node_in.type == 'Add':
node_in = nu.get_node_input(node_in, 0)
if not self.check_producer_node(node_in, node_out):
continue
bias_in, weights_in = self.get_producer_weights(node_in)
bias_out, weights_out = self.get_consumer_weights(node_out)
if not weights_in or not weights_out:
continue
# node_in -> node_out
node_pairs_list.append((node_in, weights_in, bias_in,
node_out, weights_out, bias_out))
# align activations channels inside this sequence
logger.debug('{} -> {}'.format(node_in.name, node_out.name))
return node_pairs_list
@staticmethod
def check_conv_node(node_out):
if node_out.has_valid('group') and node_out.group != 1:
return False
if not node_out.has_valid('pads_begin') or not node_out.has_valid('pads_end') or \
not np.all(np.array(node_out.pads_begin) == 0) or not np.all(np.array(node_out.pads_end) == 0):
logger.debug('Pad of {} Convolution node != 0 '
'Do not align activations for this node pair.'.format(node_out.name))
return False
if not node_out.has_valid('strides') or not np.all(np.array(node_out.strides) == 1):
logger.debug('Strides of {} Convolution node != 1 '
'Do not align activations for this node pair.'.format(node_out.name))
return False
if not node_out.has_valid('strides') or not np.all(np.array(node_out.dilations) == 1):
logger.debug('Dilation of {} Convolution node != 1 '
'Do not align activations for this node pair.'.format(node_out.name))
return False
return True
@staticmethod
def check_producer_node(node_in, node_out):
# look into producer. skip linear operations and check that
# it is convolutions with single fq consumer
node_out_producer_port = node_out.in_port(0).get_source()
if len(node_out_producer_port.get_destinations()) > 1:
logger.debug('{} has a producer that feeds many nodes. '
'Do not align activations for this node pair.'.format(node_out.name))
return False
# check that producer is convolution
if node_in.type not in ['Convolution', 'MatMul']:
logger.debug('{} gets data from {} {}'.format(node_out.name, node_in.name, node_in.type))
logger.debug('{} has no Convolution producer. '
'Do not align activations for this node pair.'.format(node_out.name))
return False
return True
@staticmethod
def get_producer_weights(node_in):
# get producer convolution weights
w_in = nu.get_weights_for_node(node_in)
if w_in.type == 'FakeQuantize':
w_in = nu.get_node_input(w_in, 0)
if w_in.type != 'Const':
w_in = None
logger.debug('Node after {} has no Convolution producer with const weights. '
'Do not align activations for this node pair.'.format(node_in))
# get producer convolution bias
b_in = nu.get_bias_for_node(node_in)
if b_in is not None and b_in.type != 'Const':
w_in = None
logger.debug('Node after {} has no convolution producer with const bias. '
'Do not align activations for this node pair.'.format(node_in))
return b_in, w_in
@staticmethod
def get_consumer_weights(node_out):
# get consumer convolution weights
w_out = nu.get_weights_for_node(node_out)
if w_out.type == 'FakeQuantize':
w_out = nu.get_node_input(w_out, 0)
if w_out.type != 'Const':
w_out = None
logger.debug('{} has no const weights. '
'Do not align activations for this node pair.'.format(node_out.name))
# get consumer convolution bias
b_out = nu.get_bias_for_node(node_out)
if b_out is not None and b_out.type != 'Const':
b_out = None
return b_out, w_out
| StarcoderdataPython |
126843 | from vistrails.core.modules.utils import make_modules_dict
try:
# read_numpy requires numpy
import numpy
except ImportError: # pragma: no cover
numpy_modules = []
else:
from read_numpy import _modules as numpy_modules
from read_csv import _modules as csv_modules
from read_excel import _modules as excel_modules
from read_json import _modules as json_modules
_modules = make_modules_dict(numpy_modules, csv_modules, excel_modules,
json_modules,
namespace='read')
| StarcoderdataPython |
1616602 | from treelist import TreeList
a = TreeList([1, 2, 3, 3, 3, 4, 5, 7, 8])
print(f"a[{a.leftmost(lambda x: x > 3)}] > 3")
print(a)
for i in range(1, 8 + 1):
print(f"{i} => {a.bisect_left(i)} .. {a.bisect_right(i)}")
| StarcoderdataPython |
122952 | <reponame>ctoth/platform_utils<filename>platform_utils/clipboard.py
import platform
def set_text_windows(text):
"""
Args:
text:
Returns:
"""
import win32clipboard
import win32con
win32clipboard.OpenClipboard()
try:
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(text, win32con.CF_UNICODETEXT)
finally:
win32clipboard.CloseClipboard()
def set_text_gtk(text):
"""
Args:
text:
Returns:
"""
import gtk
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def set_text_osx(text):
"""
Args:
text:
Returns:
"""
scrap = True
try:
import Carbon.Scrap
except ModuleNotFoundError:
scrap = False
if scrap:
Carbon.Scrap.ClearCurrentScrap()
scrap = Carbon.Scrap.GetCurrentScrap()
scrap.PutScrapFlavor("TEXT", 0, text)
else:
try:
text = text.encode()
except AttributeError:
pass
import subprocess
s = subprocess.Popen("pbcopy", stdin=subprocess.PIPE)
s.communicate(text)
def set_text(text):
"""Copies text to the clipboard.
Args:
text:
Returns:
"""
plat = platform.system()
if plat == "Windows":
set_text_windows(text)
elif plat == "Linux":
set_text_gtk(text)
elif plat == "Darwin":
set_text_osx(text)
else:
raise NotImplementedError("Cannot set clipboard text on platform %s" % plat)
copy = set_text
def get_text_windows():
""" """
import win32clipboard
import win32con
win32clipboard.OpenClipboard()
try:
text = win32clipboard.GetClipboardData(win32con.CF_UNICODETEXT)
finally:
win32clipboard.CloseClipboard()
return text
def get_text_osx():
""" """
import subprocess
s = subprocess.Popen("pbpaste", stdout=subprocess.PIPE)
result = s.communicate()[0]
try:
result = result.decode()
except UnicodeDecodeError:
pass
return result
def get_text():
""" """
plat = platform.system()
if plat == "Windows":
return get_text_windows()
elif plat == "Darwin":
return get_text_osx()
else:
raise NotImplementedError(
"Cannot get text from clipboard on platform %s" % plat
)
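# Minimal usage sketch (illustrative only): round-trip a string through the system
# clipboard with the helpers defined above. It assumes the matching platform backend
# (pywin32 on Windows, gtk on Linux, or pbcopy/pbpaste on macOS) is available.
if __name__ == "__main__":
    set_text("hello from platform_utils")
    print(get_text())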
| StarcoderdataPython |
from typing import List, Tuple
import helper
import inputHelper
from puzzleBase import PuzzleBase
CardList = List[int]
Const_Player1: str = "Player 1"
Const_Player2: str = "Player 2"
class InputData:
expectedAnswer: int
player1Deck: CardList
player2Deck: CardList
def __init__(self, name: str, part: int) -> None:
day = 22
decks = {}
playerblocks = inputHelper.load_file(day, name).split("\n\n")
for player in playerblocks:
bits = player.splitlines()
key = bits[0].replace(":", "")
decks[key] = [int(x) for x in bits[1:]]
self.player1Deck = decks[Const_Player1]
self.player2Deck = decks[Const_Player2]
answer = inputHelper.load_file(day, f"{name}-answer{part}")
self.expectedAnswer = int(answer) if answer is not None else None
class Puzzle(PuzzleBase):
nextGame: int = 1
def play_combat(self, p1Deck: CardList, p2Deck: CardList) -> CardList:
round = 0
while len(p1Deck) > 0 and len(p2Deck) > 0:
round += 1
helper.dprint(f"-- Round {round} --")
helper.dprint(f"{Const_Player1}'s deck: " + ", ".join(str(x) for x in p1Deck))
helper.dprint(f"{Const_Player2}'s deck: " + ", ".join(str(x) for x in p2Deck))
p1Card = p1Deck.pop(0)
p2Card = p2Deck.pop(0)
helper.dprint(f"{Const_Player1} plays {p1Card}")
helper.dprint(f"{Const_Player2} plays {p2Card}")
if p1Card > p2Card:
helper.dprint(f"{Const_Player1} wins the round!")
# add_to_deck(p1Deck, p1Card, p2Card)
p1Deck.append(p1Card)
p1Deck.append(p2Card)
else:
helper.dprint(f"{Const_Player2} wins the round!")
# add_to_deck(p2Deck, p2Card, p1Card)
p2Deck.append(p2Card)
p2Deck.append(p1Card)
helper.dprint("")
helper.dprint("== Post-game results ==")
helper.dprint(f"{Const_Player1}'s deck: " + ", ".join(str(x) for x in p1Deck))
helper.dprint(f"{Const_Player2}'s deck: " + ", ".join(str(x) for x in p2Deck))
return p1Deck if len(p1Deck) > 0 else p2Deck
def add_to_history(self, game: int, p1Deck: CardList, p2Deck: CardList, history: List[str]) -> bool:
p1DeckStr = ",".join(str(x) for x in p1Deck)
p2DeckStr = ",".join(str(x) for x in p2Deck)
state = f"Game {game}|{Const_Player1}:{p1DeckStr}|{Const_Player2}:{p2DeckStr}"
if state in history:
return False
history.append(state)
return True
def play_recursive_combat(self, p1Deck: CardList, p2Deck: CardList, history: List[str]) -> Tuple[str, CardList]:
game = self.nextGame
self.nextGame += 1
if game == 5:
helper.dprint("")
helper.dprint(f"=== Game {game} ===")
helper.dprint("")
round = 0
winner = ""
winningDeck: CardList = []
while len(p1Deck) > 0 and len(p2Deck) > 0:
round += 1
if not self.add_to_history(game, p1Deck, p2Deck, history):
helper.dprint(f"{Const_Player1} wins round {round} of game {game}!")
return Const_Player1, p1Deck
helper.dprint(f"-- Round {round} (Game {game}) --")
helper.dprint(f"{Const_Player1}'s deck: " + ", ".join(str(x) for x in p1Deck))
helper.dprint(f"{Const_Player2}'s deck: " + ", ".join(str(x) for x in p2Deck))
p1Card = p1Deck.pop(0)
p2Card = p2Deck.pop(0)
helper.dprint(f"{Const_Player1} plays {p1Card}")
helper.dprint(f"{Const_Player2} plays {p2Card}")
if p1Card <= len(p1Deck) and p2Card <= len(p2Deck):
helper.dprint("Playing a sub-game to determine the winner...")
helper.dprint("")
p1SubDeck = p1Deck[:p1Card]
p2SubDeck = p2Deck[:p2Card]
winner, winningDeck = self.play_recursive_combat(p1SubDeck, p2SubDeck, history)
else:
winner = Const_Player1 if p1Card > p2Card else Const_Player2
helper.dprint(f"{winner} wins round {round} of game {game}!")
if winner == Const_Player1:
p1Deck.append(p1Card)
p1Deck.append(p2Card)
else:
p2Deck.append(p2Card)
p2Deck.append(p1Card)
helper.dprint("")
if game == 1:
helper.dprint("== Post-game results ==")
helper.dprint(f"{Const_Player1}'s deck: " + ", ".join(str(x) for x in p1Deck))
helper.dprint(f"{Const_Player2}'s deck: " + ", ".join(str(x) for x in p2Deck))
else:
helper.dprint(f"The winner of game {game} is {winner}!")
return (Const_Player1, p1Deck) if winner == Const_Player1 else (Const_Player2, p2Deck)
def calculate_score(self, deck: CardList, multiplier: int) -> int:
if len(deck) == 0:
return 0
card = deck.pop(len(deck) - 1)
result = (card * multiplier) + self.calculate_score(deck, multiplier + 1)
return result
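    # Worked example: for the winning deck 3, 2, 10, 6, 8, 5, 9, 4, 7, 1 (from the
    # Advent of Code 2020 day 22 statement) the bottom card counts once, the next card
    # twice, and so on: 3*10 + 2*9 + 10*8 + 6*7 + 8*6 + 5*5 + 9*4 + 4*3 + 7*2 + 1*1 = 306.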
def run_part1(self, data: InputData) -> str:
winnerDeck = self.play_combat(data.player1Deck, data.player2Deck)
result = self.calculate_score(winnerDeck, 1)
return helper.validate_result("What is the winning player's score?", result, data.expectedAnswer)
def run_part2(self, data: InputData) -> str:
history: List[str] = []
winner, winnerDeck = self.play_recursive_combat(data.player1Deck, data.player2Deck, history)
result = self.calculate_score(winnerDeck, 1)
return helper.validate_result("What is the winning player's score?", result, data.expectedAnswer)
def solve(self):
print("Day 22: Crab Combat")
print("")
self.run_example(lambda: "P1 Ex1) " + self.run_part1(InputData('example1', 1)))
self.run_problem(lambda: "Part 1) " + self.run_part1(InputData('input', 1)))
print("")
self.run_example(lambda: "P2 Ex1) " + self.run_part2(InputData('example1', 2)))
self.run_example(lambda: "P2 Ex2) " + self.run_part2(InputData('example2', 2)))
        self.run_problem(lambda: "Part 2) " + self.run_part2(InputData('input', 2)))
| StarcoderdataPython |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_filename):
import itertools
from genomicode import SimpleVariantMatrix
from genomicode import AnnotationMatrix
from Betsy import module_utils as mlib
summary_file = in_data.identifier
metadata = {}
#x = mlib.get_user_option(
# user_options, "nonsynonymous_and_stopgain_only",
# allowed_values=["no", "yes"])
#nonsynonymous_and_stopgain_only = (x == "yes")
min_alt_reads = mlib.get_user_option(
user_options, "filter_by_min_alt_reads", not_empty=True,
type=int)
assert min_alt_reads >= 0 and min_alt_reads < 10000
min_total_reads = mlib.get_user_option(
user_options, "filter_by_min_total_reads", not_empty=True,
type=int)
assert min_total_reads >= 0 and min_total_reads < 10000
min_vaf = mlib.get_user_option(
user_options, "filter_by_min_vaf", not_empty=True,
type=float)
assert min_vaf >= 0.0 and min_vaf < 1.0
#min_gq = mlib.get_user_option(
# user_options, "filter_by_min_GQ", not_empty=True, type=float)
#assert min_gq >= 0 and min_gq < 1000
assert min_total_reads or min_alt_reads, "No filter"
matrix = SimpleVariantMatrix.read_as_am(summary_file)
#var_matrix = SimpleVariantMatrix.read(summary_file)
#call_matrix = var_matrix.call_matrix
#annot_matrix = var_matrix.annot_matrix
#annovar_matrix = None
#for (name, matrix) in var_matrix.named_matrices:
# if "ExonicFunc.refGene" in matrix.headers:
# annovar_matrix = matrix
# break
#assert annovar_matrix, "Missing annotation: ExonicFunc.refGene"
# copy.deepcopy is very slow. Try to avoid it.
# Strategy:
# 1. Make a list of the changes to be made.
# 2. Save the filtered rows.
# 3. Make the changes.
# 4. Save the non-filtered rows.
I_remove = {} # i -> 1
call_remove = {} # i -> (sample, caller) -> 1
#CHROM = matrix.header2annots["______Chrom"]
#POS = matrix.header2annots["______Pos"]
#POS = [int(x) for x in POS]
#REF = matrix.header2annots["______Ref"]
#ALT = matrix.header2annots["______Alt"]
# Optimization: normalize the headers for the samples and callers.
sc2header = {} # (sample, caller) -> header_h
for sc in itertools.product(matrix.samples, matrix.callers):
sample, caller = sc
header = "%s___%s___Ref/Alt/VAF" % (sample, caller)
header_h = matrix.normalize_header(header)
assert header_h
sc2header[sc] = header_h
for i in range(matrix.num_annots()):
has_calls = False # whether this row has any calls.
for sc in itertools.product(matrix.samples, matrix.callers):
sample, caller = sc
header_h = sc2header[sc]
call_str = matrix.header2annots[header_h][i]
if not call_str:
continue
call = SimpleVariantMatrix._parse_call(call_str)
filt = False
# filter_by_min_alt_reads
if min_alt_reads > 0 and \
(call.num_alt is None or call.num_alt < min_alt_reads):
filt = True
# filter_by_min_total_reads
if min_total_reads > 0 and (
call.total is None or call.total < min_total_reads):
filt = True
# filter_by_min_vaf
if min_vaf >= 1E-6 and (
call.vaf is None or call.vaf < min_vaf):
filt = True
if filt:
if i not in call_remove:
call_remove[i] = {}
call_remove[i][sc] = 1
else:
has_calls = True
# If this coordinate has no more calls, then remove the
# whole row.
if not has_calls:
I_remove[i] = 1
I_remove = sorted(I_remove)
# Write out a matrix of the discarded rows.
filtered_matrix = AnnotationMatrix.rowslice(matrix, I_remove)
SimpleVariantMatrix.write_from_am("discarded.txt", filtered_matrix)
# Remove the calls.
for i in call_remove:
for sc in call_remove[i]:
header_h = sc2header[sc]
call_str = matrix.header2annots[header_h][i]
assert call_str
matrix.header2annots[header_h][i] = ""
# Which rows to keep.
I_remove_dict = {}.fromkeys(I_remove)
I_keep = [
i for i in range(matrix.num_annots()) if i not in I_remove_dict]
filtered_matrix = AnnotationMatrix.rowslice(matrix, I_keep)
SimpleVariantMatrix.write_from_am(out_filename, filtered_matrix)
## ## Filter out synonymous variants.
## #if nonsynonymous_and_stopgain_only:
## # # Make sure annotated with Annovar.
## # assert "ExonicFunc.refGene" in annovar_matrix.headers
## # exonic_func = annovar_matrix["ExonicFunc.refGene"]
## # for i, efunc in enumerate(exonic_func):
## # efunc = exonic_func[i]
## # assert efunc in [
## # "", "nonsynonymous SNV", "synonymous SNV",
## # "stopgain", "stoploss",
## # "frameshift substitution", "nonframeshift substitution",
## # "unknown"], \
## # "Unknown exonic_func: %s" % efunc
## # if efunc not in ["nonsynonymous SNV", "stopgain"]:
## # I_remove[i] = 1
## # continue
## # Filter based on the calls.
## if min_alt_reads > 0 or min_total_reads > 0:
## all_coord = call_matrix.coord2samplecaller2call.keys()
## for coord in all_coord:
## all_sc = call_matrix.coord2samplecaller2call[coord].keys()
## for sc in all_sc:
## # SimpleVariantMatrix.Call object.
## call = call_matrix.coord2samplecaller2call[coord][sc]
## # filter_by_min_alt_reads
## if min_alt_reads > 0 and \
## (call.num_alt is None or call.num_alt < min_alt_reads):
## if coord not in call_remove:
## call_remove[coord] = {}
## call_remove[coord][sc] = 1
## # filter_by_min_total_reads
## if min_total_reads > 0 and (
## call.total is None or call.total < min_total_reads):
## if coord not in call_remove:
## call_remove[coord] = {}
## call_remove[coord][sc] = 1
## # Filter based on VAF.
## if min_vaf >= 1E-6:
## all_coord = call_matrix.coord2samplecaller2call.keys()
## for coord in all_coord:
## all_sc = call_matrix.coord2samplecaller2call[coord].keys()
## for sc in all_sc:
## call = call_matrix.coord2samplecaller2call[coord][sc]
## # filter_by_min_vaf
## if call.vaf is None or call.vaf < min_vaf:
## if coord not in call_remove:
## call_remove[coord] = {}
## call_remove[coord][sc] = 1
## # If any of these coordinates have no more variants, then
## # remove the whole row.
## if call_remove:
## chrom, pos = annot_matrix["Chrom"], annot_matrix["Pos"]
## ref, alt = annot_matrix["Ref"], annot_matrix["Alt"]
## pos = [int(x) for x in pos]
## coord2i = {}
## for i, coord in enumerate(zip(chrom, pos, ref, alt)):
## coord2i[coord] = i
## for coord in call_remove:
## num_remove = len(call_remove[coord])
## num_calls = len(call_matrix.coord2samplecaller2call[coord])
## assert num_remove <= num_calls
## if num_remove == num_calls:
## i = coord2i[coord]
## I_remove[i] = 1
## # Make a matrix of the discarded rows.
## old_annot_matrix = var_matrix.annot_matrix
## old_named_matrices = var_matrix.named_matrices
## filtered_matrix = var_matrix
## x = AnnotationMatrix.rowslice(var_matrix.annot_matrix, I_remove)
## filtered_matrix.annot_matrix = x
## named_matrices = []
## for (name, matrix) in var_matrix.named_matrices:
## matrix = AnnotationMatrix.rowslice(matrix, I_remove)
## named_matrices.append((name, matrix))
## filtered_matrix.named_matrices = named_matrices
## SimpleVariantMatrix.write("discarded.txt", filtered_matrix)
## var_matrix.annot_matrix = old_annot_matrix
## var_matrix.named_matrices = old_named_matrices
## # Remove the calls.
## for coord in call_remove:
## chrom, pos, ref, alt = coord
## for (sample, caller) in call_remove[coord]:
## var_matrix.call_matrix.set_call(
## chrom, pos, ref, alt, sample, caller, None)
## # Which rows to keep.
## I_keep = [
## i for i in range(var_matrix.num_variants()) if i not in I_remove]
## # Filter annotation matrix
## var_matrix.annot_matrix = AnnotationMatrix.rowslice(
## var_matrix.annot_matrix, I_keep)
## # Filter named matrices.
## for i, (name, matrix) in enumerate(var_matrix.named_matrices):
## matrix = AnnotationMatrix.rowslice(matrix, I_keep)
## var_matrix.named_matrices[i] = (name, matrix)
## SimpleVariantMatrix.write(out_filename, var_matrix)
return metadata
def name_outfile(self, antecedents, user_options):
return "calls.txt"
| StarcoderdataPython |
from abc import ABC, abstractmethod
from copy import copy
from typing import Any, Optional
import numpy as np
from gym.spaces import Space
from gym.utils import seeding
class Operator(ABC):
# Set these in ALL subclasses
suboperators: tuple = tuple()
grid_dependant: Optional[bool] = None
action_dependant: Optional[bool] = None
context_dependant: Optional[bool] = None
deterministic: Optional[bool] = None
@abstractmethod
def __init__(
self,
grid_space: Optional[Space] = None,
action_space: Optional[Space] = None,
context_space: Optional[Space] = None,
) -> None:
# fmt: off
self.grid_space = grid_space
self.action_space = action_space
self.context_space = context_space
# fmt: on
self.seed()
@abstractmethod
def update(
self, grid: np.ndarray, action: Any, context: Any
) -> tuple[np.ndarray, Any]:
"""Update a Cellular Automaton's Lattice (Grid) by using a provided action and context.
Parameters
----------
grid : array-like
Cellular Automaton lattice.
action : object
Action influencing the operator output.
context : object
Extra information.
Returns
-------
new_grid : array-like
Modified grid.
new_context : object
Modified context.
"""
new_grid = copy(grid)
new_context = copy(context)
return new_grid, new_context
def __call__(self, *args, **kwargs):
return self.update(*args, **kwargs)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
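# Illustrative sketch (not part of the original module): a minimal concrete Operator.
# The name IdentityOperator is hypothetical; it defers to the base-class update(),
# which simply returns copies of the grid and the context.
class IdentityOperator(Operator):
    grid_dependant = False
    action_dependant = False
    context_dependant = False
    deterministic = True
    def __init__(self, grid_space=None, action_space=None, context_space=None):
        super().__init__(grid_space, action_space, context_space)
    def update(self, grid, action, context):
        return super().update(grid, action, context)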
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# django-read-only-admin
# tests/test_utils.py
from typing import List # pylint: disable=W0611
from django.test import TestCase
from django.test.utils import override_settings
from read_only_admin.utils import (
get_read_only_permission_name,
get_read_only_permission_codename,
)
__all__ = [
"GetReadOnlyPermissionCodenameUtilTest",
"GetReadOnlyPermissionNameUtilTest",
] # type: List[str]
class GetReadOnlyPermissionCodenameUtilTest(TestCase):
"""
get_read_only_permission_codename util tests.
"""
def test_get_read_only_permission_codename(self) -> None:
"""
Util must return model read only permission codename based on read only prefix setting. # noqa: E501
"""
self.assertEqual(
get_read_only_permission_codename(model="user"), "readonly_user"
)
@override_settings(READ_ONLY_ADMIN_PERMISSION_PREFIX="")
def test_get_read_only_permission_codename__without_prefix(self) -> None:
"""
Util must return model read only permission codename based on read only prefix setting with broken prefix settings. # noqa: E501
"""
self.assertEqual(get_read_only_permission_codename(model="user"), "_user")
class GetReadOnlyPermissionNameUtilTest(TestCase):
"""
get_read_only_permission_name util tests.
"""
def test_get_read_only_permission_name(self) -> None:
"""
Util must return model read only permission name based on read only name prefix setting. # noqa: E501
"""
self.assertEqual(get_read_only_permission_name(model="user"), "Read only user")
@override_settings(READ_ONLY_ADMIN_PERMISSION_NAME_PREFIX="")
def test_get_read_only_permission_name__without_prefix(self) -> None:
"""
Util must return model read only permission name based on read only name prefix setting with broken name prefix settings. # noqa: E501
"""
self.assertEqual(get_read_only_permission_name(model="user"), " user")
| StarcoderdataPython |
import json
import glob
import pickle
import pandas as pd
import os
HEAD_LEN = 510
def GenerateDatasetFromJson():
'''
return: a list of list of list,
first dimension is each document,
second dimension is doc_list and label_list of each document,
third dimension is number of sentences in a document
'''
fns = glob.glob('./train/*.json')
dataset=[]
for fn in fns:
file=json.load(open(fn,'r'))
title = file['title']
para = file['segmentation']
summa = file['summarization']
doc_list=[]
label_list=[]
for one_key in summa:
for sentence_dict in summa[one_key]['summarization_data']:
doc_list.append(sentence_dict['sent'])
label_list.append(sentence_dict['label'])
dataset.append([doc_list, label_list])
return dataset
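# Illustrative shape of the value returned above, assuming one document with two
# labelled sentences: dataset[0] == [["sentence one", "sentence two"], [1, 0]]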
def GenerateDatasetFromJson2(dir,doc_num, neg_multiplier):
    '''
    return: a balanced pandas DataFrame with columns "sents", "docs" and "y":
    "sents" is a candidate summary sentence, "docs" is roughly the first HEAD_LEN
    words of the source document, and "y" is the sentence label; negative rows are
    down-sampled to neg_multiplier times the number of positive rows
    '''
fns = glob.glob(os.path.join(dir, '*.json'))
cnt=1
sent_list=[]
doc_list = []
label_list=[]
for fn in fns:
file=json.load(open(fn,'r'))
title = file['title']
# para = ' '.join(map(lambda x: ' '.join(x), file['segmentation']))
para = []
word_cnt = 0
for clip in file['segmentation']:
for sentence in clip:
sent_l = len(sentence.split(' '))
if word_cnt + sent_l < HEAD_LEN:
para.append(sentence)
word_cnt += sent_l
para = ' '.join(para)
summa = file['summarization']
for one_key in summa:
for sentence_dict in summa[one_key]['summarization_data']:
sent_list.append(sentence_dict['sent'])
doc_list.append(para)
label_list.append(sentence_dict['label'])
cnt+=1
if cnt>doc_num:
break
df = pd.DataFrame.from_dict({"sents":sent_list, "docs":doc_list, "y":label_list})
pos_df = df[df.y == 1]
neg_df = df[df.y == 0]
print("Negative sample size:", len(neg_df))
print("Positive sample size:", len(pos_df))
sub_neg_df = neg_df.sample(len(pos_df)*neg_multiplier)
balanced_df = pos_df.append(sub_neg_df)
return balanced_df
if __name__=="__main__":
# dataset = GenerateDatasetFromJson()
# pickle.dump(dataset, open('train_data.robin','wb'))
# dataset = pickle.load(open('train_data.robin','rb'))
# print(len(dataset))
dataset_df = GenerateDatasetFromJson2('./train', 500, 2)
dataset_df.to_json("train_data.json")
dataset_df = GenerateDatasetFromJson2('./test', 100, 2)
dataset_df.to_json("test_data.json")
| StarcoderdataPython |
# @Author: Ivan
# @Time: 2020/11/16
import os
import time
import argparse
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import utils
import matplotlib.pyplot as plt
from utils.datasets import create_dataloader
from utils.util import parse_cfg
from models import build_model
from torchviz import make_dot
def train(model, train_loader, optimizer, epoch, device, train_loss_lst, train_acc_lst):
model.train() # Set the module in training mode
train_loss = 0
correct = 0
for batch_idx, (inputs, labels) in enumerate(train_loader):
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
# foward propagation
outputs = model(inputs)
pred = outputs.max(1, keepdim=True)[1]
correct += pred.eq(labels.view_as(pred)).sum().item()
# back propagation
criterion = nn.CrossEntropyLoss()
loss = criterion(outputs, labels)
train_loss += loss.item()
# loss = F.nll_loss(outputs, labels) # negative log likelihood loss
loss.backward()
optimizer.step()
# show batch0 dataset
if batch_idx == 0 and epoch == 0:
fig = plt.figure()
inputs = inputs.cpu() # convert to cpu
grid = utils.make_grid(inputs)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.show()
# print loss and accuracy
if (batch_idx+1) % 10 == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)] Loss: {:.6f}'
.format(epoch, batch_idx * len(inputs), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_loss /= len(train_loader.dataset)
# record loss and acc
train_loss_lst.append(train_loss)
train_acc_lst.append(correct / len(train_loader.dataset))
return train_loss_lst, train_acc_lst
def validate(model, val_loader, device, val_loss_lst, val_acc_lst):
model.eval() # Sets the module in evaluation mode
val_loss = 0
correct = 0
# no need to calculate gradients
with torch.no_grad():
for data, target in val_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# add one batch loss
criterion = nn.CrossEntropyLoss()
val_loss += criterion(output, target).item()
# val_loss += F.nll_loss(output, target, reduction='sum').item()
# find index of max prob
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
val_loss /= len(val_loader.dataset)
print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
.format(val_loss, correct, len(val_loader.dataset),
100. * correct / len(val_loader.dataset)))
# record loss and acc
val_loss_lst.append(val_loss)
val_acc_lst.append(correct / len(val_loader.dataset))
return val_loss_lst, val_acc_lst
def test(model, test_loader, device):
model.eval() # Sets the module in evaluation mode
test_loss = 0
correct = 0
# no need to calculate gradients
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# add one batch loss
criterion = nn.CrossEntropyLoss()
test_loss += criterion(output, target).item()
# test_loss += F.nll_loss(output, target, reduction='sum').item()
# find index of max prob
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
# record loss and acc
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
.format(test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def arg_parse():
"""
    Parse command-line arguments for the training script
"""
parser = argparse.ArgumentParser(description='Food Recognition System')
parser.add_argument("--cfg", "-c", dest='cfg', default="cfg/frs.cfg",
help="Your config file path", type=str)
parser.add_argument("--weights", "-w", dest='weights', default="",
help="Path of pretrained weights", type=str)
parser.add_argument("--output", "-o", dest='output', default="output",
help="Path of output files", type=str)
parser.add_argument("--epochs", "-e", dest='epochs', default=200,
help="Training epochs", type=int)
parser.add_argument("--lr", "-lr", dest='lr', default=0.005,
help="Training learning rate", type=float)
parser.add_argument("--batch_size", "-b", dest='batch_size', default=32,
help="Training batch size", type=int)
parser.add_argument("--input_size", "-i", dest='input_size', default=224,
help="Image input size", type=int)
parser.add_argument("--save_freq", "-s", dest='save_freq', default=10,
help="Frequency of saving model", type=int)
return parser.parse_args()
if __name__ == "__main__":
args = arg_parse()
weight_path, cfg_path, output_path = args.weights, args.cfg, args.output
epochs, lr, batch_size, input_size, save_freq = args.epochs, args.lr, args.batch_size, args.input_size, args.save_freq
# load configs from config
cfg = parse_cfg(cfg_path)
print('Config:', cfg)
dataset_path, nb_class = cfg['dataset'], int(cfg['nb_class'])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load datasets
train_loader, val_loader, test_loader = create_dataloader(
'IMAGE_FOLDER', dataset_path, batch_size, input_size)
# load model
model = build_model(weight_path, cfg).to(device)
print('Model successfully loaded!')
# plot model structure
# graph = make_dot(model(torch.rand(1, 3, input_size, input_size).cuda()),
# params=dict(model.named_parameters()))
# graph.render('model_structure', './', cleanup=True, format='png')
# optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
optimizer = optim.Adam(model.parameters(), lr=lr)
# create output file folder
start = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
os.makedirs(os.path.join(output_path, start))
# loss and accuracy list
train_loss_lst, val_loss_lst = [], []
train_acc_lst, val_acc_lst = [], []
# train
for epoch in range(epochs):
train_loss_lst, train_acc_lst = train(model, train_loader, optimizer,
epoch, device, train_loss_lst, train_acc_lst)
val_loss_lst, val_acc_lst = validate(
model, val_loader, device, val_loss_lst, val_acc_lst)
# save model weights every save_freq epoch
if epoch % save_freq == 0:
torch.save(model.state_dict(), os.path.join(
output_path, start, 'epoch'+str(epoch)+'.pth'))
test(model, test_loader, device)
# plot loss and accuracy, save params change
fig = plt.figure()
plt.plot(range(epochs), train_loss_lst, 'g', label='train loss')
plt.plot(range(epochs), val_loss_lst, 'k', label='val loss')
plt.plot(range(epochs), train_acc_lst, 'r', label='train acc')
plt.plot(range(epochs), val_acc_lst, 'b', label='val acc')
plt.grid(True)
plt.xlabel('epoch')
plt.ylabel('acc-loss')
plt.legend(loc="upper right")
now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
plt.savefig(os.path.join(output_path, start, now + '.jpg'))
plt.show()
# save model
torch.save(model.state_dict(), os.path.join(output_path, start, 'last.pth'))
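# Example invocation (illustrative; the script name and hyper-parameter values are assumptions):
#   python train.py --cfg cfg/frs.cfg --epochs 100 --batch_size 32 --lr 0.001 --input_size 224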
| StarcoderdataPython |
import logging
from django.conf import settings
from django.db import models
import sendgrid
from pokewatch.pokedex.models import Pokemon
logger = logging.getLogger(__name__)
class Place(models.Model):
label = models.CharField(unique=True, max_length=255)
latitude = models.DecimalField(max_digits=17, decimal_places=14)
longitude = models.DecimalField(max_digits=17, decimal_places=14)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta(object):
ordering = ['label']
unique_together = ('latitude', 'longitude')
index_together = ('latitude', 'longitude')
def __str__(self):
return self.label
class Trainer(models.Model):
name = models.CharField(unique=True, max_length=255)
email = models.EmailField(unique=True)
places = models.ManyToManyField(Place, related_name='trainers')
pokemon = models.ManyToManyField(Pokemon, related_name='trainers')
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta(object):
ordering = ['name']
def __str__(self):
return self.name
def notify(self, place, pokemon, sendgrid_client):
logger.info(
'Notifying %s of Pokemon nearby %s: %s.',
self.name,
place.label,
', '.join([p.name for p in pokemon]),
)
lines = []
for p in pokemon:
minutes, seconds = divmod(p.expires_in().seconds, 60)
line = (
'A wild {name} is nearby! '
'It\'ll be around for {minutes} minutes and {seconds} seconds. '
'Find it on the map at {link}.'
).format(
name=p.name,
minutes=minutes,
seconds=seconds,
link=p.map_link(),
)
lines.append(line)
body = '\n\n'.join(lines)
message = sendgrid.Mail(
to=self.email,
from_email=settings.FROM_EMAIL,
subject='Pokemon near {}!'.format(place.label),
text=body,
)
status, msg = sendgrid_client.send(message)
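        # Note: unpacking (status, msg) from send() assumes the older sendgrid
        # v2-style client interface for the sendgrid_client passed in.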
log_msg = 'SendGrid returned {status}: {msg}.'.format(status=status, msg=msg)
logger.info(log_msg) if status == 200 else logger.error(log_msg)
| StarcoderdataPython |
<gh_stars>1-10
import random
import os
#file=open('jio-num.txt','w')
#this is very simple code, you can change this according
#to your need, but the change should be committed to this repo.
df=[]
#first == '6296'
for i in range(1001):
x='+916297'+'%s'%random.randint(100000,999999)
y='+916296'+'%s'%random.randint(100000,999999)
z='+917029'+'%s'%random.randint(100000,999999)
p='+919641'+'%s'%random.randint(100000,999999)
#q='+91'+'%s'%random.randint(100000,999999)
df.append(x)
df.append(y)
df.append(z)
df.append(p)
#print(len(df))
r=list(set(df))
print('len of main num',len(r))
#print(r)
for i in range(len(r)):
print("sending message to",r[i])
os.system("termux-sms-send -n %s %s"%(list(r)[i],"Grow your Business now! or make some money online its very easy with webnima team find us on https://webxm.ml/business/ or https://webxm.rf.gd/ share this message to everyone to help them to Grow their business"))
print("message sent")
#file.close()
| StarcoderdataPython |
from soccer_geometry.transformation import Transformation
from soccer_geometry.camera import Camera
| StarcoderdataPython |
import typing
import pytest
from energuide import bilingual
from energuide import element
from energuide.embedded import code
from energuide.embedded import distance
from energuide.embedded import insulation
from energuide.embedded import window
from energuide.exceptions import InvalidEmbeddedDataTypeError
@pytest.fixture
def raw_sample() -> element.Element:
doc = """
<Window>
<Label>East0001</Label>
<Construction>
<Type idref='Code 11' rValue='0.4779'>234002</Type>
</Construction>
<Measurements width='1967.738' height='1322.0699' />
</Window>
"""
return element.Element.from_string(doc)
BAD_XML_DATA = [
# This XML block is missing the <Label> tag
"""
<Window>
<Construction>
<Type rValue='0.4779'>User specified</Type>
</Construction>
<Measurements width='1967.738' height='1322.0699' />
</Window>
""",
# This XML block has non-numeric strings as attribute values
"""
<Window>
<Label>East0001</Label>
<Construction>
<Type rValue='bad'>User specified</Type>
</Construction>
<Measurements width='data' height='here' />
</Window>
""",
# This XML block is missing the attributes of the <Measurements> tag
"""
<Window>
<Label>East0001</Label>
<Construction>
<Type>User specified</Type>
</Construction>
<Measurements />
</Window>
"""
]
@pytest.fixture
def sample_window_code() -> typing.Dict[str, code.WindowCode]:
return {'Code 11': code.WindowCode(
identifier='Code 11',
label='202002',
tags={
code.WindowCodeTag.GLAZING_TYPE: bilingual.Bilingual(
english='Double/double with 1 coat',
french='Double/double, 1 couche',
),
code.WindowCodeTag.COATING_TINTS: bilingual.Bilingual(english='Clear', french='Transparent'),
code.WindowCodeTag.FILL_TYPE: bilingual.Bilingual(english='6 mm Air', french="6 mm d'air"),
code.WindowCodeTag.SPACER_TYPE: bilingual.Bilingual(english='Metal', french='Métal'),
code.WindowCodeTag.CODE_TYPE: bilingual.Bilingual(english='Picture', french='Fixe'),
code.WindowCodeTag.FRAME_MATERIAL: bilingual.Bilingual(english='Wood', french='Bois'),
}
)}
@pytest.fixture
def sample(sample_window_code: typing.Dict[str, code.WindowCode]) -> window.Window:
return window.Window(
label='East0001',
window_code=sample_window_code['Code 11'],
window_insulation=insulation.Insulation(0.4779),
width=distance.Distance(1.967738),
height=distance.Distance(1.3220699),
)
def test_from_data(raw_sample: element.Element,
sample_window_code: typing.Dict[str, code.WindowCode],
sample: window.Window) -> None:
output = window.Window.from_data(raw_sample, sample_window_code)
assert output == sample
@pytest.mark.parametrize("bad_xml", BAD_XML_DATA)
def test_bad_data(bad_xml: str) -> None:
window_node = element.Element.from_string(bad_xml)
with pytest.raises(InvalidEmbeddedDataTypeError) as excinfo:
window.Window.from_data(window_node, {})
assert excinfo.value.data_class == window.Window
def test_from_data_missing_codes() -> None:
doc = """
<Window>
<Label>East0001</Label>
<Construction>
<Type rValue='0.4779' />
</Construction>
<Measurements width='1967.738' height='1322.0699' />
</Window>
"""
sample = element.Element.from_string(doc)
output = window.Window.from_data(sample, {})
assert output.label == 'East0001'
assert output.window_code is None
assert output.to_dict()['glazingTypesEnglish'] is None
def test_to_dict(sample: window.Window) -> None:
output = sample.to_dict()
assert output == {
'label': 'East0001',
'insulationRsi': 0.4779,
'insulationR': pytest.approx(2.713642),
'glazingTypesEnglish': 'Double/double with 1 coat',
'glazingTypesFrench': 'Double/double, 1 couche',
'coatingsTintsEnglish': 'Clear',
'coatingsTintsFrench': 'Transparent',
'fillTypeEnglish': '6 mm Air',
'fillTypeFrench': "6 mm d'air",
'spacerTypeEnglish': 'Metal',
'spacerTypeFrench': 'Métal',
'typeEnglish': 'Picture',
'typeFrench': 'Fixe',
'frameMaterialEnglish': 'Wood',
'frameMaterialFrench': 'Bois',
'areaMetres': pytest.approx(2.601487),
'areaFeet': pytest.approx(28.002176),
'widthMetres': 1.967738,
'widthFeet': pytest.approx(6.455833),
'heightMetres': 1.3220699,
'heightFeet': pytest.approx(4.337499),
}
| StarcoderdataPython |
<gh_stars>0
# Polycarpus works as a DJ in the best Berland nightclub,
# and he often uses dubstep music in his performance.
# Recently, he has decided to take a couple of old songs and make dubstep remixes from them.
# Let's assume that a song consists of some number of words (that don't contain WUB).
# To make the dubstep remix of this song,
# Polycarpus inserts a certain number of words "WUB" before the first word of the song
# (the number may be zero), after the last word (the number may be zero),
# and between words (at least one between any pair of neighbouring words),
# and then the boy glues together all the words, including "WUB",
# in one string and plays the song at the club.
# For example,
# a song with words "I AM X" can transform into a dubstep remix as
# "WUBWUBIWUBAMWUBWUBX" and cannot transform into "WUBWUBIAMWUBX".
# Recently, Jonny has heard Polycarpus's new dubstep track,
# but since he isn't into modern music,
# he decided to find out what was the initial song that Polycarpus remixed.
# Help Jonny restore the original song.
# My Solution
def song_decoder(song):
decoded = song.replace("WUB", " ")
decoded = " ".join(decoded.split())
return decoded
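# Example (from the statement above): song_decoder("WUBWUBIWUBAMWUBWUBX") returns "I AM X".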
# Best Solution
def song_decoder(song):
    return " ".join(song.replace('WUB', ' ').split())
| StarcoderdataPython |
<reponame>penghou620/airflow
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.grpc_operator import GrpcOperator
from tests.compat import mock
class StubClass:
def __init__(self, channel):
pass
def stream_call(self, data):
pass
class TestGrpcOperator(unittest.TestCase):
def custom_conn_func(self, connection):
pass
@mock.patch('airflow.contrib.operators.grpc_operator.GrpcHook')
def test_with_interceptors(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
interceptors=[],
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=[], custom_connection_func=None)
@mock.patch('airflow.contrib.operators.grpc_operator.GrpcHook')
def test_with_custom_connection_func(self, mock_hook):
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
custom_connection_func=self.custom_conn_func,
task_id="test_grpc",
)
operator.execute({})
mock_hook.assert_called_once_with(
"grpc_default", interceptors=None, custom_connection_func=self.custom_conn_func)
@mock.patch('airflow.contrib.operators.grpc_operator.GrpcHook')
def test_execute_with_log(self, mock_hook):
mocked_hook = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{'run.return_value': ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
log_response=True,
task_id="test_grpc",
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
mock_info.assert_any_call("Calling gRPC service")
mock_info.assert_any_call("'value1'")
mock_info.assert_any_call("'value2'")
@mock.patch('airflow.contrib.operators.grpc_operator.GrpcHook')
def test_execute_with_callback(self, mock_hook):
mocked_hook = mock.Mock()
callback = mock.Mock()
mock_hook.return_value = mocked_hook
mocked_hook.configure_mock(**{'run.return_value': ["value1", "value2"]})
operator = GrpcOperator(
stub_class=StubClass,
call_func="stream_call",
task_id="test_grpc",
response_callback=callback
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute({})
mock_hook.assert_called_once_with("grpc_default", interceptors=None, custom_connection_func=None)
mocked_hook.run.assert_called_once_with(StubClass, "stream_call", data={}, streaming=False)
self.assertTrue(("'value1'", "'value2'") not in mock_info.call_args_list)
mock_info.assert_any_call("Calling gRPC service")
callback.assert_any_call("value1", {})
callback.assert_any_call("value2", {})
| StarcoderdataPython |
<reponame>Hoter11/WebProject
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-19 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_entry_text'),
]
operations = [
migrations.AddField(
model_name='entry',
name='identifier',
field=models.CharField(default=None, max_length=4),
preserve_default=False,
),
]
| StarcoderdataPython |