Dataset columns:

repo_name       string   (lengths 5–92)
path            string   (lengths 4–232)
copies          string   (19 classes)
size            string   (lengths 4–7)
content         string   (lengths 721–1.04M)
license         string   (15 classes)
hash            int64    (-9,223,277,421,539,062,000 to 9,223,102,107B)
line_mean       float64  (6.51 to 99.9)
line_max        int64    (15 to 997)
alpha_frac      float64  (0.25 to 0.97)
autogenerated   bool     (1 class)
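The schema above matches a Hugging Face `datasets`-style code corpus. Below is a minimal sketch of iterating over such a dataset, assuming the `datasets` library is available; the identifier "your-org/code-corpus" is a placeholder, not the real dataset id.

from datasets import load_dataset

# Hypothetical dataset identifier -- substitute the real repo id.
ds = load_dataset("your-org/code-corpus", split="train", streaming=True)

for row in ds:
    # Each row carries the columns listed above.
    print(row["repo_name"], row["path"], row["license"], row["size"])
    if not row["autogenerated"]:
        source_code = row["content"]  # full file contents as one string
        break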
jdvelasq/cashflows
cashflows/utilityfun.py
1
1162
""" Economic utility functions =============================================================================== Overview ------------------------------------------------------------------------------- Functions in this module ------------------------------------------------------------------------------- """ from math import log, exp def exp_utility_fun(r_param): """Exponential utility function. U(x) = 1 - exp(-x / r_param) """ def utility_fun(x, inverse=False): if inverse is False: return (1 - exp( -x / r_param)) return - r_param * log(1 - x) return utility_fun def log_utility_fun(r_param): """Exponential utility function. U(x) = log(x + r_param) """ def utility_fun(x, inverse=False): if inverse is False: return log(x + r_param) return exp(x) - r_param return utility_fun def sqrt_utility_fun(r_param): """Exponential utility function. U(x) = + sqrt(x + r_param) """ def utility_fun(x, inverse=False): if inverse is False: return (x + r_param) ** 0.5 return x * x - r_param return utility_fun
mit
-1,251,830,415,994,589,000
21.784314
79
0.451807
false
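A minimal usage sketch for the utility functions in the cashflows row above; the import path follows that row's file path, and the numeric values are illustrative assumptions.

from cashflows.utilityfun import exp_utility_fun

u = exp_utility_fun(1000)           # risk-tolerance parameter (illustrative value)
utility = u(500)                    # U(500) = 1 - exp(-500 / 1000), about 0.393
amount = u(utility, inverse=True)   # the inverse maps the utility back to ~500
print(utility, amount)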
dmargala/tpcorr
tpcorr/observation.py
1
17935
## Observation correction model import numpy as np import astropy.time import astropy.coordinates import astropy.units as u import astropy.units.imperial import astropy.units.cds import bossdata import specsim import tpcorr.pointing import tpcorr.guider import tpcorr.acceptance_model class Observation(object): def __init__(self, plate, mjd, guide_wlen=5400 * u.Angstrom, offset_wlen=4000 * u.Angstrom, std_wlen=5400 * u.Angstrom, wlen_grid_steps=15, steps_per_exposure=5, pressure0=None, temperature0=None): print 'Calculating corrections for {} observed on MJD {}'.format(plate, mjd) self.plate = plate self.mjd = mjd self.guide_wlen = guide_wlen self.std_wlen = std_wlen self.offset_wlen = offset_wlen self.steps_per_exposure = steps_per_exposure self.wlen_grid_steps = wlen_grid_steps self.finder = bossdata.path.Finder() self.mirror = bossdata.remote.Manager() # Get the list of exposures used in this observation's coadd from a spec lite file. spec_name = self.finder.get_spec_path(plate, mjd, fiber=1, lite=True) self.spec_file = bossdata.spec.SpecFile(self.mirror.get(spec_name)) # Read the first b1 raw science exposure to find this plate's plug map. raw = self.spec_file.get_raw_image(0, 'blue', finder=self.finder, mirror=self.mirror) plug_map = raw.read_plug_map() # Look up the plate design pointing from the raw header and convert to # an index A,B,C,... -> 0,1,2,... pointing_label = raw.header['POINTING'].strip() pointing_index = ord(pointing_label) - ord('A') # Initialize a pointing object for this plate's sky location. ra_center = float(plug_map['raCen']) * u.deg self.ra_center = ra_center dec_center = float(plug_map['decCen']) * u.deg print 'Plate center is RA={:.3f}, DEC={:.3f} for {}-{}'.format(ra_center, dec_center, plate, pointing_label) self.pointing = tpcorr.pointing.Pointing(ra_center, dec_center) # Find the nominal observing temperature and time that this plate's holes are drilled for. self.design_temp = float(plug_map['temp'])*u.deg_C self.design_pressure = None # Calculate based on elevation and temperature self.design_ha = float(plug_map['ha'].split()[pointing_index]) * u.deg midnight = astropy.time.Time(mjd, format='mjd', scale='tai', location=self.pointing.where) design_time = specsim.transform.adjust_time_to_hour_angle(midnight, ra_center, self.design_ha, max_iterations=100) self.design_tai = design_time.mjd * 86400. print 'Holes drilled for T={:.1f} and HA={:.1f} (TAI={:.1f})'.format(self.design_temp, self.design_ha, self.design_tai) # design_time.mjd # when = astropy.time.Time(tai/86400., format='mjd', scale='tai', location=self.where) self.design_alt = self.pointing.plate_center.transform_to(astropy.coordinates.AltAz( obstime=design_time, location=self.pointing.where)).alt.to(u.deg) # Find this plate's guide stars. plugging = plug_map['PLUGMAPOBJ'] guide_fibers = plugging['holeType'] == 'GUIDE' guide_ra, guide_dec = plugging['ra'][guide_fibers], plugging['dec'][guide_fibers] self.guide_targets = astropy.coordinates.ICRS(guide_ra * u.deg, guide_dec * u.deg) # Calculate the nominal guide fiber positions. self.guide_x0, self.guide_y0, _, _ = self.pointing.transform( self.guide_targets, self.design_tai, guide_wlen, self.design_temp, self.design_pressure) # Find this plate's offset fibers. We have to use spAll for this since the plug map does # not record the design wavelengths. 
self.plugmap = self.get_plugmap_from_spframes() offset_fibers_mask = self.plugmap['LAMBDA_EFF'] == offset_wlen.to(u.Angstrom).value offset_fibers = self.plugmap[offset_fibers_mask] offset_xfocal = offset_fibers['XFOCAL'] * u.mm offset_yfocal = offset_fibers['YFOCAL'] * u.mm self.fiber_ids = offset_fibers['FIBERID'] self.offset_targets = astropy.coordinates.ICRS(ra=offset_fibers['RA'] * u.deg, dec=offset_fibers['DEC'] * u.deg) self.num_offset_targets = np.count_nonzero(self.offset_targets) print 'Plate has {:d} guide fibers and {:d} offset targets.'.format(len(self.guide_targets), self.num_offset_targets) if self.num_offset_targets > 0: # Calculate the nominal science fiber positions. These will not match XFOCAL, YFOCAL # exactly since we do not exactly replicate the IDL transforms, but they should be # close (within ~0.2 arcsec) and we only use offsets calculated consistently with # transform() in the following. self.offset_x0, self.offset_y0, offset_alt, offset_az = self.pointing.transform( self.offset_targets, self.design_tai, offset_wlen, self.design_temp, self.design_pressure) # Calculate where the offset target fibers would have been positioned if they were # designed for the same wavelength as the standard stars. self.offset_x0_std, self.offset_y0_std, _, _ = self.pointing.transform( self.offset_targets, self.design_tai, std_wlen, self.design_temp, self.design_pressure) # Initialize the wavelength grid to use for calculating corrections. self.wlen_grid = np.linspace(3500., 10500., wlen_grid_steps)[:, np.newaxis] * u.Angstrom # Initialize guided target centroid list self.guided_centroids = [] # Initialize exposure meta data lists self.seeing = np.empty((self.spec_file.num_exposures)) * u.arcsec self.ha = np.empty((self.spec_file.num_exposures)) * u.degree self.pressure = np.empty((self.spec_file.num_exposures)) * u.kPa self.temperature = np.empty((self.spec_file.num_exposures)) * u.deg_C self.tai_beg = np.empty((self.spec_file.num_exposures)) # seconds self.tai_end = np.empty((self.spec_file.num_exposures)) # seconds self.alt = np.empty((self.spec_file.num_exposures)) * u.degree self.init_exposure_meta(pressure=pressure0, temperature=temperature0) def init_exposure_meta(self, seeing=None, pressure=None, temperature=None): # Precompute the conversion from inches of Hg to kPa. pconv = (1 * u.cds.mmHg * u.imperial.inch / u.mm).to(u.kPa).value # Loop over exposures for exp_index in range(self.spec_file.num_exposures): # Open the b1 frame for this exposure, to access its metadata. b1_frame_name = self.finder.get_plate_path( self.plate, self.spec_file.get_exposure_name(exp_index, 'blue', 'spFrame')) b1_frame = bossdata.plate.FrameFile(self.mirror.get(b1_frame_name)) exp_id = b1_frame.exposure_id # Lookup this exposure's observing time, seeing, and temperature. self.tai_beg[exp_index] = b1_frame.header['TAI-BEG'] self.tai_end[exp_index] = b1_frame.header['TAI-END'] tai_mid = 0.5 * (self.tai_beg[exp_index] + self.tai_end[exp_index]) # Convert tai to hour angle self.ha[exp_index] = tpcorr.pointing.normalize_angle( self.pointing.hour_angle(tai_mid).to(u.deg).value)*u.deg if seeing is not None: self.seeing[exp_index] = seeing else: if b1_frame.header['SEEING50'] == 0: self.seeing[exp_index] = 1.49 * u.arcsec print 'Warning: SEEING50=0. 
Using nominal value: ', self.seeing[exp_index] else: self.seeing[exp_index] = b1_frame.header['SEEING50'] * u.arcsec if temperature is not None: self.temperature[exp_index] = temperature else: try: self.temperature[exp_index] = b1_frame.header['AIRTEMP'] * u.deg_C except ValueError, e: self.temperature[exp_index] = 5 * u.deg_C print 'Warning: AIRTEMP not available in exp header. Using nominal value: ', self.temperature[exp_index] if pressure is not None: self.pressure[exp_index] = pressure else: try: self.pressure[exp_index] = b1_frame.header['PRESSURE'] * pconv * u.kPa except ValueError, e: self.pressure[exp_index] = 71.890 * u.kPa print 'Warning: PRESSURE not available in exp header. Using nominal value: ', self.pressure[exp_index] obstime = astropy.time.Time(tai_mid/86400., format='mjd', scale='tai', location=self.pointing.where) self.alt[exp_index] = self.pointing.plate_center.transform_to(astropy.coordinates.AltAz( obstime=obstime, location=self.pointing.where)).alt.to(u.deg) print 'Exp[{:02d}] #{:08d} seeing {:.3f}, T={:+5.1f}, P={:.1f}, TAI {:.1f} ({:+7.3f} days, HA {:+.1f})'.format( exp_index, exp_id, self.seeing[exp_index], self.temperature[exp_index], self.pressure[exp_index], tai_mid, (tai_mid - self.design_tai)/86400., self.ha[exp_index]) def get_plugmap_from_spframes(self): # Read frame files for both spectrographs frames = [] for fiber in (1,501): spec_name = self.finder.get_spec_path(self.plate, self.mjd, fiber=fiber, lite=True) spec_file = bossdata.spec.SpecFile(self.mirror.get(spec_name)) frame_name = self.finder.get_plate_path(self.plate, spec_file.get_exposure_name(0, 'blue', 'spFrame')) frames.append(bossdata.plate.FrameFile(self.mirror.get(frame_name))) # Stack frame plugmaps return astropy.table.vstack([frame.plug_map for frame in frames]) def get_exp_centroids(self, exp_index, guide_plot_name=None): # Create time steps covering this exposure. tai_steps = np.linspace(self.tai_beg[exp_index], self.tai_end[exp_index], self.steps_per_exposure) # Calculate the actual guide target positions on the focal plane without any guiding. guide_x, guide_y, _, _ = self.pointing.transform( self.guide_targets[:, np.newaxis], tai_steps, self.guide_wlen, self.temperature[exp_index], self.pressure[exp_index]) # Solve for the optimal guider corrections. guider = tpcorr.guider.Guider(self.guide_x0, self.guide_y0, guide_x, guide_y) if guide_plot_name: guider.plot(tai_steps, field_radius=340 * u.mm, zoom=5000., fiber_radius=0.1 * u.arcsec * self.pointing.platescale, save=guide_plot_name) # Calculate the offset target paths on the focal plane without any guiding, for the actual observing conditions. offset_x, offset_y, _, _ = self.pointing.transform( self.offset_targets[:, np.newaxis, np.newaxis], tai_steps, self.wlen_grid, self.temperature[exp_index], self.pressure[exp_index]) # Apply guiding corrections to estimate the actual offset target paths during the exposure. return guider.correct(offset_x, offset_y) def get_mean_exp_centroids(self, extrap_wlen=False): # Create time steps covering this exposure. midnight = astropy.time.Time(self.mjd, format='mjd', scale='tai', location=self.pointing.where) ha = np.mean(self.ha) time = specsim.transform.adjust_time_to_hour_angle(midnight, self.ra_center, ha, max_iterations=100) tai = time.mjd * 86400. 
temperature = np.mean(self.temperature) pressure = np.mean(self.pressure) print 'Mean Exposure: seeing {:.3f}, T={:+5.1f}, P={:.1f}, TAI {:.1f} ({:+7.3f} days, HA {:+.1f})'.format( np.mean(self.seeing), temperature, pressure, tai, (tai - self.design_tai)/86400., ha) # Calculate the actual guide target positions on the focal plane without any guiding. guide_x, guide_y, _, _ = self.pointing.transform( self.guide_targets[:, np.newaxis], tai, self.guide_wlen, self.design_temp, self.design_pressure) # Solve for the optimal guider corrections. guider = tpcorr.guider.Guider(self.guide_x0, self.guide_y0, guide_x, guide_y) # Calculate the offset target paths on the focal plane without any guiding, for the actual observing conditions. offset_x, offset_y, _, _ = self.pointing.transform( self.offset_targets[:, np.newaxis, np.newaxis], tai, self.wlen_grid, self.design_temp, self.design_pressure, extrap_wlen=extrap_wlen) return guider.correct(offset_x, offset_y) def get_mean_correction(self, extrap_wlen=False): corrections = np.empty((self.num_offset_targets, self.wlen_grid_steps, 1)) # Estimate the actual offset target paths during the exposure # (offset_x0, offset_y0), (offset_x0_std, offset_y0_std), (guided_x, guided_y) = self.get_mean_exp_centroids() guided_x, guided_y = self.get_mean_exp_centroids(extrap_wlen=extrap_wlen) # Calculate centroid offsets for each offset target, relative to its nominal fiber center. offset = np.sqrt( (guided_x - self.offset_x0[:, np.newaxis, np.newaxis])**2 + (guided_y - self.offset_y0[:, np.newaxis, np.newaxis])**2) # Calculate centroid offsets for each offset target, relative to where its fiber center would # be if it were designed for the same wavelength as the standard stars. offset_std = np.sqrt( (guided_x - self.offset_x0_std[:, np.newaxis, np.newaxis])**2 + (guided_y - self.offset_y0_std[:, np.newaxis, np.newaxis])**2) seeing = np.mean(self.seeing) max_offset = 1.1/2.0*max(np.max((offset / self.pointing.platescale).to(u.arcsec)).value, np.max((offset_std / self.pointing.platescale).to(u.arcsec)).value) acceptance_model = tpcorr.acceptance_model.AcceptanceModel(seeing, max_offset=max_offset) # Calculate the acceptance fractions for both sets of centroid offsets. acceptance = acceptance_model((offset / self.pointing.platescale).to(u.arcsec)) acceptance_std = acceptance_model((offset_std / self.pointing.platescale).to(u.arcsec)) # Calculate the acceptance fraction ratios, tabulated for each offset target, wavelength and time. # The ratio calculated this way gives the correction of eqn (13). corrections = acceptance_std / acceptance mean_correction = np.mean(corrections, axis=-1) return mean_correction, guided_x, guided_y def get_corrections(self, seeing_wlen=5400.*u.Angstrom): # Precompute wlen ratio for wavelength dependent seeing adjustment wlen_ratio = (self.wlen_grid / seeing_wlen).si # Initialize acceptance ratio grid corrections = np.empty( (self.spec_file.num_exposures, self.num_offset_targets, self.wlen_grid_steps, self.steps_per_exposure), dtype=float) guided_centroids = [] # Loop over exposures for exp_index in range(self.spec_file.num_exposures): # Estimate the actual offset target paths during the exposure guided_x, guided_y = self.get_exp_centroids(exp_index) guided_centroids.append((guided_x, guided_y)) # Calculate centroid offsets for each offset target, relative to its nominal fiber center. 
offset = np.sqrt( (guided_x - self.offset_x0[:, np.newaxis, np.newaxis])**2 + (guided_y - self.offset_y0[:, np.newaxis, np.newaxis])**2) # Calculate centroid offsets for each offset target, relative to where its fiber center would # be if it were designed for the same wavelength as the standard stars. offset_std = np.sqrt( (guided_x - self.offset_x0_std[:, np.newaxis, np.newaxis])**2 + (guided_y - self.offset_y0_std[:, np.newaxis, np.newaxis])**2) seeing = self.seeing[exp_index] # psf = sdss_25m.get_atmospheric_psf(seeing_wlen, seeing, gauss=False) # acceptance_model = sdss_25m.calculate_fiber_acceptance(psf) seeing_wlen_adjusted = seeing*wlen_ratio**(-0.2) # acceptance_model_grid = map(tpcorr.acceptance_model.AcceptanceModel, seeing_wlen_adjusted) max_offset = 1.1/2.0*max(np.max((offset / self.pointing.platescale).to(u.arcsec)).value, np.max((offset_std / self.pointing.platescale).to(u.arcsec)).value) for wlen_index in range(self.wlen_grid_steps): # Build acceptance model for this wavelength acceptance_model = tpcorr.acceptance_model.AcceptanceModel(seeing_wlen_adjusted[wlen_index], max_offset=max_offset) # Calculate the acceptance fractions for both sets of centroid offsets. acceptance = acceptance_model((offset[:,wlen_index,:] / self.pointing.platescale).to(u.arcsec)) acceptance_std = acceptance_model((offset_std[:,wlen_index,:] / self.pointing.platescale).to(u.arcsec)) # Calculate the acceptance fraction ratios, tabulated for each offset target, wavelength and time. # The ratio calculated this way gives the correction of eqn (13). corrections[exp_index,:,wlen_index,:] = acceptance_std / acceptance # Average the correction over each exposure time slice. avg_corrections = np.mean(np.mean(corrections, axis=-1), axis=0) return corrections, avg_corrections, guided_centroids if __name__ == '__main__': pass
mit
-6,213,619,020,246,203,000
52.537313
145
0.633789
false
transientskp/tkp
tkp/db/alchemy/image.py
1
8193
import math from datetime import datetime from tkp.db.model import Frequencyband, Skyregion, Image, Dataset from tkp.utility.coordinates import eq_to_cart from sqlalchemy import func, cast from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION as Double def get_band(session, dataset, freq_eff, freq_bw, freq_bw_max=.0): """ Returns the frequency band for the given frequency parameters. Will create a new frequency band entry in the database if no match is found. You can limit the bandwidth of the band association with the freq_bw_max. args: session (sqlalchemy.orm.session.Session): a SQLAlchemy session object dataset (tkp.db.model.Dataset): the TraP dataset freq_eff (float): The central frequency of image to get band for freq_bw (float): The bandwidth of image to get band for freq_bw_max (float): The maximum bandwith used for band association. Not used if 0.0 (default). returns: tkp.db.model.Frequencyband: a frequency band object """ if freq_bw_max == .0: bw_half = freq_bw / 2 low = freq_eff - bw_half high = freq_eff + bw_half else: bw_half = freq_bw_max / 2 low = freq_eff - bw_half high = freq_eff + bw_half w1 = high - low w2 = Frequencyband.freq_high - Frequencyband.freq_low max_ = func.greatest(high, Frequencyband.freq_high) min_ = func.least(low, Frequencyband.freq_low) band = session.query(Frequencyband).filter( (Frequencyband.dataset == dataset) & (max_ - min_ < w1 + w2) ).first() if not band: # no match so we create a new band band = Frequencyband(freq_central=freq_eff, freq_low=low, freq_high=high, dataset=dataset) session.add(band) return band def update_skyregion_members(session, skyregion): """ This function performs a simple distance-check against current members of the runningcatalog to find sources that should be visible in the given skyregion, and updates the assocskyrgn table accordingly. Any previous entries in assocskyrgn relating to this skyregion are deleted first. Note 1. We use the variable 'inter' to cache the extraction_radius as transformed onto the unit sphere, so this does not have to be recalculated for every comparison. Note 2. (To Do:) This distance check could be made more efficient by restricting to a range of RA values, as we do with the Dec. However, this optimization is complicated by the meridian wrap-around issue. """ inter = 2. * math.sin(math.radians(skyregion.xtr_radius) / 2.) inter_sq = inter * inter q = """ INSERT INTO assocskyrgn ( runcat ,skyrgn ,distance_deg ) SELECT rc.id as runcat ,sky.id as skyrgn ,DEGREES(2 * ASIN(SQRT( (rc.x - sky.x) * (rc.x - sky.x) + (rc.y - sky.y) * (rc.y - sky.y) + (rc.z - sky.z) * (rc.z - sky.z) ) / 2 ) ) FROM skyregion sky ,runningcatalog rc WHERE sky.id = %(skyregion_id)s AND rc.dataset = sky.dataset AND rc.wm_decl BETWEEN sky.centre_decl - sky.xtr_radius AND sky.centre_decl + sky.xtr_radius AND ( (rc.x - sky.x) * (rc.x - sky.x) + (rc.y - sky.y) * (rc.y - sky.y) + (rc.z - sky.z) * (rc.z - sky.z) ) < %(inter_sq)s ; """ % {'inter_sq': inter_sq, 'skyregion_id': skyregion.id} session.execute(q) return inter def get_skyregion(session, dataset, centre_ra, centre_decl, xtr_radius): """ gets an id for a skyregion, given a pair of central co-ordinates and a radius. If no matching skyregion is found, a new one is inserted. In this case we also trigger execution of `updateSkyRgnMembers` for the new skyregion - this performs a simple assocation with current members of the runningcatalog to find sources that should be visible in the new skyregion, and updates the assocskyrgn table accordingly. 
args: session (sqlalchemy.orm.session.Session): a SQLAlchemy session object dataset_id (int): the dataset ID centre_ra (float): center RA coordinate centre_decl (float): center DECL coordinate xtr_radius (float): The extraction radius returns: tkp.db.models.Skyregion: a SQLalchemy skyregion """ skyregion = session.query(Skyregion).filter(Skyregion.dataset == dataset, Skyregion.centre_ra == centre_ra, Skyregion.centre_decl == centre_decl, Skyregion.xtr_radius == xtr_radius).one_or_none() if not skyregion: x, y, z = eq_to_cart(centre_ra, centre_decl) skyregion = Skyregion(dataset=dataset, centre_ra=centre_ra, centre_decl=centre_decl, xtr_radius=xtr_radius, x=x, y=y, z=z) session.add(skyregion) session.flush() update_skyregion_members(session, skyregion) return skyregion def insert_image(session, dataset, freq_eff, freq_bw, taustart_ts, tau_time, beam_smaj_pix, beam_smin_pix, beam_pa_rad, deltax, deltay, url, centre_ra, centre_decl, xtr_radius, rms_qc, freq_bw_max=0.0, rms_min=None, rms_max=None, detection_thresh=None, analysis_thresh=None): """ Insert an image for a given dataset. Args: session (sqlalchemy.orm.session.Session): A SQLalchemy sessions dataset (int): ID of parent dataset. freq_eff: See :ref:`Image table definitions <schema-image>`. freq_bw: See :ref:`Image table definitions <schema-image>`. freq_bw_max (float): Optional override for freq_bw, not used if 0. taustart_ts: See :ref:`Image table definitions <schema-image>`. taus_time: See :ref:`Image table definitions <schema-image>`. beam_smaj_pix (float): Restoring beam semimajor axis length in pixels. (Converted to degrees before storing to database). beam_smin_pix (float): Restoring beam semiminor axis length in pixels. (Converted to degrees before storing to database). beam_pa_rad (float): Restoring beam parallactic angle in radians. (Converted to degrees before storing to database). deltax(float): Degrees per pixel increment in x-direction. deltay(float): Degrees per pixel increment in y-direction. centre_ra(float): Image central RA co-ord, in degrees. centre_decl(float): Image central Declination co-ord, in degrees. xtr_radius(float): Radius in degrees from field centre that will be used for source extraction. """ # this looks a bit weird, but this simplifies backwards compatibility dataset_id = dataset dataset = session.query(Dataset).filter(Dataset.id == dataset_id).one() skyrgn = get_skyregion(session, dataset, centre_ra, centre_decl, xtr_radius) band = get_band(session, dataset, freq_eff, freq_bw, freq_bw_max) rb_smaj = beam_smaj_pix * math.fabs(deltax) rb_smin = beam_smin_pix * math.fabs(deltay) rb_pa = 180 * beam_pa_rad / math.pi args = ['dataset', 'band', 'tau_time', 'freq_eff', 'freq_bw', 'taustart_ts', 'skyrgn', 'rb_smaj', 'rb_smin', 'rb_pa', 'deltax', 'deltay', 'url', 'rms_qc', 'rms_min', 'rms_max', 'detection_thresh', 'analysis_thresh'] l = locals() kwargs = {arg: l[arg] for arg in args} image = Image(**kwargs) session.add(image) return image def insert_dataset(session, description): rerun = session.query(func.max(Dataset.rerun)). \ select_from(Dataset). \ filter(Dataset.description == "description"). \ one()[0] if not rerun: rerun = 0 else: rerun += 1 dataset = Dataset(rerun=rerun, process_start_ts=datetime.now(), description=description) session.add(dataset) return dataset
bsd-2-clause
2,059,044,770,803,609,900
40.378788
118
0.620896
false
msabramo/kallithea
kallithea/tests/__init__.py
1
7575
# -*- coding: utf-8 -*- # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Pylons application test package This package assumes the Pylons environment is already loaded, such as when this script is imported from the `nosetests --with-pylons=test.ini` command. This module initializes the application via ``websetup`` (`paster setup-app`) and provides the base testing objects. nosetests -x - fail on first error nosetests kallithea.tests.functional.test_admin_settings:TestSettingsController.test_my_account nosetests --pdb --pdb-failures nosetests --with-coverage --cover-package=kallithea.model.validators kallithea.tests.test_validators optional FLAGS: KALLITHEA_WHOOSH_TEST_DISABLE=1 - skip whoosh index building and tests KALLITHEA_NO_TMP_PATH=1 - disable new temp path for tests, used mostly for test_vcs_operations """ import os import time import logging import datetime import hashlib import tempfile from os.path import join as jn from tempfile import _RandomNameSequence import pylons import pylons.test from pylons import config, url from pylons.i18n.translation import _get_translator from pylons.util import ContextObj from routes.util import URLGenerator from webtest import TestApp from nose.plugins.skip import SkipTest from kallithea.lib.compat import unittest from kallithea import is_windows from kallithea.model.db import User from kallithea.tests.nose_parametrized import parameterized from kallithea.lib.utils2 import safe_str os.environ['TZ'] = 'UTC' if not is_windows: time.tzset() log = logging.getLogger(__name__) __all__ = [ 'parameterized', 'environ', 'url', 'get_new_dir', 'TestController', 'SkipTest', 'ldap_lib_installed', 'BaseTestCase', 'init_stack', 'TESTS_TMP_PATH', 'HG_REPO', 'GIT_REPO', 'NEW_HG_REPO', 'NEW_GIT_REPO', 'HG_FORK', 'GIT_FORK', 'TEST_USER_ADMIN_LOGIN', 'TEST_USER_ADMIN_PASS', 'TEST_USER_REGULAR_LOGIN', 'TEST_USER_REGULAR_PASS', 'TEST_USER_REGULAR_EMAIL', 'TEST_USER_REGULAR2_LOGIN', 'TEST_USER_REGULAR2_PASS', 'TEST_USER_REGULAR2_EMAIL', 'TEST_HG_REPO', 'TEST_HG_REPO_CLONE', 'TEST_HG_REPO_PULL', 'TEST_GIT_REPO', 'TEST_GIT_REPO_CLONE', 'TEST_GIT_REPO_PULL', 'HG_REMOTE_REPO', 'GIT_REMOTE_REPO', 'SCM_TESTS', ] # Invoke websetup with the current config file # SetupCommand('setup-app').run([config_file]) environ = {} #SOME GLOBALS FOR TESTS TESTS_TMP_PATH = jn('/', 'tmp', 'rc_test_%s' % _RandomNameSequence().next()) TEST_USER_ADMIN_LOGIN = 'test_admin' TEST_USER_ADMIN_PASS = 'test12' TEST_USER_ADMIN_EMAIL = '[email protected]' TEST_USER_REGULAR_LOGIN = 'test_regular' TEST_USER_REGULAR_PASS = 'test12' TEST_USER_REGULAR_EMAIL = '[email protected]' TEST_USER_REGULAR2_LOGIN = 'test_regular2' TEST_USER_REGULAR2_PASS = 'test12' TEST_USER_REGULAR2_EMAIL = '[email protected]' HG_REPO = 'vcs_test_hg' GIT_REPO = 'vcs_test_git' NEW_HG_REPO = 'vcs_test_hg_new' NEW_GIT_REPO = 'vcs_test_git_new' HG_FORK = 'vcs_test_hg_fork' GIT_FORK = 'vcs_test_git_fork' ## VCS SCM_TESTS = ['hg', 'git'] uniq_suffix = 
str(int(time.mktime(datetime.datetime.now().timetuple()))) GIT_REMOTE_REPO = 'git://github.com/codeinn/vcs.git' TEST_GIT_REPO = jn(TESTS_TMP_PATH, GIT_REPO) TEST_GIT_REPO_CLONE = jn(TESTS_TMP_PATH, 'vcsgitclone%s' % uniq_suffix) TEST_GIT_REPO_PULL = jn(TESTS_TMP_PATH, 'vcsgitpull%s' % uniq_suffix) HG_REMOTE_REPO = 'http://bitbucket.org/marcinkuzminski/vcs' TEST_HG_REPO = jn(TESTS_TMP_PATH, HG_REPO) TEST_HG_REPO_CLONE = jn(TESTS_TMP_PATH, 'vcshgclone%s' % uniq_suffix) TEST_HG_REPO_PULL = jn(TESTS_TMP_PATH, 'vcshgpull%s' % uniq_suffix) TEST_DIR = tempfile.gettempdir() TEST_REPO_PREFIX = 'vcs-test' # cached repos if any ! # comment out to get some other repos from bb or github GIT_REMOTE_REPO = jn(TESTS_TMP_PATH, GIT_REPO) HG_REMOTE_REPO = jn(TESTS_TMP_PATH, HG_REPO) #skip ldap tests if LDAP lib is not installed ldap_lib_installed = False try: import ldap ldap.API_VERSION ldap_lib_installed = True except ImportError: # means that python-ldap is not installed pass def get_new_dir(title): """ Returns always new directory path. """ from kallithea.tests.vcs.utils import get_normalized_path name = TEST_REPO_PREFIX if title: name = '-'.join((name, title)) hex = hashlib.sha1(str(time.time())).hexdigest() name = '-'.join((name, hex)) path = os.path.join(TEST_DIR, name) return get_normalized_path(path) import logging class NullHandler(logging.Handler): def emit(self, record): pass def init_stack(config=None): if not config: config = pylons.test.pylonsapp.config url._push_object(URLGenerator(config['routes.map'], environ)) pylons.app_globals._push_object(config['pylons.app_globals']) pylons.config._push_object(config) pylons.tmpl_context._push_object(ContextObj()) # Initialize a translator for tests that utilize i18n translator = _get_translator(pylons.config.get('lang')) pylons.translator._push_object(translator) h = NullHandler() logging.getLogger("kallithea").addHandler(h) class BaseTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): self.wsgiapp = pylons.test.pylonsapp init_stack(self.wsgiapp.config) unittest.TestCase.__init__(self, *args, **kwargs) class TestController(BaseTestCase): def __init__(self, *args, **kwargs): BaseTestCase.__init__(self, *args, **kwargs) self.app = TestApp(self.wsgiapp) self.maxDiff = None self.index_location = config['app_conf']['index_dir'] def log_user(self, username=TEST_USER_ADMIN_LOGIN, password=TEST_USER_ADMIN_PASS): self._logged_username = username response = self.app.post(url(controller='login', action='index'), {'username': username, 'password': password}) if 'invalid user name' in response.body: self.fail('could not login using %s %s' % (username, password)) self.assertEqual(response.status, '302 Found') ses = response.session['authuser'] self.assertEqual(ses.get('username'), username) response = response.follow() self.assertEqual(ses.get('is_authenticated'), True) return response.session['authuser'] def _get_logged_user(self): return User.get_by_username(self._logged_username) def checkSessionFlash(self, response, msg, skip=0): if 'flash' not in response.session: self.fail(safe_str(u'msg `%s` not found - session has no flash ' % msg)) try: level, m = response.session['flash'][-1 - skip] if msg in m: return except IndexError: pass self.fail(safe_str(u'msg `%s` not found in session flash (skipping %s): %s' % (msg, skip, ', '.join('`%s`' % m for level, m in response.session['flash']))))
gpl-3.0
7,480,193,394,177,342,000
32.370044
100
0.682904
false
TiddlySpace/tiddlyspace
tiddlywebplugins/tiddlyspace/web.py
1
2486
""" Web related utility functions. """ from httpexceptor import HTTP404 from tiddlyweb.model.policy import PermissionsError from tiddlyweb.model.recipe import Recipe from tiddlyweb.model.tiddler import Tiddler from tiddlyweb.store import StoreError from tiddlywebplugins.tiddlyspace.space import Space def determine_host(environ): """ Extract the current HTTP host from the environment. Return that plus the server_host from config. This is used to help calculate what space we are in when HTTP requests are made. """ server_host = environ['tiddlyweb.config']['server_host'] port = int(server_host['port']) if port == 80 or port == 443: host_url = server_host['host'] else: host_url = '%s:%s' % (server_host['host'], port) http_host = environ.get('HTTP_HOST', host_url) if ':' in http_host: for port in [':80', ':443']: if http_host.endswith(port): http_host = http_host.replace(port, '') break return http_host, host_url def determine_space(environ, http_host): """ Calculate the space associated with a subdomain. """ server_host = environ['tiddlyweb.config']['server_host']['host'] if '.%s' % server_host in http_host: return http_host.rsplit('.', server_host.count('.') + 1)[0] else: if ':' in http_host: http_host = http_host.split(':', 1)[0] store = environ['tiddlyweb.store'] tiddler = Tiddler(http_host, 'MAPSPACE') try: tiddler = store.get(tiddler) return tiddler.fields['mapped_space'] except (KeyError, StoreError): pass return None def determine_space_recipe(environ, space_name): """ Given a space name, check if the current user is a member of that named space. If so, use the private recipe. """ store = environ['tiddlyweb.store'] usersign = environ['tiddlyweb.usersign'] try: space = Space(space_name) recipe = Recipe(space.public_recipe()) recipe = store.get(recipe) except (ValueError, StoreError), exc: raise HTTP404('Space for %s does not exist: %s' % (space_name, exc)) try: recipe.policy.allows(usersign, 'manage') space_type = 'private' except PermissionsError: space_type = 'public' recipe_name_method = getattr(space, '%s_recipe' % space_type) recipe_name = recipe_name_method() return recipe_name
bsd-3-clause
-7,599,089,718,721,337,000
30.468354
76
0.626307
false
craigmbooth/chicago_neighborhood_finder
chicago_community_areas.py
1
4133
import requests
import os
import zipfile
import StringIO
import glob
import shapefile
import tempfile
import json


def point_inside_polygon(x, y, poly):
    """Return True if the point described by x, y is inside of the polygon
    described by the list of points [(x0, y0), (x1, y1), ... (xn, yn)] in
    ``poly``

    Code from http://www.ariel.com.au/a/python-point-int-poly.html
    which in turn was adapted from C code found at
    http://local.wasp.uwa.edu.au/~pbourke/geometry/insidepoly/
    """
    n = len(poly)
    inside = False

    p1x, p1y = poly[0]
    for i in range(n + 1):
        p2x, p2y = poly[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                    if p1x == p2x or x <= xinters:
                        inside = not inside
        p1x, p1y = p2x, p2y

    return inside


def download_shapefiles():
    """Retrieve the community area shapefiles from the Chicago open data
    portal and unzip them to data/"""

    comm_area_url = ("https://data.cityofchicago.org/download/"
                     "9wp7-iasj/application/zip")

    try:
        os.mkdir("data")
    except OSError:
        # Directory already exists.  Continue
        pass

    # GET the zip file stored at ``comm_area_url`` and put it into an in-memory
    # buffer.  Extract the contents of the buffer to data/
    r = requests.get(comm_area_url)
    f = StringIO.StringIO()
    f.write(r.content)
    with zipfile.ZipFile(f, 'r') as zipf:
        zipf.extractall("data")


def get_community_area_coords():
    """Read in the shapefile downloaded from the Chicago open data portal.
    This contains coordinates for the boundaries of Chicago's community
    areas.  Use gdaltransform to get these to latitude and longitude and
    return a dict, keyed off of neighborhood ID containing these
    coordinates."""

    shapefiles = list(glob.glob("data/*.shp"))
    if len(shapefiles) != 1:
        raise ValueError("There is more than one shapefile matching the "
                         "glob 'data/*.shp'")
    shapefile_name = shapefiles[0]

    sf = shapefile.Reader(shapefile_name)
    shapes = sf.shapes()
    recs = sf.records()

    # Loop through community areas:
    results = {}
    for i, (shape, rec) in enumerate(zip(shapes, recs)):

        _, temp_out_filename = tempfile.mkstemp()
        _, temp_in_filename = tempfile.mkstemp()

        with open(temp_out_filename, "w") as f:
            for coords in shape.points:
                f.write("%f %f\n" % (coords[0], coords[1]))

        # Use gdaltransform to convert StatePlane coordinates to lat and long:
        os.system("gdaltransform -s_srs '+proj=tmerc +lat_0=36.66666666666666 "
                  "+lon_0=-88.33333333333333 +k=0.9999749999999999 "
                  "+x_0=300000.0000000001 +y_0=0 +ellps=GRS80 "
                  "+towgs84=0,0,0,0,0,0,0 +units=us-ft +no_defs' "
                  " -t_srs epsg:4326 < " + temp_out_filename + " > " +
                  temp_in_filename)

        with open(temp_in_filename, "r") as f:
            coords = []
            for latlng in f:
                lat, lng, _ = latlng.split()
                coords.append((float(lat), float(lng)))

        results[rec[0]] = coords

    return results


def get_neighborhood_for_point(lat, lng, commareas):
    """Given a latitude and longitude, find the neighborhood that this point
    is inside.  The third argument to this function is the output from
    get_community_area_coords()"""

    for commarea, commdata in commareas.iteritems():
        if point_inside_polygon(lng, lat, commdata):
            return commarea
    else:
        return None


if __name__ == "__main__":

    # Retrieve the community area shapefile from the Chicago open data portal:
    download_shapefiles()

    # Build a dictionary, keyed off community area name, containing lat,lng
    # tuples:
    areas = get_community_area_coords()

    # Dump the results to a JSON file:
    json.dump(areas, open("community_areas.json", "w"), indent=2)
apache-2.0
-5,800,915,206,791,777,000
32.601626
80
0.602952
false
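A minimal usage sketch for the point-in-polygon helper from the chicago_community_areas.py row above; the square and test points are illustrative, and the import assumes that file sits on the Python path.

from chicago_community_areas import point_inside_polygon

unit_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
print(point_inside_polygon(0.5, 0.5, unit_square))  # True: inside the square
print(point_inside_polygon(1.5, 0.5, unit_square))  # False: outside the square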
aileron-split/aileron-web
server/news/serializers.py
1
2606
from rest_framework import serializers

from server import settings
from .models import Article
from team.serializers import MemberShortSerializer
from gallery.serializers import AlbumSerializer


class ArticleSerializer(serializers.ModelSerializer):
    card_sm_image = serializers.SerializerMethodField()
    card_mat_image = serializers.SerializerMethodField()
    card_lg_image = serializers.SerializerMethodField()
    author = MemberShortSerializer(many=False, read_only=True)
    album = AlbumSerializer(many=False, read_only=True)

    class Meta:
        model = Article
        fields = (
            'id', 'published', 'published_date', 'slug', 'title', 'subtitle', 'summary',
            'card_sm_image', 'card_mat_image', 'card_lg_image', 'video', 'album', 'author',
            'created_date', 'modified_date',
        )

    def get_card_sm_image(self, obj):
        return settings.MEDIA_URL + obj.card_sm_image.name if obj.card_sm_image.name else None

    def get_card_mat_image(self, obj):
        return settings.MEDIA_URL + obj.card_mat_image.name if obj.card_mat_image.name else None

    def get_card_lg_image(self, obj):
        return settings.MEDIA_URL + obj.card_lg_image.name if obj.card_lg_image.name else None


class ArticleDetailSerializer(serializers.ModelSerializer):
    card_sm_image = serializers.SerializerMethodField()
    card_mat_image = serializers.SerializerMethodField()
    card_lg_image = serializers.SerializerMethodField()
    author = MemberShortSerializer(many=False, read_only=True)
    album = AlbumSerializer(many=False, read_only=True)

    class Meta:
        model = Article
        fields = (
            'id', 'published', 'published_date', 'slug', 'title', 'subtitle', 'summary',
            'content', 'card_sm_image', 'card_mat_image', 'card_lg_image', 'video', 'album',
            'author', 'created_date', 'modified_date',
        )

    def get_card_sm_image(self, obj):
        return settings.MEDIA_URL + obj.card_sm_image.name if obj.card_sm_image.name else None

    def get_card_mat_image(self, obj):
        return settings.MEDIA_URL + obj.card_mat_image.name if obj.card_mat_image.name else None

    def get_card_lg_image(self, obj):
        return settings.MEDIA_URL + obj.card_lg_image.name if obj.card_lg_image.name else None
gpl-3.0
-7,981,823,308,071,281,000
29.658824
96
0.60284
false
asrivat1/word2vec
scripts/doc2vec.py
1
2019
#!/usr/bin/pypy

import sys
import utilities

# This file will compute a vector representation
# of each document, based on the words in that document.

def readInput():
    # argv[0] is the script name, so four arguments means len(sys.argv) == 5
    if len(sys.argv) < 5:
        print "./doc2vec.py <cluster_file> <vector_file> <doc_file> <output_file>"
        sys.exit()

    clustFileName = sys.argv[1]
    vecFileName = sys.argv[2]
    docFileName = sys.argv[3]
    outFileName = sys.argv[4]

    return clustFileName, vecFileName, docFileName, outFileName

def vectorizeDocs(docFileName, word2clust, numclusters):
    infile = open(docFileName)

    documents = []
    vocab = set(word2clust.getVocab())
    i = 0
    for line in infile:
        i += 1
        if i % 1000 == 0:
            print i

        arr = line.split()
        arr.pop(0)    # discard the topic name

        # compute total similarity of the document to each cluster
        docvec = [0.0] * numclusters
        for word in arr:
            if word in vocab:
                clusterSims = word2clust.getVector(word)
                # use a separate index so the progress counter above
                # is not clobbered
                for j in range(0, numclusters):
                    docvec[j] += clusterSims[j]

        documents.append(docvec)

    infile.close()
    return documents

def writeDocVecs(documents, outFileName):
    outfile = open(outFileName, 'w')

    for doc in documents:
        stringDoc = [str(dim) for dim in doc]
        outfile.write(' '.join(stringDoc) + '\n')

    outfile.close()

def main():
    # read input
    clustFileName, vecFileName, docFileName, outFileName = readInput()

    # read in the vector file
    vectors = utilities.readVectors(vecFileName)

    # read in cluster file
    clusters = utilities.readClusters(clustFileName)

    # compute the similarity of each word to each cluster
    word2clust = utilities.wordClusterVectors(vectors, clusters)

    # for each doc, compute the vector
    documents = vectorizeDocs(docFileName, word2clust, len(clusters))

    # write the doc vectors to file
    writeDocVecs(documents, outFileName)

if __name__ == "__main__":
    main()
apache-2.0
1,266,811,214,027,012,000
25.220779
82
0.637444
false
Southpaw-TACTIC/TACTIC
src/pyasm/common/container.py
1
6371
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION.  This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#

__all__ = ["Container", "GlobalContainer"]

try:
    import _thread as thread
except:
    import thread

# Get the container instance
INSTANCES = {}              # containers separated by thread
#session_instances = {}     # containers separated by ticket


def _get_instance():
    # get the current thread
    instance = INSTANCES.get(thread.get_ident())
    if instance == None:
        # create a new container
        instance = Container()
        INSTANCES[thread.get_ident()] = instance
    return instance


def _set_instance(instance):
    INSTANCES[thread.get_ident()] = instance


def _create_instance():
    instance = Container()
    INSTANCES[thread.get_ident()] = instance
    return instance


def _delete_instance():
    # delete old one for this thread
    container = INSTANCES[thread.get_ident()]
    container.clear_data()
    del(container)
    del(INSTANCES[thread.get_ident()])


class Container(object):
    '''general low level container object to store global information
    to the entire application'''

    def __init__(self):
        #print "INITIALIZING CONTAINER: ", thread.get_ident(), self
        self.info = dict()

    def get_data(self):
        return self.info

    def clear_data(self):
        self.info = {}

    def get_instance():
        return _get_instance()
    get_instance = staticmethod(get_instance)

    def set_instance(instance):
        return _set_instance(instance)
    set_instance = staticmethod(set_instance)

    def get_all_instances():
        return INSTANCES
    get_all_instances = staticmethod(get_all_instances)

    def put(key, value):
        _get_instance().info[key] = value
    put = staticmethod(put)

    def set(key, value):
        _get_instance().info[key] = value
    set = staticmethod(set)

    stats = {}

    def get(key):
        #try:
        #    return _get_instance().info[key]
        #except:
        #    return None
        return _get_instance().info.get(key)
        #instance = _get_instance()
        #return instance.info.get(key)
    get = staticmethod(get)

    def has(key):
        return key in _get_instance().info
    has = staticmethod(has)

    def remove(key):
        instance = _get_instance()
        if key in instance.info:
            del(instance.info[key])
    remove = staticmethod(remove)

    #
    # convenience methods for managing a sequence
    #
    def clear_seq(key):
        seq = Container.get(key)
        if seq == None:
            seq = []
            Container.put(key, seq)
        else:
            del seq[:]  # clear the sequence
    clear_seq = staticmethod(clear_seq)

    def append_seq(key, value):
        seq = Container.get(key)
        if seq == None:
            seq = []
            Container.put(key, seq)
        seq.append(value)
        return seq
    append_seq = staticmethod(append_seq)

    def get_seq(key):
        seq = Container.get(key)
        if seq == None:
            seq = []
            Container.put(key, seq)
        return seq
    get_seq = staticmethod(get_seq)

    # convenience methods for managing a dictionary
    def _get_dict(dict_name):
        data = Container.get(dict_name)
        if data == None:
            data = {}
            Container.put(dict_name, data)
        return data
    _get_dict = staticmethod(_get_dict)

    def put_dict(dict_name, key, value):
        dict = Container._get_dict(dict_name)
        dict[key] = value
    put_dict = staticmethod(put_dict)

    def get_dict(dict_name, key):
        dict = Container._get_dict(dict_name)
        return dict.get(key)
    get_dict = staticmethod(get_dict)

    def get_full_dict(dict_name):
        dict = Container._get_dict(dict_name)
        return dict
    get_full_dict = staticmethod(get_full_dict)

    def clear_dict(dict_name):
        Container.put(dict_name, {})
    clear_dict = staticmethod(clear_dict)

    ###############################
    # Counter methods
    #
    def start_counter(key):
        Container.put(key, 0)
        return 0
    start_counter = staticmethod(start_counter)

    def get_counter(key):
        counter = Container.get(key)
        if counter == None:
            counter = Container.start_counter(key)
        return counter
    get_counter = staticmethod(get_counter)

    def increment(key):
        counter = Container.get(key)
        if counter == None:
            counter = 1
        else:
            counter += 1
        Container.put(key, counter)
        return counter
    increment = staticmethod(increment)

    def decrement(key):
        counter = Container.get(key)
        if counter == None:
            counter = -1
        else:
            counter -= 1
        Container.put(key, counter)
        return counter
    decrement = staticmethod(decrement)

    def clear():
        '''Clears the container.  Should be called at the initialization of
        the application'''
        #print "clearing container!!!"
        instance = _get_instance()
        #del(instance.info)
        instance.info = {}
    clear = staticmethod(clear)

    def create():
        '''Creates the container.  Should be called at the initialization of
        the application'''
        instance = _create_instance()
        return instance
    create = staticmethod(create)

    def delete():
        '''Removes the container.'''
        #print "deleting container"
        _delete_instance()
    delete = staticmethod(delete)

    def clear_all():
        '''clears all the instances for all threads'''
        # rebind the module-level dict; without the global declaration this
        # assignment would only create a local variable
        global INSTANCES
        INSTANCES = {}
    clear_all = staticmethod(clear_all)


GLOBAL_CONTAINER = {}


class GlobalContainer(object):
    '''Global container that spans across all threads'''

    def put(key, value):
        GLOBAL_CONTAINER[key] = value
    put = staticmethod(put)

    def get(key):
        return GLOBAL_CONTAINER.get(key)
    get = staticmethod(get)

    def remove(key):
        instance = GLOBAL_CONTAINER
        if key in instance:
            del(instance[key])
    remove = staticmethod(remove)
epl-1.0
-1,837,061,838,531,271,000
22.167273
73
0.585622
false
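A minimal usage sketch for the thread-local Container in the TACTIC row above; the import path mirrors that row's file path and is an assumption about how the module sits on the Python path, and the key names are illustrative.

from pyasm.common.container import Container

Container.create()                     # initialize this thread's container
Container.put("request_id", 12345)     # store a value keyed by name
print(Container.get("request_id"))     # -> 12345
Container.append_seq("messages", "hello")
print(Container.get_seq("messages"))   # -> ['hello']
Container.delete()                     # tear the container down again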
pyannote/pyannote-audio
pyannote/audio/core/task.py
1
14390
# MIT License # # Copyright (c) 2020-2021 CNRS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import annotations import multiprocessing import sys import warnings from dataclasses import dataclass from enum import Enum from numbers import Number from typing import List, Optional, Text, Tuple, Union import pytorch_lightning as pl import torch from torch.utils.data import DataLoader, Dataset, IterableDataset from torch.utils.data._utils.collate import default_collate from torch_audiomentations.core.transforms_interface import BaseWaveformTransform from typing_extensions import Literal from pyannote.audio.utils.loss import binary_cross_entropy, nll_loss from pyannote.audio.utils.protocol import check_protocol from pyannote.database import Protocol # Type of machine learning problem class Problem(Enum): BINARY_CLASSIFICATION = 0 MONO_LABEL_CLASSIFICATION = 1 MULTI_LABEL_CLASSIFICATION = 2 REPRESENTATION = 3 REGRESSION = 4 # any other we could think of? # A task takes an audio chunk as input and returns # either a temporal sequence of predictions # or just one prediction for the whole audio chunk class Resolution(Enum): FRAME = 1 # model outputs a sequence of frames CHUNK = 2 # model outputs just one vector for the whole chunk @dataclass class Specifications: problem: Problem resolution: Resolution # chunk duration in seconds. # use None for variable-length chunks duration: Optional[float] = None # use that many seconds on the left- and rightmost parts of each chunk # to warm up the model. This is mostly useful for segmentation tasks. # While the model does process those left- and right-most parts, only # the remaining central part of each chunk is used for computing the # loss during training, and for aggregating scores during inference. # Defaults to 0. (i.e. no warm-up). warm_up: Optional[Tuple[float, float]] = (0.0, 0.0) # (for classification tasks only) list of classes classes: Optional[List[Text]] = None # whether classes are permutation-invariant (e.g. 
diarization) permutation_invariant: bool = False class TrainDataset(IterableDataset): def __init__(self, task: Task): super().__init__() self.task = task def __iter__(self): return self.task.train__iter__() def __len__(self): return self.task.train__len__() class ValDataset(Dataset): def __init__(self, task: Task): super().__init__() self.task = task def __getitem__(self, idx): return self.task.val__getitem__(idx) def __len__(self): return self.task.val__len__() class Task(pl.LightningDataModule): """Base task class A task is the combination of a "problem" and a "dataset". For example, here are a few tasks: - voice activity detection on the AMI corpus - speaker embedding on the VoxCeleb corpus - end-to-end speaker diarization on the VoxConverse corpus A task is expected to be solved by a "model" that takes an audio chunk as input and returns the solution. Hence, the task is in charge of generating (input, expected_output) samples used for training the model. Parameters ---------- protocol : Protocol pyannote.database protocol duration : float, optional Chunks duration in seconds. Defaults to two seconds (2.). min_duration : float, optional Sample training chunks duration uniformely between `min_duration` and `duration`. Defaults to `duration` (i.e. fixed length chunks). warm_up : float or (float, float), optional Use that many seconds on the left- and rightmost parts of each chunk to warm up the model. This is mostly useful for segmentation tasks. While the model does process those left- and right-most parts, only the remaining central part of each chunk is used for computing the loss during training, and for aggregating scores during inference. Defaults to 0. (i.e. no warm-up). batch_size : int, optional Number of training samples per batch. Defaults to 32. num_workers : int, optional Number of workers used for generating training samples. Defaults to multiprocessing.cpu_count() // 2. pin_memory : bool, optional If True, data loaders will copy tensors into CUDA pinned memory before returning them. See pytorch documentation for more details. Defaults to False. augmentation : BaseWaveformTransform, optional torch_audiomentations waveform transform, used by dataloader during training. Attributes ---------- specifications : Specifications or dict of Specifications Task specifications (available after `Task.setup` has been called.) """ def __init__( self, protocol: Protocol, duration: float = 2.0, min_duration: float = None, warm_up: Union[float, Tuple[float, float]] = 0.0, batch_size: int = 32, num_workers: int = None, pin_memory: bool = False, augmentation: BaseWaveformTransform = None, ): super().__init__() # dataset self.protocol, self.has_validation = check_protocol(protocol) # batching self.duration = duration self.min_duration = duration if min_duration is None else min_duration self.batch_size = batch_size # training if isinstance(warm_up, Number): warm_up = (warm_up, warm_up) self.warm_up = warm_up # multi-processing if num_workers is None: num_workers = multiprocessing.cpu_count() // 2 if ( num_workers > 0 and sys.platform == "darwin" and sys.version_info[0] >= 3 and sys.version_info[1] >= 8 ): warnings.warn( "num_workers > 0 is not supported with macOS and Python 3.8+: " "setting num_workers = 0." 
) num_workers = 0 self.num_workers = num_workers self.pin_memory = pin_memory self.augmentation = augmentation def prepare_data(self): """Use this to download and prepare data This is where we might end up downloading datasets and transform them so that they are ready to be used with pyannote.database. but for now, the API assume that we directly provide a pyannote.database.Protocol. Notes ----- Called only once. """ pass def setup(self, stage=None): """Called at the beginning of fit and test just before Model.setup() Parameters ---------- stage : "fit" or "test" Whether model is being trained ("fit") or used for inference ("test"). Notes ----- This hook is called on every process when using DDP. If `specifications` attribute has not been set in `__init__`, `setup` is your last chance to set it. """ pass def setup_loss_func(self): pass def setup_validation_metric(self): pass def train__iter__(self): # will become train_dataset.__iter__ method msg = f"Missing '{self.__class__.__name__}.train__iter__' method." raise NotImplementedError(msg) def train__len__(self): # will become train_dataset.__len__ method msg = f"Missing '{self.__class__.__name__}.train__len__' method." raise NotImplementedError(msg) def collate_fn(self, batch): collated_batch = default_collate(batch) if self.augmentation is not None: collated_batch["X"] = self.augmentation( collated_batch["X"], sample_rate=self.model.hparams.sample_rate ) return collated_batch def train_dataloader(self) -> DataLoader: return DataLoader( TrainDataset(self), batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=True, collate_fn=self.collate_fn, ) def default_loss( self, specifications: Specifications, target, prediction, weight=None ) -> torch.Tensor: """Guess and compute default loss according to task specification Parameters ---------- specifications : Specifications Task specifications target : torch.Tensor * (batch_size, num_frames) for binary classification * (batch_size, num_frames) for multi-class classification * (batch_size, num_frames, num_classes) for multi-label classification prediction : torch.Tensor (batch_size, num_frames, num_classes) weight : torch.Tensor, optional (batch_size, num_frames, 1) Returns ------- loss : torch.Tensor Binary cross-entropy loss in case of binary and multi-label classification, Negative log-likelihood loss in case of multi-class classification. """ if specifications.problem in [ Problem.BINARY_CLASSIFICATION, Problem.MULTI_LABEL_CLASSIFICATION, ]: return binary_cross_entropy(prediction, target, weight=weight) elif specifications.problem == Problem.MONO_LABEL_CLASSIFICATION: return nll_loss(prediction, target, weight=weight) else: msg = "TODO: implement for other types of problems" raise NotImplementedError(msg) def common_step(self, batch, batch_idx: int, stage: Literal["train", "val"]): """Default training or validation step according to task specification * binary cross-entropy loss for binary or multi-label classification * negative log-likelihood loss for regular classification If "weight" attribute exists, batch[self.weight] is also passed to the loss function during training (but has no effect in validation). Parameters ---------- batch : (usually) dict of torch.Tensor Current batch. batch_idx: int Batch index. 
stage : {"train", "val"} "train" for training step, "val" for validation step Returns ------- loss : {str: torch.tensor} {"loss": loss} """ # forward pass y_pred = self.model(batch["X"]) batch_size, num_frames, _ = y_pred.shape # (batch_size, num_frames, num_classes) # target y = batch["y"] # frames weight weight_key = getattr(self, "weight", None) if stage == "train" else None weight = batch.get( weight_key, torch.ones(batch_size, num_frames, 1, device=self.model.device), ) # (batch_size, num_frames, 1) # warm-up warm_up_left = round(self.warm_up[0] / self.duration * num_frames) weight[:, :warm_up_left] = 0.0 warm_up_right = round(self.warm_up[1] / self.duration * num_frames) weight[:, num_frames - warm_up_right :] = 0.0 # compute loss loss = self.default_loss(self.specifications, y, y_pred, weight=weight) self.model.log( f"{self.ACRONYM}@{stage}_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True, ) return {"loss": loss} # default training_step provided for convenience # can obviously be overriden for each task def training_step(self, batch, batch_idx: int): return self.common_step(batch, batch_idx, "train") def val__getitem__(self, idx): # will become val_dataset.__getitem__ method msg = f"Missing '{self.__class__.__name__}.val__getitem__' method." raise NotImplementedError(msg) def val__len__(self): # will become val_dataset.__len__ method msg = f"Missing '{self.__class__.__name__}.val__len__' method." raise NotImplementedError(msg) def val_dataloader(self) -> Optional[DataLoader]: if self.has_validation: return DataLoader( ValDataset(self), batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=False, ) else: return None # default validation_step provided for convenience # can obviously be overriden for each task def validation_step(self, batch, batch_idx: int): return self.common_step(batch, batch_idx, "val") def validation_epoch_end(self, outputs): pass @property def val_monitor(self): """Quantity (and direction) to monitor Useful for model checkpointing or early stopping. Returns ------- monitor : str Name of quantity to monitor. mode : {'min', 'max} Minimize See also -------- pytorch_lightning.callbacks.ModelCheckpoint pytorch_lightning.callbacks.EarlyStopping """ if self.has_validation: return f"{self.ACRONYM}@val_loss", "min" else: return None, "min"
mit
-7,840,265,510,211,118,000
32.774648
92
0.62712
false
butterscotchstallion/SpiffyRPG
SpiffyRPG/SpiffyWorld/unit_level.py
1
2607
#!/usr/bin/env python
# -*- coding: utf-8 -*-


class UnitLevel:
    """
    Functionality related to how much xp a unit has
    """

    def get_level_by_xp(self, total_experience):
        player_level = 1
        levels = self.get_levels()

        for level, experience_needed in levels:
            if total_experience > experience_needed:
                player_level = level

        """
        If this player is max level or higher,
        just choose last one
        """
        if total_experience > levels[-1][1]:
            player_level = levels[-1][0]

        return player_level

    def get_xp_for_level(self, player_level):
        levels = self.get_levels()

        for level, experience_needed in levels:
            if level == player_level:
                return experience_needed

        return player_level

    def get_xp_for_max_level(self):
        levels = self.get_levels()

        # levels[-1] is a (level, xp) tuple: look up the xp threshold by the
        # level number, not by the xp value itself
        return self.get_xp_for_level(levels[-1][0])

    def get_xp_for_next_level(self, level):
        xp = 0
        levels = self.get_levels()

        for xp_level, req_xp in levels:
            if xp_level == (level + 1):
                xp = req_xp
                break

        return xp

    def get_levels(self):
        return [(1, 0), (2, 100), (3, 200), (4, 300), (5, 400),
                (6, 1000), (7, 1500), (8, 2500), (9, 3500), (10, 5000),
                (11, 6500), (12, 8000), (13, 9500), (14, 10500), (15, 12000),
                (16, 15000), (17, 18000), (18, 21000), (19, 24000),
                (20, 27000), (21, 30000), (22, 33000), (24, 36000),
                (25, 39000), (26, 42000), (27, 45000), (28, 48000),
                (29, 51000), (30, 54000), (31, 57000), (32, 60000),
                (33, 63000), (34, 66000), (35, 69000), (36, 70000),
                (37, 73000), (38, 76000), (39, 79000), (40, 76000),
                (41, 82000), (42, 85000), (43, 88000), (44, 92000),
                (45, 95000), (46, 98000), (47, 101000), (48, 104000),
                (49, 107000), (50, 110000)]
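

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): how the level table above
# is typically queried. The xp values are arbitrary examples.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    unit_level = UnitLevel()

    # 250 xp clears the 200 xp threshold for level 3 but not the 300 for level 4
    print(unit_level.get_level_by_xp(250))      # 3

    # xp required to reach level 4 from level 3
    print(unit_level.get_xp_for_next_level(3))  # 300

    # xp threshold of the last entry in the level table
    print(unit_level.get_xp_for_max_level())    # 110000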
mit
723,719,038,350,232,700
25.333333
75
0.380898
false
PuercoPop/EleccionesPeru
Elecciones/views.py
1
1263
# coding=utf8 # Create your views here. from django.shortcuts import render from django.http import HttpResponse from ONPEcrawler import ONPEcrawler import Elecciones.models as m def index(request): return render(request,'index.html') def seed(request): crawler = ONPEcrawler(url = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/congreso/" ) crawler.seed_tree() return HttpResponse('Success') def crawl(request): crawler = ONPEcrawler(url = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/congreso/" ) crawler.make_tree() return HttpResponse('Success') def test(request): crawler = ONPEcrawler(url = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/congreso/" ) (f, soup) = crawler.test() print soup return HttpResponse('Success') def visualize_tree(request): return render(request,'visualize_tree.html',{'locales':m.UbiGeo.objects.filter(tipo='local'), 'departamentos':m.UbiGeo.objects.filter(tipo='departamento')}) def clean_db(request): for item in m.UbiGeo.objects.all(): item.delete() return HttpResponse('Success')
mit
-7,431,224,069,060,348,000
27.704545
127
0.697546
false
bellowsj/aiopogo
aiopogo/pogoprotos/networking/platform/responses/plat_eight_response_pb2.py
1
2327
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: pogoprotos/networking/platform/responses/plat_eight_response.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='pogoprotos/networking/platform/responses/plat_eight_response.proto', package='pogoprotos.networking.platform.responses', syntax='proto3', serialized_pb=_b('\nBpogoprotos/networking/platform/responses/plat_eight_response.proto\x12(pogoprotos.networking.platform.responses\"$\n\x11PlatEightResponse\x12\x0f\n\x07message\x18\x02 \x01(\tb\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _PLATEIGHTRESPONSE = _descriptor.Descriptor( name='PlatEightResponse', full_name='pogoprotos.networking.platform.responses.PlatEightResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='message', full_name='pogoprotos.networking.platform.responses.PlatEightResponse.message', index=0, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=112, serialized_end=148, ) DESCRIPTOR.message_types_by_name['PlatEightResponse'] = _PLATEIGHTRESPONSE PlatEightResponse = _reflection.GeneratedProtocolMessageType('PlatEightResponse', (_message.Message,), dict( DESCRIPTOR = _PLATEIGHTRESPONSE, __module__ = 'pogoprotos.networking.platform.responses.plat_eight_response_pb2' # @@protoc_insertion_point(class_scope:pogoprotos.networking.platform.responses.PlatEightResponse) )) _sym_db.RegisterMessage(PlatEightResponse) # @@protoc_insertion_point(module_scope)
mit
-7,797,228,191,189,211,000
32.724638
210
0.758917
false
fbradyirl/home-assistant
tests/components/deconz/test_scene.py
1
2977
"""deCONZ scene platform tests.""" from unittest.mock import Mock, patch from homeassistant import config_entries from homeassistant.components import deconz from homeassistant.setup import async_setup_component import homeassistant.components.scene as scene from tests.common import mock_coro GROUP = { "1": { "id": "Group 1 id", "name": "Group 1 name", "state": {}, "action": {}, "scenes": [{"id": "1", "name": "Scene 1"}], "lights": [], } } ENTRY_CONFIG = { deconz.const.CONF_ALLOW_CLIP_SENSOR: True, deconz.const.CONF_ALLOW_DECONZ_GROUPS: True, deconz.config_flow.CONF_API_KEY: "ABCDEF", deconz.config_flow.CONF_BRIDGEID: "0123456789", deconz.config_flow.CONF_HOST: "1.2.3.4", deconz.config_flow.CONF_PORT: 80, } async def setup_gateway(hass, data): """Load the deCONZ scene platform.""" from pydeconz import DeconzSession loop = Mock() session = Mock() config_entry = config_entries.ConfigEntry( 1, deconz.DOMAIN, "Mock Title", ENTRY_CONFIG, "test", config_entries.CONN_CLASS_LOCAL_PUSH, ) gateway = deconz.DeconzGateway(hass, config_entry) gateway.api = DeconzSession(loop, session, **config_entry.data) gateway.api.config = Mock() hass.data[deconz.DOMAIN] = {gateway.bridgeid: gateway} with patch("pydeconz.DeconzSession.async_get_state", return_value=mock_coro(data)): await gateway.api.async_load_parameters() await hass.config_entries.async_forward_entry_setup(config_entry, "scene") # To flush out the service call to update the group await hass.async_block_till_done() return gateway async def test_platform_manually_configured(hass): """Test that we do not discover anything or try to set up a gateway.""" assert ( await async_setup_component( hass, scene.DOMAIN, {"scene": {"platform": deconz.DOMAIN}} ) is True ) assert deconz.DOMAIN not in hass.data async def test_no_scenes(hass): """Test that scenes can be loaded without scenes being available.""" gateway = await setup_gateway(hass, {}) assert not hass.data[deconz.DOMAIN][gateway.bridgeid].deconz_ids assert len(hass.states.async_all()) == 0 async def test_scenes(hass): """Test that scenes works.""" with patch("pydeconz.DeconzSession.async_put_state", return_value=mock_coro(True)): gateway = await setup_gateway(hass, {"groups": GROUP}) assert "scene.group_1_name_scene_1" in gateway.deconz_ids assert len(hass.states.async_all()) == 1 await hass.services.async_call( "scene", "turn_on", {"entity_id": "scene.group_1_name_scene_1"}, blocking=True ) async def test_unload_scene(hass): """Test that it works to unload scene entities.""" gateway = await setup_gateway(hass, {"groups": GROUP}) await gateway.async_reset() assert len(hass.states.async_all()) == 0
apache-2.0
7,678,554,311,255,111,000
28.77
87
0.655358
false
chandangreddy/autotuner
individual.py
1
13400
import timeit import os import re import debug import compiler_flags import config import enums import collections import subprocess import threading import internal_exceptions import time class EndOfQueue: def __init__(self): pass def get_fittest(population): fittest = None for individual in population: if individual.status == enums.Status.passed: if fittest: if individual.fitness > fittest.fitness: fittest = individual else: fittest = individual if not fittest: raise internal_exceptions.NoFittestException("None of the individuals among this population completed successfully, hence there is no fittest individual") return fittest def create_test_case(tile_size, block_size, grid_size, shared_mem=True, private_mem=True, k=compiler_flags.SizesFlag.ALL_KERNELS_SENTINEL): individual = Individual() per_kernel_size_info = collections.OrderedDict() per_kernel_size_info[k] = compiler_flags.SizeTuple(tile_size, block_size, grid_size) individual.kernel_num = k #for flag in compiler_flags.PPCG.optimisation_flags: # print(flag) #TODO: Get a better way of getting size_data_flag flag = compiler_flags.PPCG.optimisation_flags[4] individual.ppcg_flags[flag] = per_kernel_size_info if not shared_mem: flag = compiler_flags.PPCG.optimisation_flags[0] #individual.ppcg_flags[flag] = compiler_flags.EnumerationFlag(flag) individual.ppcg_flags[flag] = True if not private_mem: flag = compiler_flags.PPCG.optimisation_flags[7] #individual.ppcg_flags[flag] = compiler_flags.EnumerationFlag(flag) individual.ppcg_flags[flag] = True #Set isl fusion flag #flag = compiler_flags.PPCG.optimisation_flags[6] #individual.ppcg_flags[flag] = fusion #string = individual.ppcg_flags[flag].get_command_line_string(1024) #print(string) #print("end") return individual def create_random(): individual = Individual() for flag in compiler_flags.PPCG.optimisation_flags: print(flag) individual.ppcg_flags[flag] = flag.random_value() for flag in compiler_flags.CC.optimisation_flags: individual.cc_flags[flag] = flag.random_value() for flag in compiler_flags.CXX.optimisation_flags: individual.cxx_flags[flag] = flag.random_value() for flag in compiler_flags.NVCC.optimisation_flags: individual.nvcc_flags[flag] = flag.random_value() return individual class Individual: """An individual solution in a population""" ID = 0 @staticmethod def get_ID_init(): Individual.ID += 1 return Individual.ID def file_name(self): if config.Arguments.binary_file_name: return config.Arguments.binary_file_name return 'testcase'+str(self.ID) #return 'gemm' def set_ID(self, num): self.ID = num def get_ID(self): return self.ID def __init__(self): self.ID = Individual.get_ID_init() self.ppcg_flags = collections.OrderedDict() self.cc_flags = collections.OrderedDict() self.cxx_flags = collections.OrderedDict() self.nvcc_flags = collections.OrderedDict() self.status = enums.Status.failed self.execution_time = float("inf") self.num = 0 self.kernel_num=compiler_flags.SizesFlag.ALL_KERNELS_SENTINEL self.per_kernel_time = [] for k in config.Arguments.kernels_to_tune: self.per_kernel_time.append(float("inf")) def all_flags(self): return self.ppcg_flags.keys() + self.cc_flags.keys() + self.cxx_flags.keys() + self.nvcc_flags.keys() def all_flag_values(self): return self.ppcg_flags.values() + self.cc_flags.values() + self.cxx_flags.values() + self.nvcc_flags.values() def run(self, timeout): try: self.compile(timeout) if self.status == enums.Status.passed: # Fitness is inversely proportional to execution time if self.execution_time == 0: self.fitness = float("inf") else: self.fitness = 
1/self.execution_time debug.verbose_message("Individual %d: execution time = %f, fitness = %f" \ % (self.ID, self.execution_time, self.fitness), __name__) else: self.fitness = 0 except internal_exceptions.FailedCompilationException as e: debug.exit_message(e) def checkforpause(self): while(1): if os.path.isfile('.pause'): print("Auto tuning paused, remove .pause to restart") time.sleep(20) else: #print("Auto tuning restarted") break def compile(self, timeout=float("inf")): self.checkforpause() self.ppcg() #sucess=self.ppcg_with_timeout(timeout) #if not sucess: # return self.build() self.binary(timeout) def ppcg(self): self.ppcg_cmd_line_flags = "--target=%s --dump-sizes %s" % (config.Arguments.target, ' '.join(flag.get_command_line_string(self.ppcg_flags[flag]) for flag in self.ppcg_flags.keys())) os.environ["AUTOTUNER_PPCG_FLAGS"] = self.ppcg_cmd_line_flags if config.Arguments.cmd_string_complete: cmd = config.Arguments.ppcg_cmd+ ' '+self.ppcg_cmd_line_flags elif config.Arguments.target == enums.Targets.cuda: cmd = config.Arguments.ppcg_cmd + ' '+self.ppcg_cmd_line_flags+' -o '+self.file_name() else: cmd = config.Arguments.ppcg_cmd + ' '+self.ppcg_cmd_line_flags+' -o '+self.file_name()+'_host.c' debug.verbose_message("Running '%s'" % cmd, __name__) #debug.verbose_message("Running '%s'" % self.ppcg_cmd_line_flags , __name__) start = timeit.default_timer() self.ppcg_proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE) stderr = self.ppcg_proc.communicate()[1] end = timeit.default_timer() config.time_PPCG += end - start if self.ppcg_proc.returncode: raise internal_exceptions.FailedCompilationException("FAILED: '%s'" % config.Arguments.ppcg_cmd) def ppcg_with_timeout(self, timeout=float("inf")): thread = threading.Thread(target=self.ppcg) thread.start() thread.join(timeout) if thread.is_alive(): print("Timeout: terminating the ppcg ") self.ppcg_proc.terminate() thread.join(timeout) self.status = enums.Status.ppcgtimeout return False return True def build(self): if config.Arguments.cmd_string_complete: build_cmd = config.Arguments.build_cmd elif config.Arguments.target == enums.Targets.cuda: build_cmd = config.Arguments.build_cmd + ' ' + self.file_name()+ '_host.cu ' + self.file_name()+ '_kernel.cu '+ '-o '+ self.file_name()+'.exe' else: build_cmd = config.Arguments.build_cmd + ' ' + self.file_name()+ '_host.c ' + '-o '+ self.file_name()+'.exe' + ' -lprl -lOpenCL' debug.verbose_message("Running '%s'" % build_cmd, __name__) start = timeit.default_timer() proc = subprocess.Popen(build_cmd, shell=True) stderr = proc.communicate()[1] end = timeit.default_timer() config.time_backend += end - start if proc.returncode: raise internal_exceptions.FailedCompilationException("FAILED: '%s'" % config.Arguments.build_cmd) def deleteFile(self, fileName): try: if os.path.exists(fileName): os.remove(fileName) except: pass def extract_kernel_time(self, kernel_num, stdout): re_str = r'kernel'+str(kernel_num)+'\s*:\s*(\d*.\d+)ms' time_regex = re.compile(re_str) total_time = 0.0 nmatchedlines = 0 for line in stdout.split(os.linesep): line = line.strip() matches = time_regex.findall(line) if matches: nmatchedlines += 1 try: total_time += float(matches[0]) except: raise internal_exceptions.BinaryRunException("Execution time '%s' is not in the required format" % matches[0]) if nmatchedlines == 0: total_time = float("inf") return total_time def update_kernel_times(self, stdout): if not config.Arguments.prl_profiling: return for k in config.Arguments.kernels_to_tune: self.per_kernel_time[k] = 
self.extract_kernel_time(k, stdout) def binary(self, best_execution_time=float("inf")): #time_regex = re.compile(r'^(\d*\.\d+|\d+)$') #print config.Arguments.execution_time_regex if config.Arguments.prl_profiling: if self.kernel_num == compiler_flags.SizesFlag.ALL_KERNELS_SENTINEL: re_str = r'compute\s*:\s*(\d*.\d+)ms' else: re_str = r'kernel'+str(self.kernel_num)+'\s*:\s*(\d*.\d+)ms' else: re_str = config.Arguments.execution_time_regex print re_str time_regex = re.compile(re_str) total_time = 0.0 status = enums.Status.passed num_actual_runs = 0 for run in xrange(1,config.Arguments.runs+1): if config.Arguments.cmd_string_complete: run_cmd = config.Arguments.run_cmd else: run_cmd = './'+self.file_name()+'.exe '+config.Arguments.run_cmd_input #run_cmd = config.Arguments.run_cmd debug.verbose_message("Run #%d of '%s'" % (run, run_cmd), __name__) start = timeit.default_timer() self.proc = subprocess.Popen(run_cmd, shell=True, stdout=subprocess.PIPE) stdout, stderr = self.proc.communicate() end = timeit.default_timer() if self.proc.returncode: sper_kernel_size_infotatus = enums.Status.failed debug.warning_message("FAILED: '%s'" % config.Arguments.run_cmd) continue if config.Arguments.execution_time_from_binary: if not stdout: raise internal_exceptions.BinaryRunException("Expected the binary to dump its execution time. Found nothing") self.update_kernel_times(stdout) nmatchedlines = 0 for line in stdout.split(os.linesep): line = line.strip() matches = time_regex.findall(line) if matches: nmatchedlines += 1 try: total_time += float(matches[0]) except: raise internal_exceptions.BinaryRunException("Execution time '%s' is not in the required format" % matches[0]) if nmatchedlines == 0: raise internal_exceptions.BinaryRunException("Regular expression did not match anything on the program's output") else: total_time += end - start num_actual_runs +=1 per_var = 1 + config.Arguments.max_exec_time_var/100 time = per_var * best_execution_time if total_time > time * num_actual_runs: #print "Execution time of cur test case is worst than the best so far, stopping at first run" break self.status = status config.time_binary += total_time if num_actual_runs != 0: self.execution_time = total_time/num_actual_runs else: self.execution_time = total_time self.deleteFile(self.file_name()+'.exe') self.deleteFile(self.file_name()+'_host.c') self.deleteFile(self.file_name()+'_host_kernel.cl') self.deleteFile(self.file_name()+'_host.cu') self.deleteFile(self.file_name()+'_kernel.cu') self.deleteFile(self.file_name()+'_kernel.hu') self.deleteFile(self.file_name()+'_host_kernel.hu') self.deleteFile(self.file_name()+'_host_kernel.h') self.deleteFile(self.file_name()) def run_with_timeout(self, timeout=2): print "executing task " + str(self.ID) timeout = config.Arguments.timeout_ppcg try: thread = threading.Thread(target=self.binary) thread.start() thread.join(timeout) if thread.is_alive(): print("Timeout: terminating the procs") self.proc.terminate() thread.join() self.status = enums.Status.timeout except: print("Exception running"+str(self.ID)) self.status = enums.Status.timeout return def __str__(self): return "ID %4d: execution time = %3f, ppcg = %s, status = %s" % (self.ID, self.execution_time, self.ppcg_cmd_line_flags, self.status)
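

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the per-kernel timing line
# format that extract_kernel_time() above expects on stdout. The sample line
# is an assumption used only for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_stdout = "kernel3 : 12.345ms"
    print(re.compile(r'kernel' + str(3) + r'\s*:\s*(\d*.\d+)ms').findall(sample_stdout))
    # -> ['12.345']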
mit
1,518,361,591,463,232,800
38.64497
165
0.578209
false
swipeat/back
interfaces/login.py
1
1560
#
# list.py : Interfaces to access list.
#
from db import user
from interfaces import app
import json
from flask import request
from flask import make_response
from flask import session


# /login : Login to user's account
@app.route("/login", methods=["POST"])
def login():
    """
    Login to user's account
    """
    # Get login and password
    # login = request.authorization["username"]
    # password = request.authorization["password"]

    # temp hack to make demo faster
    login = "user"
    password = "user"

    # Do some check in the database
    cond, msg = user.check_login(login, password)
    if cond:
        # Set username/password in the response
        session["username"] = login
        session["password"] = password

        # Send ok response with cookie
        return json.dumps({"response" : 0, "message" : msg})
    else:
        # Sorry man
        return json.dumps({"response" : -1, "error" : "Wrong username or password"}, sort_keys=True)


# /login/signup : Create an account
@app.route("/login/signup", methods=["POST"])
def signup():
    """
    Create a new account
    """
    # Get new username and password
    # username = request.form["username"]
    # password = request.form["password"]

    # temp hack to make demo faster
    username = "user"
    password = "user"

    # Try to create account
    resp, msg = user.create_account(username, password)

    # Create the account
    if resp:
        return json.dumps({"response" : 0, "message" : msg})
    else:
        return json.dumps({"response" : -1, "message" : msg})
bsd-3-clause
-8,406,343,558,309,903,000
25.913793
99
0.633974
false
iandees/all-the-places
locations/spiders/jamba_juice.py
1
3672
import json
import scrapy
from six.moves.urllib.parse import urlencode
import re
from locations.items import GeojsonPointItem

DAYS = {
    '1': 'Mo',
    '2': 'Tu',
    '3': 'We',
    '4': 'Th',
    '5': 'Fr',
    '6': 'Sa',
    '7': 'Su'
}


class JambaJuiceSpider(scrapy.Spider):
    name = "jambajuice"
    allowed_domains = ["https://momentfeed-prod.apigee.net/"]
    download_delay = 1.5
    start_urls = (
        'https://momentfeed-prod.apigee.net/api/llp.json?',
    )

    def start_requests(self):
        url = self.start_urls[0]

        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6',
            'Origin': 'http://locations.jambajuice.com',
            'Accept-Encoding': 'gzip, deflate, br',
            'Referer': 'http://locations.jambajuice.com/site-map',
            'Connection': 'keep-alive',
        }

        params = [('auth_token', 'PQUBOCBNLKOUIYUP'),
                  ('sitemap', 'true')]

        url = self.start_urls[0] + urlencode(params)
        yield scrapy.Request(url=url, headers=headers, callback=self.parse)

    def parse_hours(self, hours):
        if not hours:
            return ''

        days = [x for x in hours.split(';') if x]
        day_hours = {}
        all_days = []
        opening_hours = []

        for day in days:
            matches = re.findall(r"^(.*){1},([\d]{4}),([\d]{4})$", day)

            for day, start, end in matches:
                start_hour_min = '{}:{}'.format(start[:2], start[2:])
                end_hour_min = '{}:{}'.format(end[:2], end[2:])
                hours = '{}-{}'.format(start_hour_min, end_hour_min)

                if not day_hours:
                    day_hours = {
                        'from_day': DAYS[day],
                        'to_day': DAYS[day],
                        'hours': hours
                    }
                elif hours == day_hours['hours']:
                    day_hours['to_day'] = DAYS[day]
                elif hours != day_hours['hours']:
                    all_days.append(day_hours)
                    day_hours = {
                        'from_day': DAYS[day],
                        'to_day': DAYS[day],
                        'hours': hours
                    }

        # don't drop the final run of days collected in the loop above
        if day_hours:
            all_days.append(day_hours)

        for day in all_days:
            osm_hours = ''
            if day['from_day'] == day['to_day']:
                osm_hours = '{} {}'.format(day['from_day'], day['hours'])
            else:
                osm_hours = '{}-{} {}'.format(day['from_day'], day['to_day'], day['hours'])
            opening_hours.append(osm_hours)

        return ";".join(opening_hours)

    def parse(self, response):
        stores = json.loads(response.body_as_unicode())

        for store in stores:
            props = {}
            store_info = store.get('store_info', '')
            props['ref'] = store_info['corporate_id']
            props['lat'] = store_info['latitude']
            props['lon'] = store_info['longitude']
            props['state'] = store_info['region']
            props['city'] = store_info['locality']
            props['opening_hours'] = self.parse_hours(store_info.get('store_hours', ''))
            props['addr_full'] = ', '.join([store_info['address'],
                                            store_info.get('address_extended', '')])

            sieve_out = ['website', 'phone', 'postcode', 'country']
            props.update({key: store_info[key] for key in sieve_out})

            yield GeojsonPointItem(**props)
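

# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): what parse_hours() above
# produces for a raw "day,open,close;" string. The sample string is an
# assumed example of the feed format.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    spider = JambaJuiceSpider()
    raw = '1,0900,2100;2,0900,2100;6,1000,1800;7,1000,1800;'
    print(spider.parse_hours(raw))  # Mo-Tu 09:00-21:00;Sa-Su 10:00-18:00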
mit
2,389,071,301,505,917,000
32.688073
91
0.455065
false
alephdata/ingestors
ingestors/media/image.py
1
2561
import logging from io import BytesIO from PIL import Image, ExifTags from followthemoney import model from ingestors.ingestor import Ingestor from ingestors.support.ocr import OCRSupport from ingestors.support.timestamp import TimestampSupport from ingestors.exc import ProcessingException log = logging.getLogger(__name__) class ImageIngestor(Ingestor, OCRSupport, TimestampSupport): """Image file ingestor class. Extracts the text from images using OCR.""" MIME_TYPES = [ "image/x-portable-graymap", "image/png", "image/x-png", "image/jpeg", "image/jpg", "image/gif", "image/pjpeg", "image/bmp", "image/x-windows-bmp", "image/x-portable-bitmap", "image/x-coreldraw", "application/postscript", "image/vnd.dxf", ] EXTENSIONS = ["jpg", "jpe", "jpeg", "png", "gif", "bmp"] SCORE = 10 def extract_exif(self, img, entity): if not hasattr(img, "_getexif"): return exif = img._getexif() if exif is None: return for num, value in exif.items(): try: tag = ExifTags.TAGS[num] except KeyError: log.warning("Unknown EXIF code: %s", num) continue if tag == "DateTimeOriginal": entity.add("authoredAt", self.parse_timestamp(value)) if tag == "DateTime": entity.add("date", self.parse_timestamp(value)) if tag == "Make": entity.add("generator", value) if tag == "Model": entity.add("generator", value) def ingest(self, file_path, entity): entity.schema = model.get("Image") with open(file_path, "rb") as fh: data = fh.read() try: image = Image.open(BytesIO(data)) image.load() self.extract_exif(image, entity) languages = self.manager.context.get("languages") text = self.extract_ocr_text(data, languages=languages) entity.add("bodyText", text) except (OSError, IOError, Exception) as err: raise ProcessingException("Failed to open image: %s" % err) @classmethod def match(cls, file_path, entity): score = super(ImageIngestor, cls).match(file_path, entity) if score <= 0: for mime_type in entity.get("mimeType"): if mime_type.startswith("image/"): score = cls.SCORE - 1 return score
mit
7,502,466,121,803,189,000
31.0125
77
0.565795
false
cthit/singIT
utilities/singIT-scraper.py
1
4562
#!/usr/bin/env python3 from pathlib import Path from PIL import Image, ImageFile import requests import time import chardet import hashlib import json import argparse import sys ImageFile.LOAD_TRUNCATED_IMAGES = True config = {} api_key = '' image_dir = Path('.') / 'images' def main(args): l = [] root = get_root(args.directory) for txt in root.rglob('*.txt'): if txt.is_file(): try: song = build_song_object(txt) l.append(song) except: print(str(txt)+" is broken!") print(get_encoding(txt)) pass post_content(l, args) def single(args): filename = Path(args.single) if not (filename.exists() and filename.suffix == '.txt'): print('Specify a readable txt file kthx') sys.exit(0) song = build_song_object(filename) post_content([song], args) def get_root(directory): root = Path(directory) if not root.exists(): print('Specify a readable directory kthx') sys.exit(0) root = root.resolve() if not root.parts[-1] == 'songs': new_root = root / 'songs' if not new_root.exists(): print('Specified path does not end in, or contain, a \'songs\' directory') sys.exit(0) else: root = new_root return(root) def build_song_object(filename): song = get_metadata(filename) if 'cover' in song: cover_file = filename.parent / song['cover'] if cover_file.exists(): make_small_image(cover_file, song['song_hash']) else: song['cover'] = None else: song['cover'] = None return(song) def get_metadata(filename): song = {} song['song_hash'] = get_hash(filename) f_encoding = get_encoding(filename)['encoding'] if f_encoding == 'ISO-8859-2': f_encoding = 'ISO-8859-1' with filename.open(encoding=f_encoding) as f: for line in f: if line.startswith('#'): key_end = line.find(':') key = line[1:key_end].lower() value = line[key_end+1:] song[key] = value.rstrip('\n') return(song) def get_encoding(filename): byte_object = filename.open('rb').read() return(chardet.detect(byte_object)) def get_hash(filename): h = hashlib.md5() h.update(str(filename).encode('utf-8')) return(h.hexdigest()) def make_small_image(filename, song_hash): dest_fname = song_hash + '.png' destination_file = image_dir.resolve() destination_file = destination_file / dest_fname if not destination_file.exists(): basewidth = 200 try: with Image.open(filename) as img: wpercent = (basewidth/float(img.size[0])) hsize = int((float(img.size[1]) * float(wpercent))) with img.resize((basewidth, hsize), Image.ANTIALIAS) as sml_img: sml_img.save(str(destination_file), 'PNG') except IOError: pass def post_content(songlist, args): d = {'songs': songlist} if not args.file: print('Posting', len(songlist), 'songs') h = { 'Content-Type': 'application/json', 'Authorization': 'Token token=' + api_key } r = requests.post(config['api'], data=json.dumps(d), headers=h) for index, song in enumerate(songlist): if jsonlist[index]: print("Song with index {} is empty in r.json()".format(index)) print(song) color = '\033[32m' if r.status_code == requests.codes.created else '\033[91m' reset = '\033[0m' # reset color print("[{}] {:2} -> {} -> {}{} {}{}" .format(time.strftime("%F %T"), len(songlist), config['api'], color, r.status_code, r.reason, reset), flush=True) else: print('Outputting', len(songlist), 'songs') with open('output.txt', 'w') as outfile: json.dump(d, outfile) if __name__ == "__main__": try: with open('config.json', 'r') as f: the_config = json.load(f) except FileNotFoundError: print('config.json not found') sys.exit(0) except Exception as e: raise e api_key = the_config['api_key'] del the_config['api_key'] config = the_config parser = argparse.ArgumentParser(description='Parse Ultrastar DX 
song files into JSON.') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-d','--directory', help='directory to use', action='store') group.add_argument('-s','--single', help='txt file to use', action='store') parser.add_argument('-f','--file', help='parse to file instead of upload', action='store_true', default=False) args = parser.parse_args() if not image_dir.exists(): image_dir.mkdir() if args.single: single(args) else: main(args)
mit
-8,570,207,953,481,354,000
26.648485
90
0.620342
false
noba3/KoTos
addons/plugin.video.movie25/resources/libs/live/nhl.py
1
7302
import urllib,urllib2,re,cookielib,os,sys import xbmc, xbmcgui, xbmcaddon, xbmcplugin,time from resources.libs import main #Mash Up - by Mash2k3 2012. from t0mm0.common.addon import Addon addon_id = 'plugin.video.movie25' selfAddon = xbmcaddon.Addon(id=addon_id) addon = Addon('plugin.video.movie25', sys.argv) art = main.art def MAINNHL(murl): source_media = {} from datetime import datetime datex=datetime.now().strftime('%Y%m%d') xml='http://live.nhl.com/GameData/SeasonSchedule-20142015.json' link=main.OPENURL(xml) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','') main.addDir('[COLOR red]Archived Games[/COLOR]','Archived',394,art+'/nhl.png') if 'Archived' not in murl: main.addLink("[COLOR red]Live Games Windows Only, Requires some modifications to get working visit forum.[/COLOR]",'','') match=re.compile('{"id":(.+?),"est":"(.+?)","a":"(.+?)","h":"(.+?)"}',re.DOTALL).findall(link) for id,timed,ateam,hteam in match: split= re.search('(.+?)\s(\d+:\d+):\d+',timed) split1=str(split.group(1)) split2=str(split.group(2)) if 'Archived' in murl: if int(split1)<=int(datex): dates= re.search('(\d{4})(\d{2})(\d{2})',split1) date=str(dates.group(2))+"/"+str(dates.group(3))+"/"+str(dates.group(1)) timed = time.strftime("%I:%M %p", time.strptime(split2, "%H:%M")) main.addDir(ateam+' at '+hteam+' [COLOR red]('+timed+')[/COLOR] [COLOR blue]('+date+')[/COLOR]',id,395,art+'/nhl.png') else: if datex == split1: dates= re.search('(\d{4})(\d{2})(\d{2})',split1) date=str(dates.group(2))+"/"+str(dates.group(3))+"/"+str(dates.group(1)) timed = time.strftime("%I:%M %p", time.strptime(split2, "%H:%M")) main.addDir(ateam+' at '+hteam+' [COLOR red]('+timed+')[/COLOR] [COLOR blue]('+date+')[/COLOR]',id,395,art+'/nhl.png') def LISTSTREAMS(mname,murl): mname=main.removeColoredText(mname) id= re.search('(\d{4})(\d{2})(\d{4})',murl) xml='http://smb.cdnak.neulion.com/fs/nhl/mobile/feed_new/data/streams/'+str(id.group(1))+'/ipad/'+str(id.group(2))+'_'+str(id.group(3))+'.json' link=main.OPENURL(xml) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','') match=re.compile('"vod-condensed":{"bitrate0":"([^"]+)"},"vod-continuous":{"bitrate0":"([^"]+)","image":"([^"]+)"},"vod-whole":{"bitrate0":"([^"]+)"}',re.DOTALL).findall(link) for cond,cont,thumb,whole in match: if '_h_condensed' in cond: main.addPlayc(mname+' [COLOR blue]Home Condensed[/COLOR]',cond,396,thumb,'','','','','') else: main.addPlayc(mname+' [COLOR blue]Away Condensed[/COLOR]',cond,396,thumb,'','','','','') if '_h_continuous' in cont: main.addPlayc(mname+' [COLOR blue]Home Continuous[/COLOR]',cont,396,thumb,'','','','','') else: main.addPlayc(mname+' [COLOR blue]Away Continuous[/COLOR]',cont,396,thumb,'','','','','') if '_h_whole' in whole: main.addPlayc(mname+' [COLOR blue]Home Whole[/COLOR]',whole,396,thumb,'','','','','') else: main.addPlayc(mname+' [COLOR blue]Away Whole[/COLOR]',whole,396,thumb,'','','','','') match2=re.compile('"away".+?"live":{"bitrate0":"([^"]+)"},.+?"image":"([^"]+)"',re.DOTALL).findall(link) for live,thumb in match2: main.addPlayc(mname+' [COLOR blue]Away Live[/COLOR]',live+'x0xe'+str(murl),396,thumb,'','','','','') match3=re.compile('"home".+?"live":{"bitrate0":"([^"]+)"},.+?"image":"([^"]+)"',re.DOTALL).findall(link) for live,thumb in match3: main.addPlayc(mname+' [COLOR blue]Home LIVE[/COLOR]',live+'x0xe'+str(murl),396,thumb,'','','','','') def LINK(mname,murl,thumb): main.GA(mname,"Watched") ok=True namelist=[] urllist=[] playlist = 
xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() if '_whole' in murl: link=main.OPENURL(murl) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','') part= re.findall('/([^/]+)ipad.mp4.m3u8',murl)[0] match=re.compile('BANDWIDTH=.+?'+part+'(.+?)_ipad.mp4.m3u8',re.DOTALL).findall(link) for band in sorted(match): namelist.append(band) dialog = xbmcgui.Dialog() answer =dialog.select("Pick A Bandwidth", namelist) if answer != -1: nurl=murl.split('ipad.mp4.m3u8')[0] stream_url=nurl+namelist[int(answer)]+'_ipad.mp4.m3u8'+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)' else: return elif '/live/' in murl: import subprocess jarfile = xbmc.translatePath('special://home/addons/plugin.video.movie25/resources/libs/live/FuckNeulionV2.jar') if 'Home' in mname: Side='home' if 'Away' in mname: Side='away' SelectGame=murl.split('x0xe')[1] murl=murl.split('x0xe')[0] startupinfo = None if os.name == 'nt': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW command=['java','-jar',jarfile,SelectGame,Side] proxy_hack_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, startupinfo=startupinfo) xbmc.sleep(1000) link=main.OPENURL(murl) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','') part= re.findall('/([^/]+)ipad.m3u8',murl)[0] match=re.compile('BANDWIDTH=.+?'+part+'(.+?)_ipad.m3u8',re.DOTALL).findall(link) for band in sorted(match): namelist.append(band) dialog = xbmcgui.Dialog() answer =dialog.select("Pick A Bandwidth", namelist) if answer != -1: nurl=murl.split('ipad.m3u8')[0] stream_url=nurl+namelist[int(answer)]+'_ipad.m3u8'+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)' else: return else: stream_url = murl+'|User-Agent=PS4 libhttp/1.76 (PlayStation 4)' listitem = xbmcgui.ListItem(thumbnailImage=thumb) infoL={'Title': mname, 'Genre': 'Live'} from resources.universal import playbackengine player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='movie', title=mname,season='', episode='', year='',img=thumb,infolabels=infoL, watchedCallbackwithParams='',imdb_id='') #WatchHistory if selfAddon.getSetting("whistory") == "true": from resources.universal import watchhistory wh = watchhistory.WatchHistory('plugin.video.movie25') wh.add_item(mname+' '+'[COLOR green]NHL[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False) return ok
gpl-2.0
-4,919,304,330,789,671,000
53.088889
231
0.556012
false
jthi3rry/hasoffers
tests/test_api.py
1
4154
import unittest from tests import mocks from hasoffers.api import Client, BrandClient, AffiliateClient class TestBrandAPI(unittest.TestCase): def setUp(self): self.client = BrandClient("token", "id") def test_client(self): self.assertEqual("token", self.client.network_token) self.assertEqual("id", self.client.network_id) def test_backward_compatibility(self): self.assertIsInstance(Client("token", "id"), BrandClient) def test_auth_params(self): self.assertEqual({"NetworkToken": "token", "NetworkId": "id"}, self.client.get_auth_params()) @mocks.patch_response(mocks.APPLICATION_FINDALLOFFERCATEGORIES_SUCCESS) def test_request(self, response_data): response = self.client.request("Application", "findAllOfferCategories", fields=["id", "name", "status"], sort={"name": "asc"}) self.assertTrue(response.success) self.assertDictEqual(response_data, response.raw) self.assertEqual(response_data['request']['Target'], response.target) self.assertEqual(response_data['request']['Method'], response.method) self.assertEqual(response_data['request']['Version'], response.version) self.assertEqual(response_data['response']['status'], response.status) self.assertEqual(response_data['response']['errors'], response.errors) self.assertDictEqual(response_data['response']['data'], response.data) @mocks.patch_response(mocks.CONVERSION_FINDALL_SUCCESS) def test_request_with_filters(self, response_data): response = self.client.request("Conversion", "findAll", page="1", limit="100", fields=["id", "payout", "revenue"], sort={"datetime": "asc"}, filters={ "OR": { "advertiser_id": [444, 555], "revenue": {"GREATER_THAN_OR_EQUAL_TO": 100} }, "user_agent": {"LIKE": "%AppleWebKit%"}, "affiliate_id": 111 }) self.assertTrue(response.success) self.assertDictEqual(response_data, response.raw) self.assertEqual(response_data['request']['Target'], response.target) self.assertEqual(response_data['request']['Method'], response.method) self.assertEqual(response_data['request']['Version'], response.version) self.assertEqual(response_data['response']['status'], response.status) self.assertEqual(response_data['response']['errors'], response.errors) self.assertDictEqual(response_data['response']['data'], response.data) class TestAffiliateAPI(unittest.TestCase): def setUp(self): self.client = AffiliateClient("key", "id") def test_client(self): self.assertEqual("key", self.client.api_key) self.assertEqual("id", self.client.network_id) def test_auth_params(self): self.assertEqual({"api_key": "key", "NetworkId": "id"}, self.client.get_auth_params()) @mocks.patch_response(mocks.AFFILIATE_AFFILIATE_FINDBYID_SUCCESS) def test_request(self, response_data): response = self.client.request("Affiliate_Affiliate", "findById") self.assertTrue(response.success) self.assertDictEqual(response_data, response.raw) self.assertEqual(response_data['request']['Target'], response.target) self.assertEqual(response_data['request']['Method'], response.method) self.assertEqual(response_data['request']['Version'], response.version) self.assertEqual(response_data['response']['status'], response.status) self.assertEqual(response_data['response']['errors'], response.errors) self.assertDictEqual(response_data['response']['data'], response.data)
mit
6,900,767,210,658,063,000
47.870588
101
0.590997
false
dvenkatsagar/buildozer
buildozer/__init__.py
1
41010
''' Buildozer ========= Generic Python packager for Android / iOS. Desktop later. ''' __version__ = '0.30dev' import os import re import sys import zipfile import select import codecs from buildozer.jsonstore import JsonStore from sys import stdout, stderr, exit from re import search from os.path import join, exists, dirname, realpath, splitext, expanduser from subprocess import Popen, PIPE from os import environ, unlink, rename, walk, sep, listdir, makedirs from copy import copy from shutil import copyfile, rmtree, copytree from fnmatch import fnmatch try: from urllib.request import FancyURLopener from configparser import SafeConfigParser except ImportError: from urllib import FancyURLopener from ConfigParser import SafeConfigParser try: import fcntl except ImportError: # on windows, no fcntl fcntl = None try: # if installed, it can give color to windows as well import colorama colorama.init() RESET_SEQ = colorama.Fore.RESET + colorama.Style.RESET_ALL COLOR_SEQ = lambda x: x BOLD_SEQ = '' if sys.platform == 'win32': BLACK = colorama.Fore.BLACK + colorama.Style.DIM else: BLACK = colorama.Fore.BLACK + colorama.Style.BRIGHT RED = colorama.Fore.RED BLUE = colorama.Fore.CYAN USE_COLOR = 'NO_COLOR' not in environ except ImportError: if sys.platform != 'win32': RESET_SEQ = "\033[0m" COLOR_SEQ = lambda x: "\033[1;{}m".format(30 + x) BOLD_SEQ = "\033[1m" BLACK = 0 RED = 1 BLUE = 4 USE_COLOR = 'NO_COLOR' not in environ else: RESET_SEQ = '' COLOR_SEQ = '' BOLD_SEQ = '' RED = BLUE = BLACK = 0 USE_COLOR = False # error, info, debug LOG_LEVELS_C = (RED, BLUE, BLACK) LOG_LEVELS_T = 'EID' SIMPLE_HTTP_SERVER_PORT = 8000 IS_PY3 = sys.version_info[0] >= 3 class ChromeDownloader(FancyURLopener): version = ( 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36') urlretrieve = ChromeDownloader().retrieve class BuildozerException(Exception): ''' Exception raised for general situations buildozer cannot process. ''' pass class BuildozerCommandException(BuildozerException): ''' Exception raised when an external command failed. See: `Buildozer.cmd()`. ''' pass class Buildozer(object): standard_cmds = ('distclean', 'update', 'debug', 'release', 'deploy', 'run', 'serve') def __init__(self, filename='buildozer.spec', target=None): super(Buildozer, self).__init__() self.log_level = 1 self.environ = {} self.specfilename = filename self.state = None self.build_id = None self.config_profile = '' self.config = SafeConfigParser(allow_no_value=True) self.config.optionxform = lambda value: value self.config.getlist = self._get_config_list self.config.getlistvalues = self._get_config_list_values self.config.getdefault = self._get_config_default self.config.getbooldefault = self._get_config_bool if exists(filename): self.config.read(filename) self.check_configuration_tokens() # Check all section/tokens for env vars, and replace the # config value if a suitable env var exists. 
set_config_from_envs(self.config) try: self.log_level = int(self.config.getdefault( 'buildozer', 'log_level', '1')) except: pass self.builddir = self.config.getdefault('buildozer', 'builddir', None) self.targetname = None self.target = None if target: self.set_target(target) def set_target(self, target): '''Set the target to use (one of buildozer.targets, such as "android") ''' self.targetname = target m = __import__('buildozer.targets.{0}'.format(target), fromlist=['buildozer']) self.target = m.get_target(self) self.check_build_layout() self.check_configuration_tokens() def prepare_for_build(self): '''Prepare the build. ''' assert(self.target is not None) if hasattr(self.target, '_build_prepared'): return self.info('Preparing build') self.info('Check requirements for {0}'.format(self.targetname)) self.target.check_requirements() self.info('Install platform') self.target.install_platform() self.info('Check application requirements') self.check_application_requirements() self.info('Check garden requirements') self.check_garden_requirements() self.info('Compile platform') self.target.compile_platform() # flag to prevent multiple build self.target._build_prepared = True def build(self): '''Do the build. The target can set build_mode to 'release' or 'debug' before calling this method. (:meth:`prepare_for_build` must have been call before.) ''' assert(self.target is not None) assert(hasattr(self.target, '_build_prepared')) if hasattr(self.target, '_build_done'): return # increment the build number self.build_id = int(self.state.get('cache.build_id', '0')) + 1 self.state['cache.build_id'] = str(self.build_id) self.info('Build the application #{}'.format(self.build_id)) self.build_application() self.info('Package the application') self.target.build_package() # flag to prevent multiple build self.target._build_done = True # # Log functions # def log(self, level, msg): if level > self.log_level: return if USE_COLOR: color = COLOR_SEQ(LOG_LEVELS_C[level]) print(''.join((RESET_SEQ, color, '# ', msg, RESET_SEQ))) else: print('{} {}'.format(LOG_LEVELS_T[level], msg)) def debug(self, msg): self.log(2, msg) def info(self, msg): self.log(1, msg) def error(self, msg): self.log(0, msg) # # Internal check methods # def checkbin(self, msg, fn): self.debug('Search for {0}'.format(msg)) if exists(fn): return realpath(fn) for dn in environ['PATH'].split(':'): rfn = realpath(join(dn, fn)) if exists(rfn): self.debug(' -> found at {0}'.format(rfn)) return rfn self.error('{} not found, please install it.'.format(msg)) exit(1) def cmd(self, command, **kwargs): # prepare the environ, based on the system + our own env env = copy(environ) env.update(self.environ) # prepare the process kwargs.setdefault('env', env) kwargs.setdefault('stdout', PIPE) kwargs.setdefault('stderr', PIPE) kwargs.setdefault('close_fds', True) kwargs.setdefault('shell', True) kwargs.setdefault('show_output', self.log_level > 1) show_output = kwargs.pop('show_output') get_stdout = kwargs.pop('get_stdout', False) get_stderr = kwargs.pop('get_stderr', False) break_on_error = kwargs.pop('break_on_error', True) sensible = kwargs.pop('sensible', False) if not sensible: self.debug('Run {0!r}'.format(command)) else: if type(command) in (list, tuple): self.debug('Run {0!r} ...'.format(command[0])) else: self.debug('Run {0!r} ...'.format(command.split()[0])) self.debug('Cwd {}'.format(kwargs.get('cwd'))) # open the process if sys.platform == 'win32': kwargs.pop('close_fds', None) process = Popen(command, **kwargs) # prepare fds fd_stdout = 
process.stdout.fileno() fd_stderr = process.stderr.fileno() if fcntl: fcntl.fcntl( fd_stdout, fcntl.F_SETFL, fcntl.fcntl(fd_stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl( fd_stderr, fcntl.F_SETFL, fcntl.fcntl(fd_stderr, fcntl.F_GETFL) | os.O_NONBLOCK) ret_stdout = [] if get_stdout else None ret_stderr = [] if get_stderr else None while True: try: readx = select.select([fd_stdout, fd_stderr], [], [])[0] except select.error: break if fd_stdout in readx: chunk = process.stdout.read() if not chunk: break if get_stdout: ret_stdout.append(chunk) if show_output: if IS_PY3: stdout.write(chunk.decode('utf-8')) else: stdout.write(chunk) if fd_stderr in readx: chunk = process.stderr.read() if not chunk: break if get_stderr: ret_stderr.append(chunk) if show_output: if IS_PY3: stderr.write(chunk.decode('utf-8')) else: stderr.write(chunk) stdout.flush() stderr.flush() process.communicate() if process.returncode != 0 and break_on_error: self.error('Command failed: {0}'.format(command)) self.error('') self.error('Buildozer failed to execute the last command') if self.log_level <= 1: self.error('If the error is not obvious, please raise the log_level to 2') self.error('and retry the latest command.') else: self.error('The error might be hidden in the log above this error') self.error('Please read the full log, and search for it before') self.error('raising an issue with buildozer itself.') self.error('In case of a bug report, please add a full log with log_level = 2') raise BuildozerCommandException() if ret_stdout: ret_stdout = b''.join(ret_stdout) if ret_stderr: ret_stderr = b''.join(ret_stderr) return (ret_stdout.decode('utf-8', 'ignore') if ret_stdout else None, ret_stderr.decode('utf-8') if ret_stderr else None, process.returncode) def cmd_expect(self, command, **kwargs): from pexpect import spawnu # prepare the environ, based on the system + our own env env = copy(environ) env.update(self.environ) # prepare the process kwargs.setdefault('env', env) kwargs.setdefault('show_output', self.log_level > 1) sensible = kwargs.pop('sensible', False) show_output = kwargs.pop('show_output') if show_output: kwargs['logfile'] = codecs.getwriter('utf8')(stdout) if not sensible: self.debug('Run (expect) {0!r}'.format(command)) else: self.debug('Run (expect) {0!r} ...'.format(command.split()[0])) self.debug('Cwd {}'.format(kwargs.get('cwd'))) return spawnu(command, **kwargs) def check_configuration_tokens(self): '''Ensure the spec file is 'correct'. 
''' self.info('Check configuration tokens') get = self.config.getdefault errors = [] adderror = errors.append if not get('app', 'title', ''): adderror('[app] "title" is missing') if not get('app', 'source.dir', ''): adderror('[app] "source.dir" is missing') package_name = get('app', 'package.name', '') if not package_name: adderror('[app] "package.name" is missing') elif package_name[0] in map(str, range(10)): adderror('[app] "package.name" may not start with a number.') version = get('app', 'version', '') version_regex = get('app', 'version.regex', '') if not version and not version_regex: adderror('[app] One of "version" or "version.regex" must be set') if version and version_regex: adderror('[app] Conflict between "version" and "version.regex"' ', only one can be used.') if version_regex and not get('app', 'version.filename', ''): adderror('[app] "version.filename" is missing' ', required by "version.regex"') orientation = get('app', 'orientation', 'landscape') if orientation not in ('landscape', 'portrait', 'all'): adderror('[app] "orientation" have an invalid value') if errors: self.error('{0} error(s) found in the buildozer.spec'.format( len(errors))) for error in errors: print(error) exit(1) def check_build_layout(self): '''Ensure the build (local and global) directory layout and files are ready. ''' self.info('Ensure build layout') if not exists(self.specfilename): print('No {0} found in the current directory. Abandon.'.format( self.specfilename)) exit(1) # create global dir self.mkdir(self.global_buildozer_dir) self.mkdir(self.global_cache_dir) # create local dir specdir = dirname(self.specfilename) if self.builddir: specdir = self.builddir self.mkdir(join(specdir, '.buildozer')) self.mkdir(join(specdir, 'bin')) self.mkdir(self.applibs_dir) self.state = JsonStore(join(self.buildozer_dir, 'state.db')) if self.targetname: target = self.targetname self.mkdir(join(self.global_platform_dir, target, 'platform')) self.mkdir(join(specdir, '.buildozer', target, 'platform')) self.mkdir(join(specdir, '.buildozer', target, 'app')) def check_application_requirements(self): '''Ensure the application requirements are all available and ready to be packaged as well. ''' requirements = self.config.getlist('app', 'requirements', '') target_available_packages = self.target.get_available_packages() # remove all the requirements that the target can compile onlyname = lambda x: x.split('==')[0] requirements = [x for x in requirements if onlyname(x) not in target_available_packages] # did we already installed the libs ? if exists(self.applibs_dir) and \ self.state.get('cache.applibs', '') == requirements: self.debug('Application requirements already installed, pass') return # recreate applibs self.rmdir(self.applibs_dir) self.mkdir(self.applibs_dir) # ok now check the availability of all requirements for requirement in requirements: self._install_application_requirement(requirement) # everything goes as expected, save this state! 
self.state['cache.applibs'] = requirements def _install_application_requirement(self, module): self._ensure_virtualenv() # resetup distribute, just in case self.debug('Install distribute') self.cmd('curl http://python-distribute.org/distribute_setup.py | venv/bin/python', get_stdout=True, cwd=self.buildozer_dir) self.debug('Install requirement {} in virtualenv'.format(module)) self.cmd('pip install --download-cache={} --target={} {}'.format( self.global_cache_dir, self.applibs_dir, module), env=self.env_venv, cwd=self.buildozer_dir) def check_garden_requirements(self): '''Ensure required garden packages are available to be included. ''' garden_requirements = self.config.getlist('app', 'garden_requirements', '') # have we installed the garden packages? if exists(self.gardenlibs_dir) and \ self.state.get('cache.gardenlibs', '') == garden_requirements: self.debug('Garden requirements already installed, pass') return # we're going to reinstall all the garden libs. self.rmdir(self.gardenlibs_dir) # but if we don't have requirements, or if the user removed everything, # don't do anything. if not garden_requirements: self.state['cache.gardenlibs'] = garden_requirements return self._ensure_virtualenv() self.cmd('pip install Kivy-Garden==0.1.1', env=self.env_venv) # recreate gardenlibs self.mkdir(self.gardenlibs_dir) for requirement in garden_requirements: self._install_garden_package(requirement) # save gardenlibs state self.state['cache.gardenlibs'] = garden_requirements def _install_garden_package(self, package): self._ensure_virtualenv() self.debug('Install garden package {} in buildozer_dir'.format(package)) self.cmd('garden install --app {}'.format(package), env=self.env_venv, cwd=self.buildozer_dir) def _ensure_virtualenv(self): if hasattr(self, 'venv'): return self.venv = join(self.buildozer_dir, 'venv') if not self.file_exists(self.venv): self.cmd('virtualenv --python=python2.7 ./venv', cwd=self.buildozer_dir) # read virtualenv output and parse it output = self.cmd('bash -c "source venv/bin/activate && env"', get_stdout=True, cwd=self.buildozer_dir) self.env_venv = copy(self.environ) for line in output[0].splitlines(): args = line.split('=', 1) if len(args) != 2: continue key, value = args if key in ('VIRTUAL_ENV', 'PATH'): self.env_venv[key] = value if 'PYTHONHOME' in self.env_venv: del self.env_venv['PYTHONHOME'] # ensure any sort of compilation will fail self.env_venv['CC'] = '/bin/false' self.env_venv['CXX'] = '/bin/false' def mkdir(self, dn): if exists(dn): return self.debug('Create directory {0}'.format(dn)) makedirs(dn) def rmdir(self, dn): if not exists(dn): return self.debug('Remove directory and subdirectory {}'.format(dn)) rmtree(dn) def file_matches(self, patterns): from glob import glob result = [] for pattern in patterns: matches = glob(expanduser(pattern.strip())) if not matches: return result.extend(matches) return result def file_exists(self, *args): return exists(join(*args)) def file_rename(self, source, target, cwd=None): if cwd: source = join(cwd, source) target = join(cwd, target) self.debug('Rename {0} to {1}'.format(source, target)) if not os.path.isdir(os.path.dirname(target)): self.error('Rename {0} to {1} fails becaues {2} is not a directory'.format(source, target, os.path.directory(target))) rename(source, target) def file_copy(self, source, target, cwd=None): if cwd: source = join(cwd, source) target = join(cwd, target) self.debug('Copy {0} to {1}'.format(source, target)) copyfile(source, target) def file_extract(self, archive, cwd=None): if 
archive.endswith('.tgz') or archive.endswith('.tar.gz'): # XXX tarfile doesn't work for NDK-r8c :( #tf = tarfile.open(archive, 'r:*') #tf.extractall(path=cwd) #tf.close() self.cmd('tar xzf {0}'.format(archive), cwd=cwd) return if archive.endswith('.tbz2') or archive.endswith('.tar.bz2'): # XXX same as before self.cmd('tar xjf {0}'.format(archive), cwd=cwd) return if archive.endswith('.bin'): # To process the bin files for linux and darwin systems self.cmd('chmod a+x {0}'.format(archive),cwd=cwd) self.cmd('./{0}'.format(archive),cwd=cwd) return if archive.endswith('.zip'): archive = join(cwd, archive) zf = zipfile.ZipFile(archive) zf.extractall(path=cwd) zf.close() return raise Exception('Unhandled extraction for type {0}'.format(archive)) def file_copytree(self, src, dest): print('copy {} to {}'.format(src, dest)) if os.path.isdir(src): if not os.path.isdir(dest): os.makedirs(dest) files = os.listdir(src) for f in files: self.file_copytree( os.path.join(src, f), os.path.join(dest, f)) else: copyfile(src, dest) def clean_platform(self): self.info('Clean the platform build directory') if not exists(self.platform_dir): return rmtree(self.platform_dir) def download(self, url, filename, cwd=None): def report_hook(index, blksize, size): if size <= 0: progression = '{0} bytes'.format(index * blksize) else: progression = '{0:.2f}%'.format( index * blksize * 100. / float(size)) stdout.write('- Download {}\r'.format(progression)) stdout.flush() url = url + filename if cwd: filename = join(cwd, filename) if self.file_exists(filename): unlink(filename) self.debug('Downloading {0}'.format(url)) urlretrieve(url, filename, report_hook) return filename def get_version(self): c = self.config has_version = c.has_option('app', 'version') has_regex = c.has_option('app', 'version.regex') has_filename = c.has_option('app', 'version.filename') # version number specified if has_version: if has_regex or has_filename: raise Exception( 'version.regex and version.filename conflict with version') return c.get('app', 'version') # search by regex if has_regex or has_filename: if has_regex and not has_filename: raise Exception('version.filename is missing') if has_filename and not has_regex: raise Exception('version.regex is missing') fn = c.get('app', 'version.filename') with open(fn) as fd: data = fd.read() regex = c.get('app', 'version.regex') match = search(regex, data) if not match: raise Exception( 'Unable to find capture version in {0}\n' ' (looking for `{1}`)'.format(fn, regex)) version = match.groups()[0] self.debug('Captured version: {0}'.format(version)) return version raise Exception('Missing version or version.regex + version.filename') def build_application(self): self._copy_application_sources() self._copy_application_libs() self._copy_garden_libs() self._add_sitecustomize() def _copy_application_sources(self): # XXX clean the inclusion/exclusion algo. source_dir = realpath(self.config.getdefault('app', 'source.dir', '.')) include_exts = self.config.getlist('app', 'source.include_exts', '') exclude_exts = self.config.getlist('app', 'source.exclude_exts', '') exclude_dirs = self.config.getlist('app', 'source.exclude_dirs', '') exclude_patterns = self.config.getlist('app', 'source.exclude_patterns', '') app_dir = self.app_dir self.debug('Copy application source from {}'.format(source_dir)) rmtree(self.app_dir) for root, dirs, files in walk(source_dir): # avoid hidden directory if True in [x.startswith('.') for x in root.split(sep)]: continue # need to have sort-of normalization. 
Let's say you want to exclude # image directory but not images, the filtered_root must have a / at # the end, same for the exclude_dir. And then we can safely compare filtered_root = root[len(source_dir) + 1:].lower() if filtered_root: filtered_root += '/' # manual exclude_dirs approach is_excluded = False for exclude_dir in exclude_dirs: if exclude_dir[-1] != '/': exclude_dir += '/' if filtered_root.startswith(exclude_dir): is_excluded = True break if is_excluded: continue # pattern matching for pattern in exclude_patterns: if fnmatch(filtered_root, pattern): is_excluded = True break if is_excluded: continue for fn in files: # avoid hidden files if fn.startswith('.'): continue # exclusion by pattern matching is_excluded = False dfn = fn.lower() if filtered_root: dfn = join(filtered_root, fn) for pattern in exclude_patterns: if fnmatch(dfn, pattern): is_excluded = True break if is_excluded: continue # filter based on the extension # TODO more filters basename, ext = splitext(fn) if ext: ext = ext[1:] if include_exts and ext not in include_exts: continue if exclude_exts and ext in exclude_exts: continue sfn = join(root, fn) rfn = realpath(join(app_dir, root[len(source_dir) + 1:], fn)) # ensure the directory exists dfn = dirname(rfn) self.mkdir(dfn) # copy! self.debug('Copy {0}'.format(sfn)) copyfile(sfn, rfn) def _copy_application_libs(self): # copy also the libs copytree(self.applibs_dir, join(self.app_dir, '_applibs')) def _copy_garden_libs(self): if exists(self.gardenlibs_dir): copytree(self.gardenlibs_dir, join(self.app_dir, 'libs')) def _add_sitecustomize(self): copyfile(join(dirname(__file__), 'sitecustomize.py'), join(self.app_dir, 'sitecustomize.py')) main_py = join(self.app_dir, 'service', 'main.py') if not self.file_exists(main_py): #self.error('Unable to patch main_py to add applibs directory.') return header = ('import sys, os; ' 'sys.path = [os.path.join(os.getcwd(),' '"..", "_applibs")] + sys.path\n') with open(main_py, 'rb') as fd: data = fd.read() data = header + data with open(main_py, 'wb') as fd: fd.write(data) self.info('Patched service/main.py to include applibs') def namify(self, name): '''Return a "valid" name from a name with lot of invalid chars (allowed characters: a-z, A-Z, 0-9, -, _) ''' return re.sub('[^a-zA-Z0-9_\-]', '_', name) @property def root_dir(self): return realpath(join(dirname(self.specfilename))) @property def buildozer_dir(self): if self.builddir: return join(self.builddir, '.buildozer') return join(self.root_dir, '.buildozer') @property def bin_dir(self): return join(self.root_dir, 'bin') @property def platform_dir(self): return join(self.buildozer_dir, self.targetname, 'platform') @property def app_dir(self): return join(self.buildozer_dir, self.targetname, 'app') @property def applibs_dir(self): return join(self.buildozer_dir, 'applibs') @property def gardenlibs_dir(self): return join(self.buildozer_dir, 'libs') @property def global_buildozer_dir(self): return join(expanduser('~'), '.buildozer') @property def global_platform_dir(self): return join(self.global_buildozer_dir, self.targetname, 'platform') @property def global_packages_dir(self): return join(self.global_buildozer_dir, self.targetname, 'packages') @property def global_cache_dir(self): return join(self.global_buildozer_dir, 'cache') @property def package_full_name(self): package_name = self.config.getdefault('app', 'package.name', '') package_domain = self.config.getdefault('app', 'package.domain', '') if package_domain == '': return package_name return '{}.{}'.format(package_domain, 
package_name) # # command line invocation # def targets(self): for fn in listdir(join(dirname(__file__), 'targets')): if fn.startswith('.') or fn.startswith('__'): continue if not fn.endswith('.py'): continue target = fn[:-3] try: m = __import__('buildozer.targets.{0}'.format(target), fromlist=['buildozer']) yield target, m except NotImplementedError: pass except: raise pass def usage(self): print('Usage:') print(' buildozer [--profile <name>] [--verbose] [target] <command>...') print(' buildozer --version') print('') print('Available targets:') targets = list(self.targets()) for target, m in targets: doc = m.__doc__.strip().splitlines()[0].strip() print(' {0:<18} {1}'.format(target, doc)) print('') print('Global commands (without target):') cmds = [x for x in dir(self) if x.startswith('cmd_')] for cmd in cmds: name = cmd[4:] meth = getattr(self, cmd) if not meth.__doc__: continue doc = [x for x in meth.__doc__.strip().splitlines()][0].strip() print(' {0:<18} {1}'.format(name, doc)) print('') print('Target commands:') print(' clean Clean the target environment') print(' update Update the target dependencies') print(' debug Build the application in debug mode') print(' release Build the application in release mode') print(' deploy Deploy the application on the device') print(' run Run the application on the device') print(' serve Serve the bin directory via SimpleHTTPServer') for target, m in targets: mt = m.get_target(self) commands = mt.get_custom_commands() if not commands: continue print('') print('Target "{0}" commands:'.format(target)) for command, doc in commands: if not doc: continue doc = doc.strip().splitlines()[0].strip() print(' {0:<18} {1}'.format(command, doc)) print('') def run_default(self): self.check_build_layout() if 'buildozer:defaultcommand' not in self.state: print('No default command set.') print('Use "buildozer setdefault <command args...>"') print('Use "buildozer help" for a list of all commands"') exit(1) cmd = self.state['buildozer:defaultcommand'] self.run_command(cmd) def run_command(self, args): while args: if not args[0].startswith('-'): break arg = args.pop(0) if arg in ('-v', '--verbose'): self.log_level = 2 elif arg in ('-h', '--help'): self.usage() exit(0) elif arg in ('-p', '--profile'): self.config_profile = args.pop(0) elif arg == '--version': print('Buildozer {0}'.format(__version__)) exit(0) self._merge_config_profile() self.check_root() if not args: self.run_default() return command, args = args[0], args[1:] cmd = 'cmd_{0}'.format(command) # internal commands ? if hasattr(self, cmd): getattr(self, cmd)(*args) return # maybe it's a target? targets = [x[0] for x in self.targets()] if command not in targets: print('Unknown command/target {}'.format(command)) exit(1) self.set_target(command) self.target.run_commands(args) def check_root(self): '''If effective user id is 0, display a warning and require user input to continue (or to cancel)''' if IS_PY3: input_func = input else: input_func = raw_input warn_on_root = self.config.getdefault('buildozer', 'warn_on_root', '1') try: euid = os.geteuid() == 0 except AttributeError: if sys.platform == 'win32': import ctypes euid = ctypes.windll.shell32.IsUserAnAdmin() != 0 if warn_on_root == '1' and euid: print('\033[91m\033[1mBuildozer is running as root!\033[0m') print('\033[91mThis is \033[1mnot\033[0m \033[91mrecommended, and may lead to problems later.\033[0m') cont = None while cont not in ('y', 'n'): cont = input_func('Are you sure you want to continue [y/n]? 
') if cont == 'n': sys.exit() def cmd_init(self, *args): '''Create a initial buildozer.spec in the current directory ''' if exists('buildozer.spec'): print('ERROR: You already have a buildozer.spec file.') exit(1) copyfile(join(dirname(__file__), 'default.spec'), 'buildozer.spec') print('File buildozer.spec created, ready to customize!') def cmd_distclean(self, *args): '''Clean the whole Buildozer environment. ''' print("Warning: Your ndk, sdk and all other cached packages will be" " removed. Continue? (y/n)") if sys.stdin.readline().lower()[0] == 'y': self.info('Clean the global build directory') if not exists(self.global_buildozer_dir): return rmtree(self.global_buildozer_dir) def cmd_help(self, *args): '''Show the Buildozer help. ''' self.usage() def cmd_setdefault(self, *args): '''Set the default command to do when to arguments are given ''' self.check_build_layout() self.state['buildozer:defaultcommand'] = args def cmd_version(self, *args): '''Show the Buildozer version ''' print('Buildozer {0}'.format(__version__)) def cmd_serve(self, *args): '''Serve the bin directory via SimpleHTTPServer ''' try: from http.server import SimpleHTTPRequestHandler from socketserver import TCPServer except ImportError: from SimpleHTTPServer import SimpleHTTPRequestHandler from SocketServer import TCPServer os.chdir(self.bin_dir) handler = SimpleHTTPRequestHandler httpd = TCPServer(("", SIMPLE_HTTP_SERVER_PORT), handler) print("Serving via HTTP at port {}".format(SIMPLE_HTTP_SERVER_PORT)) print("Press Ctrl+c to quit serving.") httpd.serve_forever() # # Private # def _merge_config_profile(self): profile = self.config_profile if not profile: return for section in self.config.sections(): # extract the profile part from the section name # example: [app@default,hd] parts = section.split('@', 1) if len(parts) < 2: continue # create a list that contain all the profiles of the current section # ['default', 'hd'] section_base, section_profiles = parts section_profiles = section_profiles.split(',') if profile not in section_profiles: continue # the current profile is one available in the section # merge with the general section, or make it one. if not self.config.has_section(section_base): self.config.add_section(section_base) for name, value in self.config.items(section): print('merged ({}, {}) into {} (profile is {})'.format(name, value, section_base, profile)) self.config.set(section_base, name, value) def _get_config_list_values(self, *args, **kwargs): kwargs['with_values'] = True return self._get_config_list(*args, **kwargs) def _get_config_list(self, section, token, default=None, with_values=False): # monkey-patch method for ConfigParser # get a key as a list of string, seperated from the comma # check if an env var exists that should replace the file config set_config_token_from_env(section, token, self.config) # if a section:token is defined, let's use the content as a list. 
l_section = '{}:{}'.format(section, token) if self.config.has_section(l_section): values = self.config.options(l_section) if with_values: return ['{}={}'.format(key, self.config.get(l_section, key)) for key in values] else: return [x.strip() for x in values] values = self.config.getdefault(section, token, '') if not values: return default values = values.split(',') if not values: return default return [x.strip() for x in values] def _get_config_default(self, section, token, default=None): # monkey-patch method for ConfigParser # get an appropriate env var if it exists, else # get a key in a section, or the default # check if an env var exists that should replace the file config set_config_token_from_env(section, token, self.config) if not self.config.has_section(section): return default if not self.config.has_option(section, token): return default return self.config.get(section, token) def _get_config_bool(self, section, token, default=False): # monkey-patch method for ConfigParser # get a key in a section, or the default # check if an env var exists that should replace the file config set_config_token_from_env(section, token, self.config) if not self.config.has_section(section): return default if not self.config.has_option(section, token): return default return self.config.getboolean(section, token) def set_config_from_envs(config): '''Takes a ConfigParser, and checks every section/token for an environment variable of the form SECTION_TOKEN, with any dots replaced by underscores. If the variable exists, sets the config variable to the env value. ''' for section in config.sections(): for token in config.options(section): set_config_token_from_env(section, token, config) def set_config_token_from_env(section, token, config): '''Given a config section and token, checks for an appropriate environment variable. If the variable exists, sets the config entry to its value. The environment variable checked is of the form SECTION_TOKEN, all upper case, with any dots replaced by underscores. Returns True if the environment variable exists and was used, or False otherwise. ''' env_var_name = ''.join([section.upper(), '_', token.upper().replace('.', '_')]) env_var = os.environ.get(env_var_name) if env_var is None: return False config.set(section, token, env_var) return True
mit
-815,319,661,355,655,400
33.578415
132
0.556986
false
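The `set_config_token_from_env` helper in the file above maps an environment variable named `SECTION_TOKEN` (upper case, dots replaced by underscores) onto a `buildozer.spec` entry. A minimal sketch of that behaviour, using an illustrative `[app] package.name` setting and the plain stdlib `ConfigParser` rather than Buildozer's patched config class:

import os
from configparser import ConfigParser  # stdlib parser, stand-in for Buildozer's config object

config = ConfigParser()
config.add_section('app')
config.set('app', 'package.name', 'myapp')

# The override variable for [app] package.name is APP_PACKAGE_NAME.
env_var_name = ''.join(['APP', '_', 'package.name'.upper().replace('.', '_')])
os.environ[env_var_name] = 'renamedapp'

value = os.environ.get(env_var_name)
if value is not None:
    # Same effect as set_config_token_from_env(): the env var wins.
    config.set('app', 'package.name', value)

print(config.get('app', 'package.name'))  # prints: renamedapp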
red-hood/calendarserver
contrib/performance/_event_change.py
1
3820
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##

"""
Benchmark a server's handling of event summary changes.
"""

from itertools import count
from urllib2 import HTTPDigestAuthHandler

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT

from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample

from _event_create import makeEvent


@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples, fieldName,
            replacer, eventPerSample=False):
    user = password = "user01"
    root = "/"
    principal = "/"
    calendar = "event-%s-benchmark" % (fieldName,)

    authinfo = HTTPDigestAuthHandler()
    authinfo.add_password(
        realm="Test Realm",
        uri="http://%s:%d/" % (host, port),
        user=user,
        passwd=password)
    agent = AuthHandlerAgent(Agent(reactor), authinfo)

    # Set up the calendar first
    yield initialize(agent, host, port, user, password, root, principal,
                     calendar)

    if eventPerSample:
        # Create an event for each sample that will be taken, so that no event
        # is used for two different samples.
        f = _selfish_sample
    else:
        # Just create one event and re-use it for all samples.
        f = _generous_sample

    data = yield f(
        dtrace, replacer, agent, host, port, user, calendar, fieldName,
        attendeeCount, samples)
    returnValue(data)


@inlineCallbacks
def _selfish_sample(dtrace, replacer, agent, host, port, user, calendar,
                    fieldName, attendeeCount, samples):
    url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change-%%d.ics' % (
        host, port, user, calendar, fieldName)

    headers = Headers({"content-type": ["text/calendar"]})

    events = [
        # The organizerSequence here (1) may need to be a parameter.
        # See also the makeEvent call below.
        (makeEvent(i, 1, attendeeCount), url % (i,))
        for i in range(samples)]

    for (event, url) in events:
        yield agent.request('PUT', url, headers, StringProducer(event))

    # Sample changing the event according to the replacer.
    samples = yield sample(
        dtrace, samples, agent,
        (('PUT', url, headers, StringProducer(replacer(event, i)))
         for i, (event, url) in enumerate(events)).next,
        NO_CONTENT)
    returnValue(samples)


@inlineCallbacks
def _generous_sample(dtrace, replacer, agent, host, port, user, calendar,
                     fieldName, attendeeCount, samples):
    url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change.ics' % (
        host, port, user, calendar, fieldName)

    headers = Headers({"content-type": ["text/calendar"]})

    # See the makeEvent call above.
    event = makeEvent(0, 1, attendeeCount)

    yield agent.request('PUT', url, headers, StringProducer(event))

    # Sample changing the event according to the replacer.
    samples = yield sample(
        dtrace, samples, agent,
        (('PUT', url, headers, StringProducer(replacer(event, i)))
         for i in count(1)).next,
        NO_CONTENT)
    returnValue(samples)
apache-2.0
-910,572,483,388,707,700
31.649573
109
0.674607
false
hrhtspr/IkaLog
ikalog/utils/__init__.py
1
1183
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from .ikautils import IkaUtils
from .matcher import IkaMatcher
from .glyph_recoginizer import IkaGlyphRecoginizer
from .gearpower_recoginizer import GearpowerRecoginizer
from .character_recoginizer import CharacterRecoginizer
from .character_recoginizer.number import NumberRecoginizer
from .character_recoginizer.udemae import UdemaeRecoginizer
from .character_recoginizer.fes_gender import FesGenderRecoginizer
from .character_recoginizer.fes_level import FesLevelRecoginizer
apache-2.0
-2,459,047,259,338,792,400
38.433333
75
0.782756
false
nwjs/chromium.src
testing/scripts/run_performance_tests.py
1
27631
#!/usr/bin/env python # Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs telemetry benchmarks and gtest perf tests. This script attempts to emulate the contract of gtest-style tests invoked via recipes. The main contract is that the caller passes the argument: --isolated-script-test-output=[FILENAME] json is written to that file in the format detailed here: https://www.chromium.org/developers/the-json-test-results-format Optional argument: --isolated-script-test-filter=[TEST_NAMES] is a double-colon-separated ("::") list of test names, to run just that subset of tests. This list is forwarded to the run_telemetry_benchmark_as_googletest script. This script is intended to be the base command invoked by the isolate, followed by a subsequent Python script. It could be generalized to invoke an arbitrary executable. It currently runs several benchmarks. The benchmarks it will execute are based on the shard it is running on and the sharding_map_path. If this is executed with a gtest perf test, the flag --non-telemetry has to be passed in to the script so the script knows it is running an executable and not the run_benchmark command. This script obeys the --isolated-script-test-output flag and merges test results from all the benchmarks into the one output.json file. The test results and perf results are also put in separate directories per benchmark. Two files will be present in each directory; perf_results.json, which is the perf specific results (with unenforced format, could be histogram or graph json), and test_results.json, which is a JSON test results format file https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md TESTING: To test changes to this script, please run cd tools/perf ./run_tests ScriptsSmokeTest.testRunPerformanceTests """ import argparse import json import os import shutil import sys import time import tempfile import traceback import common CHROMIUM_SRC_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) PERF_DIR = os.path.join(CHROMIUM_SRC_DIR, 'tools', 'perf') sys.path.append(PERF_DIR) import generate_legacy_perf_dashboard_json from core import path_util PERF_CORE_DIR = os.path.join(PERF_DIR, 'core') sys.path.append(PERF_CORE_DIR) import results_merger # Add src/testing/ into sys.path for importing xvfb and test_env. sys.path.append(os.path.join(os.path.dirname(__file__), '..')) import xvfb import test_env # Unfortunately we need to copy these variables from ../test_env.py. # Importing it and using its get_sandbox_env breaks test runs on Linux # (it seems to unset DISPLAY). CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX' CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' SHARD_MAPS_DIRECTORY = os.path.join( os.path.dirname(__file__), '..', '..', 'tools', 'perf', 'core', 'shard_maps') # See https://crbug.com/923564. # We want to switch over to using histograms for everything, but converting from # the format output by gtest perf tests to histograms has introduced several # problems. So, only perform the conversion on tests that are whitelisted and # are okay with potentially encountering issues. 
GTEST_CONVERSION_WHITELIST = [ 'angle_perftests', 'base_perftests', 'cc_perftests', 'components_perftests', 'command_buffer_perftests', 'dawn_perf_tests', 'gpu_perftests', 'load_library_perf_tests', 'media_perftests', 'net_perftests', 'browser_tests', 'services_perftests', 'tracing_perftests', 'views_perftests', 'viz_perftests', 'wayland_client_perftests', 'xr.vr.common_perftests', ] class OutputFilePaths(object): """Provide paths to where results outputs should be written. The process_perf_results.py merge script later will pull all of these together, so that's why they aren't in the standard locations. Also, note that because of the OBBS (One Build Bot Step), Telemetry has multiple tests running on a single shard, so we need to prefix these locations with a directory named by the benchmark name. """ def __init__(self, isolated_out_dir, perf_test_name): self.name = perf_test_name self.benchmark_path = os.path.join(isolated_out_dir, perf_test_name) def SetUp(self): os.makedirs(self.benchmark_path) return self @property def perf_results(self): return os.path.join(self.benchmark_path, 'perf_results.json') @property def test_results(self): return os.path.join(self.benchmark_path, 'test_results.json') @property def logs(self): return os.path.join(self.benchmark_path, 'benchmark_log.txt') @property def csv_perf_results(self): """Path for csv perf results. Note that the chrome.perf waterfall uses the json histogram perf results exclusively. csv_perf_results are implemented here in case a user script passes --output-format=csv. """ return os.path.join(self.benchmark_path, 'perf_results.csv') def print_duration(step, start): print 'Duration of %s: %d seconds' % (step, time.time() - start) def IsWindows(): return sys.platform == 'cygwin' or sys.platform.startswith('win') class GtestCommandGenerator(object): def __init__(self, options, override_executable=None, additional_flags=None, ignore_shard_env_vars=False): self._options = options self._override_executable = override_executable self._additional_flags = additional_flags or [] self._ignore_shard_env_vars = ignore_shard_env_vars def generate(self, output_dir): """Generate the command to run to start the gtest perf test. Returns: list of strings, the executable and its arguments. """ return ([self._get_executable()] + self._generate_filter_args() + self._generate_repeat_args() + self._generate_also_run_disabled_tests_args() + self._generate_output_args(output_dir) + self._generate_shard_args() + self._get_additional_flags() ) @property def executable_name(self): """Gets the platform-independent name of the executable.""" return self._override_executable or self._options.executable def _get_executable(self): executable = str(self.executable_name) if IsWindows(): return r'.\%s.exe' % executable else: return './%s' % executable def _get_additional_flags(self): return self._additional_flags def _generate_shard_args(self): """Teach the gtest to ignore the environment variables. GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS will confuse the gtest and convince it to only run some of its tests. Instead run all of them. """ if self._ignore_shard_env_vars: return ['--test-launcher-total-shards=1', '--test-launcher-shard-index=0'] return [] def _generate_filter_args(self): if self._options.isolated_script_test_filter: filter_list = common.extract_filter_list( self._options.isolated_script_test_filter) return ['--gtest_filter=' + ':'.join(filter_list)] return [] def _generate_repeat_args(self): # TODO(crbug.com/920002): Support --isolated-script-test-repeat. 
return [] def _generate_also_run_disabled_tests_args(self): # TODO(crbug.com/920002): Support # --isolated-script-test-also-run-disabled-tests. return [] def _generate_output_args(self, output_dir): output_args = [] if self._options.use_gtest_benchmark_script: output_args.append('--output-dir=' + output_dir) # These flags are to make sure that test output perf metrics in the log. if not '--verbose' in self._get_additional_flags(): output_args.append('--verbose') if (not '--test-launcher-print-test-stdio=always' in self._get_additional_flags()): output_args.append('--test-launcher-print-test-stdio=always') return output_args def write_simple_test_results(return_code, output_filepath, benchmark_name): # TODO(crbug.com/920002): Fix to output # https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md # for each test rather than this summary. output_json = { 'tests': { benchmark_name: { 'expected': 'PASS', 'actual': 'FAIL' if return_code else 'PASS', }, }, 'interrupted': False, 'path_delimiter': '/', 'version': 3, 'seconds_since_epoch': time.time(), 'num_failures_by_type': { 'FAIL': 1 if return_code else 0, 'PASS': 0 if return_code else 1, }, } with open(output_filepath, 'w') as fh: json.dump(output_json, fh) def execute_gtest_perf_test(command_generator, output_paths, use_xvfb=False): env = os.environ.copy() # Assume we want to set up the sandbox environment variables all the # time; doing so is harmless on non-Linux platforms and is needed # all the time on Linux. env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH env['CHROME_HEADLESS'] = '1' return_code = 0 try: command = command_generator.generate(output_paths.benchmark_path) if use_xvfb: # When running with xvfb, we currently output both to stdout and to the # file. It would be better to only output to the file to keep the logs # clean. return_code = xvfb.run_executable( command, env, stdoutfile=output_paths.logs) else: with open(output_paths.logs, 'w') as handle: try: return_code = test_env.run_command_output_to_handle( command, handle, env=env) except OSError as e: print('Command to run gtest perf test %s failed with an OSError: %s' % (output_paths.name, e)) return_code = 1 if (not os.path.exists(output_paths.perf_results) and os.path.exists(output_paths.logs)): # Get the correct json format from the stdout to write to the perf # results file if gtest does not generate one. 
results_processor = generate_legacy_perf_dashboard_json.\ LegacyResultsProcessor() graph_json_string = results_processor.GenerateJsonResults( output_paths.logs) with open(output_paths.perf_results, 'w') as fh: fh.write(graph_json_string) except Exception: traceback.print_exc() return_code = 1 if os.path.exists(output_paths.perf_results): if command_generator.executable_name in GTEST_CONVERSION_WHITELIST: with path_util.SysPath(path_util.GetTracingDir()): # pylint: disable=no-name-in-module from tracing.value import gtest_json_converter # pylint: enable=no-name-in-module gtest_json_converter.ConvertGtestJsonFile(output_paths.perf_results) else: print('ERROR: gtest perf test %s did not generate perf output' % output_paths.name) return_code = 1 write_simple_test_results(return_code, output_paths.test_results, output_paths.name) return return_code class _TelemetryFilterArgument(object): def __init__(self, filter_string): self.benchmark, self.story = filter_string.split('/') class TelemetryCommandGenerator(object): def __init__(self, benchmark, options, story_selection_config=None, is_reference=False): self.benchmark = benchmark self._options = options self._story_selection_config = story_selection_config self._is_reference = is_reference def generate(self, output_dir): """Generate the command to run to start the benchmark. Args: output_dir: The directory to configure the command to put output files into. Returns: list of strings, the executable and its arguments. """ return ([sys.executable, self._options.executable] + [self.benchmark] + self._generate_filter_args() + self._generate_also_run_disabled_tests_args() + self._generate_output_args(output_dir) + self._generate_story_selection_args() + # passthrough args must be before reference args and repeat args: # crbug.com/928928, crbug.com/894254#c78 self._get_passthrough_args() + self._generate_repeat_args() + self._generate_reference_build_args() ) def _get_passthrough_args(self): return self._options.passthrough_args def _generate_filter_args(self): if self._options.isolated_script_test_filter: filter_list = common.extract_filter_list( self._options.isolated_script_test_filter) filter_arguments = [_TelemetryFilterArgument(f) for f in filter_list] applicable_stories = [ f.story for f in filter_arguments if f.benchmark == self.benchmark] # Need to convert this to a valid regex. filter_regex = '(' + '|'.join(applicable_stories) + ')' return ['--story-filter=' + filter_regex] return [] def _generate_repeat_args(self): if self._options.isolated_script_test_repeat: return ['--pageset-repeat=' + str( self._options.isolated_script_test_repeat)] return [] def _generate_also_run_disabled_tests_args(self): if self._options.isolated_script_test_also_run_disabled_tests: return ['--also-run-disabled-tests'] return [] def _generate_output_args(self, output_dir): return ['--output-format=json-test-results', '--output-format=histograms', '--output-dir=' + output_dir] def _generate_story_selection_args(self): """Returns arguments that limit the stories to be run inside the benchmark. 
""" selection_args = [] if self._story_selection_config: if 'begin' in self._story_selection_config: selection_args.append('--story-shard-begin-index=%d' % ( self._story_selection_config['begin'])) if 'end' in self._story_selection_config: selection_args.append('--story-shard-end-index=%d' % ( self._story_selection_config['end'])) if self._story_selection_config.get('abridged', True): selection_args.append('--run-abridged-story-set') return selection_args def _generate_reference_build_args(self): if self._is_reference: reference_browser_flag = '--browser=reference' # TODO(crbug.com/1038137): Make the logic generic once more reference # settings are added if '--browser=android-chrome-bundle' in self._get_passthrough_args(): reference_browser_flag = '--browser=reference-android-chrome-bundle' return [reference_browser_flag, '--max-failures=5'] return [] def execute_telemetry_benchmark( command_generator, output_paths, use_xvfb=False): start = time.time() env = os.environ.copy() env['CHROME_HEADLESS'] = '1' # Assume we want to set up the sandbox environment variables all the # time; doing so is harmless on non-Linux platforms and is needed # all the time on Linux. env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH return_code = 1 temp_dir = tempfile.mkdtemp('telemetry') try: command = command_generator.generate(temp_dir) if use_xvfb: # When running with xvfb, we currently output both to stdout and to the # file. It would be better to only output to the file to keep the logs # clean. return_code = xvfb.run_executable( command, env=env, stdoutfile=output_paths.logs) else: with open(output_paths.logs, 'w') as handle: return_code = test_env.run_command_output_to_handle( command, handle, env=env) expected_results_filename = os.path.join(temp_dir, 'test-results.json') if os.path.exists(expected_results_filename): shutil.move(expected_results_filename, output_paths.test_results) else: common.write_interrupted_test_results_to(output_paths.test_results, start) expected_perf_filename = os.path.join(temp_dir, 'histograms.json') shutil.move(expected_perf_filename, output_paths.perf_results) csv_file_path = os.path.join(temp_dir, 'results.csv') if os.path.isfile(csv_file_path): shutil.move(csv_file_path, output_paths.csv_perf_results) except Exception: print ('The following exception may have prevented the code from ' 'outputing structured test results and perf results output:') print traceback.format_exc() finally: # Add ignore_errors=True because otherwise rmtree may fail due to leaky # processes of tests are still holding opened handles to files under # |tempfile_dir|. For example, see crbug.com/865896 shutil.rmtree(temp_dir, ignore_errors=True) print_duration('executing benchmark %s' % command_generator.benchmark, start) # Telemetry sets exit code to -1 to indicate that no stories were run. This # becomes 255 on linux because linux doesn't support -1 so it does modulo: # -1 % 256 == 255. # TODO(crbug.com/1019139): Make 111 be the exit code that means # "no stories were run.". if return_code in (111, -1, 255): print ('Exit code %s indicates that no stories were run, so we are marking ' 'this as a success.' % return_code) return 0 if return_code: return return_code return 0 def parse_arguments(args): parser = argparse.ArgumentParser() parser.add_argument('executable', help='The name of the executable to run.') parser.add_argument( '--isolated-script-test-output', required=True) # The following two flags may be passed in sometimes by Pinpoint # or by the recipe, but they don't do anything. crbug.com/927482. 
parser.add_argument( '--isolated-script-test-chartjson-output', required=False) parser.add_argument( '--isolated-script-test-perf-output', required=False) parser.add_argument( '--isolated-script-test-filter', type=str, required=False) # Note that the following three arguments are only supported by Telemetry # tests right now. See crbug.com/920002. parser.add_argument( '--isolated-script-test-repeat', type=int, required=False) parser.add_argument( '--isolated-script-test-launcher-retry-limit', type=int, required=False, choices=[0]) # Telemetry does not support retries. crbug.com/894254#c21 parser.add_argument( '--isolated-script-test-also-run-disabled-tests', default=False, action='store_true', required=False) parser.add_argument('--xvfb', help='Start xvfb.', action='store_true') parser.add_argument('--non-telemetry', help='Type of perf test', type=bool, default=False) parser.add_argument('--gtest-benchmark-name', help='Name of the gtest benchmark', type=str, required=False) parser.add_argument('--use-gtest-benchmark-script', help='Whether gtest is invoked via benchmark script.', default=False, action='store_true') parser.add_argument('--benchmarks', help='Comma separated list of benchmark names' ' to run in lieu of indexing into our benchmark bot maps', required=False) # Some executions may have a different sharding scheme and/or set of tests. # These files must live in src/tools/perf/core/shard_maps parser.add_argument('--test-shard-map-filename', type=str, required=False) parser.add_argument('--run-ref-build', help='Run test on reference browser', action='store_true') parser.add_argument('--passthrough-arg', help='Arguments to pass directly through to the test ' 'executable.', action='append', dest='passthrough_args', default=[]) options, leftover_args = parser.parse_known_args(args) options.passthrough_args.extend(leftover_args) return options def main(sys_args): args = sys_args[1:] # Skip program name. options = parse_arguments(args) isolated_out_dir = os.path.dirname(options.isolated_script_test_output) overall_return_code = 0 # This is a list of test results files to be merged into a standard # output.json file for use by infrastructure including FindIt. # This list should not contain reference build runs # since we do not monitor those. Also, merging test reference build results # with standard build results may not work properly. test_results_files = [] print('Running a series of performance test subprocesses. Logs, performance\n' 'results, and test results JSON will be saved in a subfolder of the\n' 'isolated output directory. Inside the hash marks in the following\n' 'lines is the name of the subfolder to find results in.\n') if options.non_telemetry: command_generator = GtestCommandGenerator( options, additional_flags=options.passthrough_args) benchmark_name = options.gtest_benchmark_name # Fallback to use the name of the executable if flag isn't set. # TODO(crbug.com/870899): remove fallback logic and raise parser error if # --non-telemetry is set but --gtest-benchmark-name is not set once pinpoint # is converted to always pass --gtest-benchmark-name flag. 
if not benchmark_name: benchmark_name = options.executable output_paths = OutputFilePaths(isolated_out_dir, benchmark_name).SetUp() print('\n### {folder} ###'.format(folder=benchmark_name)) overall_return_code = execute_gtest_perf_test( command_generator, output_paths, options.xvfb) test_results_files.append(output_paths.test_results) else: # If the user has supplied a list of benchmark names, execute those instead # of using the shard map. if options.benchmarks: benchmarks = options.benchmarks.split(',') for benchmark in benchmarks: output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp() command_generator = TelemetryCommandGenerator( benchmark, options) print('\n### {folder} ###'.format(folder=benchmark)) return_code = execute_telemetry_benchmark( command_generator, output_paths, options.xvfb) overall_return_code = return_code or overall_return_code test_results_files.append(output_paths.test_results) if options.run_ref_build: print ('Not running reference build. --run-ref-build argument is only ' 'supported for sharded benchmarks. It is simple to support ' 'this for unsharded --benchmarks if needed.') elif options.test_shard_map_filename: # First determine what shard we are running on to know how to # index into the bot map to get list of telemetry benchmarks to run. shard_index = None shard_map_path = os.path.join(SHARD_MAPS_DIRECTORY, options.test_shard_map_filename) # Copy sharding map file to isolated_out_dir so that the merge script # can collect it later. # TODO(crouleau): Move this step over to merge script # (process_perf_results.py). shutil.copyfile( shard_map_path, os.path.join(isolated_out_dir, 'benchmarks_shard_map.json')) with open(shard_map_path) as f: shard_map = json.load(f) env = os.environ.copy() if 'GTEST_SHARD_INDEX' in env: shard_index = env['GTEST_SHARD_INDEX'] # TODO(crbug.com/972844): shard environment variables are not specified # for single-shard shard runs. if not shard_index: shard_map_has_multiple_shards = bool(shard_map.get('1', False)) if not shard_map_has_multiple_shards: shard_index = '0' if not shard_index: raise Exception( 'Sharded Telemetry perf tests must either specify --benchmarks ' 'list or have GTEST_SHARD_INDEX environment variable present.') shard_configuration = shard_map[shard_index] assert ('benchmarks' in shard_configuration or 'executables' in shard_configuration), ( 'Every shard must have benchmarks or executables associated ' 'with it.') if 'benchmarks' in shard_configuration: benchmarks_and_configs = shard_configuration['benchmarks'] for (benchmark, story_selection_config ) in benchmarks_and_configs.iteritems(): # Need to run the benchmark on both latest browser and reference # build. 
output_paths = OutputFilePaths(isolated_out_dir, benchmark).SetUp() command_generator = TelemetryCommandGenerator( benchmark, options, story_selection_config=story_selection_config) print('\n### {folder} ###'.format(folder=benchmark)) return_code = execute_telemetry_benchmark( command_generator, output_paths, options.xvfb) overall_return_code = return_code or overall_return_code test_results_files.append(output_paths.test_results) if options.run_ref_build: reference_benchmark_foldername = benchmark + '.reference' reference_output_paths = OutputFilePaths( isolated_out_dir, reference_benchmark_foldername).SetUp() reference_command_generator = TelemetryCommandGenerator( benchmark, options, story_selection_config=story_selection_config, is_reference=True) print('\n### {folder} ###'.format( folder=reference_benchmark_foldername)) # We intentionally ignore the return code and test results of the # reference build. execute_telemetry_benchmark( reference_command_generator, reference_output_paths, options.xvfb) if 'executables' in shard_configuration: names_and_configs = shard_configuration['executables'] for (name, configuration ) in names_and_configs.iteritems(): additional_flags = [] if 'arguments' in configuration: additional_flags = configuration['arguments'] command_generator = GtestCommandGenerator( options, override_executable=configuration['path'], additional_flags=additional_flags, ignore_shard_env_vars=True) output_paths = OutputFilePaths(isolated_out_dir, name).SetUp() print('\n### {folder} ###'.format(folder=name)) return_code = execute_gtest_perf_test( command_generator, output_paths, options.xvfb) overall_return_code = return_code or overall_return_code test_results_files.append(output_paths.test_results) else: raise Exception('Telemetry tests must provide either a shard map or a ' '--benchmarks list so that we know which stories to run.') test_results_list = [] for test_results_file in test_results_files: if os.path.exists(test_results_file): with open(test_results_file, 'r') as fh: test_results_list.append(json.load(fh)) merged_test_results = results_merger.merge_test_results(test_results_list) with open(options.isolated_script_test_output, 'w') as f: json.dump(merged_test_results, f) return overall_return_code # This is not really a "script test" so does not need to manually add # any additional compile targets. def main_compile_targets(args): json.dump([], args.output) if __name__ == '__main__': # Conform minimally to the protocol defined by ScriptTest. if 'compile_targets' in sys.argv: funcs = { 'run': None, 'compile_targets': main_compile_targets, } sys.exit(common.run_script(sys.argv[1:], funcs)) sys.exit(main(sys.argv))
bsd-3-clause
5,552,059,738,795,052,000
39.161337
100
0.673374
false
google/trax
trax/layers/research/efficient_attention_test.py
1
17561
# coding=utf-8 # Copyright 2021 The Trax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Tests for trax.layers.research.efficient_attention.""" from absl.testing import parameterized import jax import numpy as np from tensorflow import test from trax import fastmath from trax import shapes from trax.fastmath import numpy as jnp from trax.layers.research import efficient_attention class EfficientAttentionTest(test.TestCase, parameterized.TestCase): def test_self_attention(self): with fastmath.use_backend(fastmath.Backend.JAX): layer = efficient_attention.SelfAttention( n_heads=5, d_qk=7, d_v=17, share_qk=False, causal=True, chunk_len=8, n_chunks_before=1, n_chunks_after=0, use_reference_code=True, attention_dropout=0.0, mode='train') x = np.ones((3, 32, 8)).astype(np.float32) _, _ = layer.init(shapes.signature(x)) y = layer(x) self.assertEqual(y.shape, x.shape) def test_lsh_ff(self): with fastmath.use_backend(fastmath.Backend.JAX): layer = efficient_attention.LSHFF(d_ff=1024*8, n_buckets=[16, 8]) x = np.ones((3, 7, 1024)).astype(np.float32) _, _ = layer.init(shapes.signature(x)) y = layer(x) self.assertEqual(y.shape, x.shape) def test_self_attention_tf(self): with fastmath.use_backend(fastmath.Backend.TFNP): layer = efficient_attention.SelfAttention( n_heads=5, d_qk=7, d_v=17, share_qk=False, causal=True, chunk_len=8, n_chunks_before=1, n_chunks_after=0, use_reference_code=True, attention_dropout=0.0, mode='train') x = np.ones((3, 32, 8)).astype(np.float32) _, _ = layer.init(shapes.signature(x)) y = layer(x) self.assertEqual(y.shape, x.shape) def test_lsh_self_attention(self): with fastmath.use_backend(fastmath.Backend.JAX): layer = efficient_attention.LSHSelfAttention( n_heads=5, d_qk=7, d_v=17, causal=True, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=2, n_buckets=4, use_reference_code=True, attention_dropout=0.0, mode='train') x = np.ones((3, 32, 8)).astype(np.float32) _, _ = layer.init(shapes.signature(x)) y = layer(x) self.assertEqual(y.shape, x.shape) def _run_forward_and_backward(self, model, inp, weights, state): def forward(inp, weights): return model.pure_fn( inp, weights, state, rng=jax.random.PRNGKey(0)) out, vjpfun, new_state = jax.vjp(forward, inp, weights, has_aux=True) inp_grad, weights_grad = vjpfun(fastmath.numpy.ones_like(inp)) return out, new_state, inp_grad, weights_grad def _test_equivalence_to_reference_code( self, model_cls, inp, input_signature, common_kwargs, *test_kwargs): ref_model = model_cls(use_reference_code=True, **common_kwargs) rng = fastmath.random.get_prng(123) weights, state = ref_model.init(input_signature, rng) ref_all = self._run_forward_and_backward(ref_model, inp, weights, state) ref_out, ref_state, ref_inp_grad, ref_weights_grad = ref_all for kwargs in test_kwargs: test_model = model_cls(**common_kwargs, **kwargs) state = test_model.init(input_signature, rng)[1] test_all = self._run_forward_and_backward(test_model, inp, weights, state) test_out, test_state, test_inp_grad, test_weights_grad = test_all 
self.assertEqual(jax.tree_structure(ref_out), jax.tree_structure(test_out)) self.assertEqual(jax.tree_structure(ref_state), jax.tree_structure(test_state)) self.assertEqual(jax.tree_structure(ref_inp_grad), jax.tree_structure(test_inp_grad)) self.assertEqual(jax.tree_structure(ref_weights_grad), jax.tree_structure(test_weights_grad)) check_close = lambda x, y: self.assertAllClose(x, y, rtol=2e-3, atol=2e-3) fastmath.nested_map_multiarg(check_close, ref_out, test_out) fastmath.nested_map_multiarg(check_close, ref_state, test_state) fastmath.nested_map_multiarg(check_close, ref_inp_grad, test_inp_grad) fastmath.nested_map_multiarg(check_close, ref_weights_grad, test_weights_grad) def test_batching_self_attention(self): with fastmath.use_backend(fastmath.Backend.JAX): common_kwargs = dict( n_heads=6, d_qk=7, d_v=17, share_qk=False, causal=True, chunk_len=5, n_chunks_before=1, n_chunks_after=0, attention_dropout=0.2, output_dropout=0.1, mode='train', ) test_kwargs = [] for n_parallel_heads in [1, 3, 6, 12]: for use_python_loop in [True, False]: test_kwargs.append(dict(n_parallel_heads=n_parallel_heads, use_python_loop=use_python_loop)) x = jax.random.uniform( jax.random.PRNGKey(0), (2, 10, 13), dtype=jnp.float32) input_signature = shapes.signature(x) self._test_equivalence_to_reference_code( efficient_attention.SelfAttention, x, input_signature, common_kwargs, *test_kwargs) def test_batching_lsh_self_attention(self): with fastmath.use_backend(fastmath.Backend.JAX): common_kwargs = dict( n_heads=6, d_qk=7, d_v=17, causal=True, chunk_len=5, n_chunks_before=1, n_chunks_after=0, n_hashes=2, n_buckets=4, attention_dropout=0.2, output_dropout=0.1, mode='train', ) test_kwargs = [] for n_parallel_heads in [1, 3, 6, 12]: for use_python_loop in [True, False]: test_kwargs.append(dict(n_parallel_heads=n_parallel_heads, use_python_loop=use_python_loop)) x = jax.random.uniform( jax.random.PRNGKey(0), (2, 10, 13), dtype=jnp.float32) input_signature = shapes.signature(x) self._test_equivalence_to_reference_code( efficient_attention.LSHSelfAttention, x, input_signature, common_kwargs, *test_kwargs) def _test_fast_inference( self, model_cls, x, input_signature, common_kwargs, *test_kwargs): ref_model = model_cls(use_reference_code=True, mode='eval', **common_kwargs) weights, state = ref_model.init(input_signature) ref_out, _ = ref_model.pure_fn( x, weights, state, rng=jax.random.PRNGKey(0)) def get_slice(pytree, i): def get_slice_for_val(x): if isinstance(x, shapes.ShapeDtype): return shapes.ShapeDtype(shape=x.shape[:1] + (1,) + x.shape[2:], dtype=x.dtype) else: return x[:, i:i+1] return jax.tree_map(get_slice_for_val, pytree) seqlen = x[0].shape[1] if isinstance(x, (tuple, list)) else x.shape[1] for kwargs in test_kwargs: test_model = model_cls(mode='predict', **common_kwargs, **kwargs) cur_state = test_model.init(get_slice(input_signature, 0))[1] out = [] for i in range(seqlen): cur_out, cur_state = test_model.pure_fn( get_slice(x, i), weights, cur_state, jax.random.PRNGKey(0)) out.append(cur_out) out = jnp.concatenate(out, axis=1) self.assertAllClose(out, ref_out, rtol=1e-3, atol=1e-3) def test_fast_inference_self_attention(self): with fastmath.use_backend(fastmath.Backend.JAX): common_kwargs = dict( n_heads=6, d_qk=7, d_v=17, share_qk=False, causal=True, chunk_len=5, n_chunks_before=1, n_chunks_after=0, attention_dropout=0.0, output_dropout=0.0, ) test_kwargs = [] for n_parallel_heads in [1, 3, 6, 12]: for use_python_loop in [True, False]: test_kwargs.append(dict(n_parallel_heads=n_parallel_heads, 
use_python_loop=use_python_loop)) x = jax.random.uniform( jax.random.PRNGKey(0), (2, 10, 13), dtype=jnp.float32) input_signature = shapes.signature(x) self._test_fast_inference( efficient_attention.SelfAttention, x, input_signature, common_kwargs, *test_kwargs) def _test_lsh_self_attention_deterministic_given_seed(self, causal=False): # Once the initialization and the call seeds are pinned down we have # deterministic output. with fastmath.use_backend(fastmath.Backend.JAX): layer = efficient_attention.LSHSelfAttention( n_heads=5, d_qk=7, d_v=17, causal=causal, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=2, n_buckets=4, use_reference_code=True, attention_dropout=0.0, mode='train') x = np.ones((3, 32, 8)).astype(np.float32) def get_output(): _, _ = layer.init(shapes.signature(x), jax.random.PRNGKey(0)) return layer(x, rng=jax.random.PRNGKey(1)) ys = [get_output() for _ in range(10)] self.assertEqual(ys[0].shape, x.shape) for y in ys[1:]: np.testing.assert_array_almost_equal(ys[0], y, decimal=6) def test_lsh_determinism_causal(self): self._test_lsh_self_attention_deterministic_given_seed(causal=True) def test_lsh_determinism_non_causal(self): self._test_lsh_self_attention_deterministic_given_seed(causal=False) def test_lsh_self_attention_masked_non_causal(self): # Test that when the input that is in the masked area changes the attention # for the un-masked outputs doesn't change, but the masked region does # change. with fastmath.use_backend(fastmath.Backend.JAX): layer = efficient_attention.LSHSelfAttention( n_heads=5, d_qk=7, d_v=17, causal=False, masked=True, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=2, n_buckets=4, use_reference_code=True, attention_dropout=0.0, mode='train') batch = 5 max_len = 32 hidden = 8 x = np.random.uniform(size=(batch, max_len, hidden)) mask = np.ones((batch, max_len)).astype(np.bool) rngs = jax.random.randint( jax.random.PRNGKey(0), (batch,), minval=1, maxval=max_len - 1) # Set some suffix of each mask[b] to 0. for i in range(batch): mask[i, rngs[i]:] = 0 # Fix rngs and get the output for the LSH layer. def get_output(x, mask): xs = [x, mask] _, _ = layer.init(shapes.signature(xs), jax.random.PRNGKey(0)) return layer(xs, rng=jax.random.PRNGKey(1)) # Get the attention output for masked x. y = get_output(x, mask) # Change x, but only in the masked regions. for i in range(batch): x[i, rngs[i]:] = np.random.uniform(size=(max_len - rngs[i], hidden)) y2 = get_output(x, mask) for i in range(batch): # y and y2 should be identical in the non-masked part. np.testing.assert_array_almost_equal(y[i, :rngs[i]], y2[i, :rngs[i]], decimal=6) # In the masked out part, they should be different. 
self.assertGreater( np.mean(np.abs(y[i, rngs[i]:] - y2[i, rngs[i]:])), 1e-5) @parameterized.named_parameters(('_weights_2', 2), ('_weights_3', 3)) def test_pure_lsh_wrapper_causal_non_masked(self, num_weights): with fastmath.use_backend(fastmath.Backend.JAX): n_heads = 5 batch, seqlen, d_head = 3, 32, 8 n_hashes = 2 d_model = n_heads * d_head layer = efficient_attention.PureLSHSelfAttentionWrapper( n_heads=n_heads, d_qk=d_head, d_v=d_head, causal=True, masked=False, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=n_hashes, n_buckets=4, bias=False, pure_lsh_implementation=efficient_attention.PureLSHSelfAttention, mode='train', num_weights=num_weights) rng = jax.random.PRNGKey(0) rng, x_rng = jax.random.split(rng) input_shape = (batch, seqlen, d_model) x = jax.random.uniform(x_rng, input_shape, dtype=jnp.float32) inp = x w, s = layer.init(shapes.signature(inp)) o = layer(inp) # Get the actual weights. weights = fastmath.tree_leaves(w) # Assert number of weights is as expected, the extra 1 is for output. self.assertLen(weights, num_weights + 1) # Assert each weight is of the expected shape. for i in range(num_weights + 1): self.assertEqual(weights[i].shape, (d_model, d_model)) # Test that the output and the input shape match. self.assertEqual(inp.shape, o.shape) # Assert state is the shape expected. state = fastmath.tree_leaves(s) self.assertLen(state, 2) # buckets self.assertEqual(state[0].shape, (batch * n_heads, n_hashes * seqlen)) # rngs self.assertEqual(state[1].shape, (batch * n_heads, 2)) @parameterized.named_parameters(('_weights_2', 2), ('_weights_3', 3)) def test_pure_lsh_wrapper_non_causal_masked(self, num_weights): with fastmath.use_backend(fastmath.Backend.JAX): n_heads = 5 batch, seqlen, d_head = 3, 32, 8 num_weights = 2 n_hashes = 2 d_model = n_heads * d_head layer = efficient_attention.PureLSHSelfAttentionWrapper( n_heads=n_heads, d_qk=d_head, d_v=d_head, causal=False, masked=True, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=n_hashes, n_buckets=4, bias=False, pure_lsh_implementation=efficient_attention.PureLSHSelfAttention, mode='train', num_weights=num_weights) rng = jax.random.PRNGKey(0) rng, x_rng = jax.random.split(rng) input_shape = (batch, seqlen, d_model) x = jax.random.uniform(x_rng, input_shape, dtype=jnp.float32) mask = jnp.ones((batch, seqlen), dtype=jnp.int32) inp = (x, mask) w, s = layer.init(shapes.signature(inp)) o = layer(inp) # Get the actual weights. weights = fastmath.tree_leaves(w) # Assert number of weights is as expected, the extra 1 is for output. self.assertLen(weights, num_weights + 1) # Assert each weight is of the expected shape. for i in range(num_weights + 1): self.assertEqual(weights[i].shape, (d_model, d_model)) # Test that the output and the x's shape match. self.assertEqual(x.shape, o.shape) # Assert state is the shape expected. state = fastmath.tree_leaves(s) self.assertLen(state, 2) # buckets self.assertEqual(state[0].shape, (batch * n_heads, n_hashes * seqlen)) # rngs self.assertEqual(state[1].shape, (batch * n_heads, 2)) def test_lsh_and_pure_lsh_self_attention_equivalence(self): # Given the same weight matrices and random numbers, do these produce the # same output. 
with fastmath.use_backend(fastmath.Backend.JAX): n_heads = 4 d_head = 4 d_model = n_heads * d_head pure_lsh_layer = efficient_attention.PureLSHSelfAttention( n_heads=n_heads, d_qk=d_head, d_v=d_head, causal=True, masked=False, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=4, n_buckets=8, use_reference_code=False, attention_dropout=0.0, use_python_loop=True, bias=False, mode='train') lsh_layer = efficient_attention.LSHSelfAttention( n_heads=n_heads, d_qk=d_head, d_v=d_head, causal=True, masked=False, chunk_len=8, n_chunks_before=1, n_chunks_after=0, n_hashes=4, n_buckets=8, use_reference_code=False, attention_dropout=0.0, use_python_loop=True, mode='train') batch, seqlen = 3, 32 input_shape = (batch, seqlen, d_model) x = jax.random.uniform(jax.random.PRNGKey(0), input_shape, dtype=jnp.float32) lsh_layer_input = x call_rng = jax.random.PRNGKey(42) lsh_layer_weights, lsh_layer_state = lsh_layer.init( shapes.signature(lsh_layer_input)) lsh_layer.rng = call_rng lsh_layer_output = lsh_layer(lsh_layer_input) # Shapes are: (n_heads, d_model, d_head), (n_heads, d_model, d_head), # (n_heads, d_head, d_model) # Abbreviated as - hmn, hmn, hnm w_qk, w_v, w_o = lsh_layer_weights qk = jnp.einsum('blm,hmn->bhln', x, w_qk) qk = qk.reshape((-1, qk.shape[2], qk.shape[3])) v = jnp.einsum('blm,hmn->bhln', x, w_v) v = v.reshape((-1, v.shape[2], v.shape[3])) pure_lsh_layer_input = (qk, v) _, _ = pure_lsh_layer.init(shapes.signature(pure_lsh_layer_input)) pure_lsh_layer.rng = call_rng pure_lsh_layer.state = lsh_layer_state pure_lsh_layer_output = pure_lsh_layer(pure_lsh_layer_input) # b*h,l,n pure_lsh_layer_output = pure_lsh_layer_output.reshape( (batch, -1) + pure_lsh_layer_output.shape[1:]) pure_lsh_layer_output_projected = ( jnp.einsum('bhld,hdm->blm', pure_lsh_layer_output, w_o)) diff = pure_lsh_layer_output_projected - lsh_layer_output avg_diff = jnp.sum(jnp.abs(diff)) / jnp.sum(jnp.ones_like(diff)) self.assertLess(avg_diff, 1e-5) if __name__ == '__main__': test.main()
apache-2.0
-3,856,225,903,987,072,000
38.730769
80
0.624452
false
cubicova17/annet
timeseries/redis_metrics/tests/test_utils.py
1
4900
from datetime import date, timedelta from mock import call, patch, Mock from django.conf import settings from django.test import TestCase from ..models import R from .. import utils class TestUtils(TestCase): """Tests for functions in ``redis_metrics.utils``.""" def setUp(self): self.old_host = getattr(settings, 'REDIS_METRICS_HOST', 'localhost') self.old_port = getattr(settings, 'REDIS_METRICS_PORT', 6379) self.old_db = getattr(settings, 'REDIS_METRICS_DB', 0) settings.REDIS_METRICS_HOST = 'localhost' settings.REDIS_METRICS_PORT = 6379 settings.REDIS_METRICS_DB = 0 # The redis client instance on R is a MagicMock object # TODO: create a patcher for StrictRedis def tearDown(self): settings.REDIS_METRICS_HOST = self.old_host settings.REDIS_METRICS_PORT = self.old_port settings.REDIS_METRICS_DB = self.old_db super(TestUtils, self).tearDown() # TODO: unpatch StrictRedis def test_get_r(self): # Global `_redis_model` is None by default self.assertEqual(utils._redis_model, None) with patch("redis_metrics.models.redis.StrictRedis") as mock_redis: r = utils.get_r() self.assertIsInstance(r, R) self.assertEqual(r, utils._redis_model) mock_redis.assert_called_once_with( host="localhost", port=6379, db=0, password=None, connection_pool=None, socket_timeout=None) def test_metric(self): with patch("redis_metrics.utils.get_r") as mock_get_r: utils.metric("test-slug") mock_get_r.assert_has_calls([ call(), call().metric("test-slug", num=1, category=None, expire=None), ]) def test_metric_with_category(self): with patch("redis_metrics.utils.get_r") as mock_get_r: utils.metric("test-slug", category="Woo") mock_get_r.assert_has_calls([ call(), call().metric("test-slug", num=1, category="Woo", expire=None), ]) def test_metric_with_expiration(self): with patch("redis_metrics.utils.get_r") as mock_get_r: utils.metric("test-slug", expire=300) mock_get_r.assert_has_calls([ call(), call().metric("test-slug", num=1, category=None, expire=300), ]) def test_gauge(self): with patch("redis_metrics.utils.get_r") as mock_get_r: utils.gauge("test-slug", 9000) mock_get_r.assert_has_calls([ call(), call().gauge("test-slug", 9000), ]) def test__dates(self): # The following line is the expected result of _dates for 5 days expected = (date.today() - timedelta(days=d) for d in range(5)) result = utils._dates(5) self.assertEqual(type(expected), type(result)) self.assertEqual(list(utils._dates(5)), list(expected)) def test_generate_test_metrics(self): config = { '_build_keys.return_value': ['key'], '_metric_slugs_key': 'MSK', } mock_r = Mock(**config) config = {'return_value': mock_r} with patch("redis_metrics.utils.get_r", **config) as mock_get_r: # When called with random = True with patch("redis_metrics.utils.random") as mock_random: mock_random.randint.return_value = 9999 utils.generate_test_metrics( slug="test_slug", num=1, randomize=True) mock_get_r.assert_called_once_with() mock_r.r.sadd.assert_called_once_with('MSK', 'key') mock_random.seed.assert_called_once_with() mock_random.randint.assert_called_once_with(0, 100 + 100) mock_r.r.incr.assert_called_once_with('key', 9999) mock_get_r.reset_mock() mock_r.reset_mock() # When called with random = False utils.generate_test_metrics( slug="test_slug", num=1, randomize=False) mock_get_r.assert_called_once_with() mock_r.r.sadd.assert_called_once_with('MSK', 'key') mock_r.r.incr.assert_called_once_with('key', 100) def test_delete_test_metrics(self): d = list(utils._dates(1))[0] # Date used inside function with patch('redis_metrics.utils.get_r') as mock_get_r: mock_r = 
mock_get_r.return_value mock_r._metric_slugs_key = 'MSK' mock_r._build_keys.return_value = ['keys'] utils.delete_test_metrics(slug="test-metric", num=1) mock_r._build_keys.assert_called_once_with("test-metric", date=d) mock_r.r.srem.assert_called_once_with('MSK', 'keys') mock_r.r.delete.assert_called_once_with('keys')
mit
2,679,833,120,838,151,000
39.163934
79
0.577347
false
quantopian/zipline
zipline/examples/__init__.py
1
2271
from importlib import import_module
import os

from toolz import merge

from trading_calendars import register_calendar, get_calendar

from zipline import run_algorithm


# These are used by test_examples.py to discover the examples to run.
def load_example_modules():
    example_modules = {}
    for f in os.listdir(os.path.dirname(__file__)):
        if not f.endswith('.py') or f == '__init__.py':
            continue
        modname = f[:-len('.py')]
        mod = import_module('.' + modname, package=__name__)
        example_modules[modname] = mod
        globals()[modname] = mod

    # Remove noise from loop variables.
    del f, modname, mod

    return example_modules


# Columns that we expect to be able to reliably deterministic
# Doesn't include fields that have UUIDS.
_cols_to_check = [
    'algo_volatility',
    'algorithm_period_return',
    'alpha',
    'benchmark_period_return',
    'benchmark_volatility',
    'beta',
    'capital_used',
    'ending_cash',
    'ending_exposure',
    'ending_value',
    'excess_return',
    'gross_leverage',
    'long_exposure',
    'long_value',
    'longs_count',
    'max_drawdown',
    'max_leverage',
    'net_leverage',
    'period_close',
    'period_label',
    'period_open',
    'pnl',
    'portfolio_value',
    'positions',
    'returns',
    'short_exposure',
    'short_value',
    'shorts_count',
    'sortino',
    'starting_cash',
    'starting_exposure',
    'starting_value',
    'trading_days',
    'treasury_period_return',
]


def run_example(example_modules, example_name, environ,
                benchmark_returns=None):
    """
    Run an example module from zipline.examples.
    """
    mod = example_modules[example_name]

    register_calendar("YAHOO", get_calendar("NYSE"), force=True)

    return run_algorithm(
        initialize=getattr(mod, 'initialize', None),
        handle_data=getattr(mod, 'handle_data', None),
        before_trading_start=getattr(mod, 'before_trading_start', None),
        analyze=getattr(mod, 'analyze', None),
        bundle='test',
        environ=environ,
        benchmark_returns=benchmark_returns,
        # Provide a default capital base, but allow the test to override.
        **merge({'capital_base': 1e7}, mod._test_args())
    )
apache-2.0
26,062,786,449,103,360
25.717647
73
0.620872
false
jokey2k/sentry
src/sentry/models/authidentity.py
1
1441
from __future__ import absolute_import, print_function

from datetime import timedelta

from django.db import models
from django.utils import timezone
from jsonfield import JSONField

from sentry.db.models import FlexibleForeignKey, Model


class AuthIdentity(Model):
    user = FlexibleForeignKey('sentry.User')
    auth_provider = FlexibleForeignKey('sentry.AuthProvider')
    ident = models.CharField(max_length=128)
    data = JSONField()
    last_verified = models.DateTimeField(default=timezone.now)
    last_synced = models.DateTimeField(default=timezone.now)
    date_added = models.DateTimeField(default=timezone.now)

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_authidentity'
        unique_together = (('auth_provider', 'ident'), ('auth_provider', 'user'))

    def get_audit_log_data(self):
        return {
            'user_id': self.user_id,
            'data': self.data,
        }

    # TODO(dcramer): we'd like to abstract this so there's a central Role object
    # and it doesnt require two composite db objects to talk to each other
    def is_valid(self, member):
        if getattr(member.flags, 'sso:invalid'):
            return False
        if not getattr(member.flags, 'sso:linked'):
            return False
        if not self.last_verified:
            return False
        if self.last_verified < timezone.now() - timedelta(hours=24):
            return False
        return True
bsd-3-clause
8,686,039,802,315,802,000
32.511628
81
0.66204
false
ATIX-AG/foreman-ansible-modules
plugins/modules/compute_resource.py
1
14623
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) Philipp Joos 2017 # (c) Baptiste Agasse 2019 # (c) Mark Hlawatschek 2020 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: compute_resource version_added: 1.0.0 short_description: Manage Compute Resources description: - Create, update, and delete Compute Resources author: - "Philipp Joos (@philippj)" - "Baptiste Agasse (@bagasse)" - "Manisha Singhal (@Manisha15) ATIX AG" - "Mark Hlawatschek (@hlawatschek) ATIX AG" options: name: description: compute resource name required: true type: str updated_name: description: new compute resource name required: false type: str description: description: compute resource description required: false type: str provider: description: Compute resource provider. Required if I(state=present_with_defaults). required: false choices: ["vmware", "libvirt", "ovirt", "proxmox", "EC2", "AzureRm", "GCE"] type: str provider_params: description: Parameter specific to compute resource provider. Required if I(state=present_with_defaults). required: false type: dict suboptions: url: description: - URL of the compute resource type: str user: description: - Username for the compute resource connection, not valid for I(provider=libvirt) type: str password: description: - Password for the compute resource connection, not valid for I(provider=libvirt) type: str region: description: - AWS region, AZURE region type: str tenant: description: - AzureRM tenant type: str app_ident: description: - AzureRM client id type: str datacenter: description: - Datacenter the compute resource is in, not valid for I(provider=libvirt) type: str display_type: description: - Display type to use for the remote console, only valid for I(provider=libvirt) type: str use_v4: description: - Use oVirt API v4, only valid for I(provider=ovirt) type: bool ovirt_quota: description: - oVirt quota ID, only valid for I(provider=ovirt) type: str project: description: - Project id for I(provider=GCE) type: str email: description: - Email for I(provider=GCE) type: str key_path: description: - Certificate path for I(provider=GCE) type: str zone: description: - zone for I(provider=GCE) type: str cloud: description: - cloud for I(provider=AzureRm) type: str choices: - azure - azureusgovernment - azurechina - azuregermancloud version_added: 2.1.0 sub_id: description: - Subscription ID for I(provider=AzureRm) type: str version_added: 2.1.0 ssl_verify_peer: description: - verify ssl from provider I(provider=proxmox) type: bool caching_enabled: description: - enable caching for I(provider=vmware) type: bool set_console_password: description: - Set a randomly generated password on the display connection for I(provider=vmware) and I(provider=libvirt) type: bool version_added: 2.0.0 keyboard_layout: description: - Default VNC Keyboard for I(provider=ovirt) type: str version_added: 2.0.0 
choices: - 'ar' - 'da' - 'de' - 'de-ch' - 'en-gb' - 'en-us' - 'es' - 'et' - 'fi' - 'fo' - 'fr' - 'fr-be' - 'fr-ca' - 'fr-ch' - 'hr' - 'hu' - 'is' - 'it' - 'ja' - 'lt' - 'lv' - 'mk' - 'nl' - 'nl-be' - 'no' - 'pl' - 'pt' - 'pt-br' - 'ru' - 'sl' - 'sv' - 'th' - 'tr' public_key: description: - X509 Certification Authorities, only valid for I(provider=ovirt) type: str version_added: 2.0.0 extends_documentation_fragment: - theforeman.foreman.foreman - theforeman.foreman.foreman.entity_state_with_defaults - theforeman.foreman.foreman.taxonomy ''' EXAMPLES = ''' - name: Create livirt compute resource theforeman.foreman.compute_resource: name: example_compute_resource locations: - Munich organizations: - ACME provider: libvirt provider_params: url: libvirt.example.com display_type: vnc server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: Update libvirt compute resource theforeman.foreman.compute_resource: name: example_compute_resource description: updated compute resource locations: - Munich organizations: - ACME provider: libvirt provider_params: url: libvirt.example.com display_type: vnc server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: Delete libvirt compute resource theforeman.foreman.compute_resource: name: example_compute_resource server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: absent - name: Create vmware compute resource theforeman.foreman.compute_resource: name: example_compute_resource locations: - Munich organizations: - ACME provider: vmware provider_params: caching_enabled: false url: vsphere.example.com user: admin password: secret datacenter: ax01 server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: Create ovirt compute resource theforeman.foreman.compute_resource: name: ovirt_compute_resource locations: - France/Toulouse organizations: - Example Org provider: ovirt provider_params: url: ovirt.example.com user: [email protected] password: ovirtsecret datacenter: aa92fb54-0736-4066-8fa8-b8b9e3bd75ac ovirt_quota: 24868ab9-c2a1-47c3-87e7-706f17d215ac use_v4: true server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: Create proxmox compute resource theforeman.foreman.compute_resource: name: proxmox_compute_resource locations: - Munich organizations: - ACME provider: proxmox provider_params: url: https://proxmox.example.com:8006/api2/json user: root@pam password: secretpassword ssl_verify_peer: true server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: create EC2 compute resource theforeman.foreman.compute_resource: name: EC2_compute_resource description: EC2 locations: - AWS organizations: - ACME provider: EC2 provider_params: user: AWS_ACCESS_KEY password: AWS_SECRET_KEY region: eu-west-1 server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: create Azure compute resource theforeman.foreman.compute_resource: name: AzureRm_compute_resource description: AzureRm locations: - Azure organizations: - ACME provider: AzureRm provider_params: sub_id: SUBSCRIPTION_ID tenant: TENANT_ID app_ident: CLIENT_ID password: CLIENT_SECRET region: westeurope server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present - name: create GCE compute resource theforeman.foreman.compute_resource: name: GCE compute resource description: 
Google Cloud Engine locations: - GCE organizations: - ACME provider: GCE provider_params: project: orcharhino email: [email protected] key_path: "/usr/share/foreman/gce_orcharhino_key.json" zone: europe-west3-b server_url: "https://foreman.example.com" username: "admin" password: "changeme" state: present ''' RETURN = ''' entity: description: Final state of the affected entities grouped by their type. returned: success type: dict contains: compute_resources: description: List of compute resources. type: list elements: dict ''' from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanTaxonomicEntityAnsibleModule def get_provider_info(provider): provider_name = provider.lower() if provider_name == 'libvirt': return 'Libvirt', ['url', 'display_type', 'set_console_password'] elif provider_name == 'ovirt': return 'Ovirt', ['url', 'user', 'password', 'datacenter', 'use_v4', 'ovirt_quota', 'keyboard_layout', 'public_key'] elif provider_name == 'proxmox': return 'Proxmox', ['url', 'user', 'password', 'ssl_verify_peer'] elif provider_name == 'vmware': return 'Vmware', ['url', 'user', 'password', 'datacenter', 'caching_enabled', 'set_console_password'] elif provider_name == 'ec2': return 'EC2', ['user', 'password', 'region'] elif provider_name == 'azurerm': return 'AzureRm', ['user', 'password', 'tenant', 'region', 'app_ident', 'cloud', 'sub_id'] elif provider_name == 'gce': return 'GCE', ['project', 'email', 'key_path', 'zone'] else: return '', [] class ForemanComputeResourceModule(ForemanTaxonomicEntityAnsibleModule): pass def main(): module = ForemanComputeResourceModule( foreman_spec=dict( name=dict(required=True), updated_name=dict(), description=dict(), provider=dict(choices=['vmware', 'libvirt', 'ovirt', 'proxmox', 'EC2', 'AzureRm', 'GCE']), display_type=dict(invisible=True), datacenter=dict(invisible=True), url=dict(invisible=True), caching_enabled=dict(invisible=True), user=dict(invisible=True), password=dict(invisible=True), region=dict(invisible=True), tenant=dict(invisible=True), app_ident=dict(invisible=True), use_v4=dict(invisible=True), ovirt_quota=dict(invisible=True), project=dict(invisible=True), email=dict(invisible=True), key_path=dict(invisible=True), zone=dict(invisible=True), cloud=dict(invisible=True), ssl_verify_peer=dict(invisible=True), set_console_password=dict(invisible=True), keyboard_layout=dict(invisible=True), public_key=dict(invisible=True), sub_id=dict(invisible=True), ), argument_spec=dict( provider_params=dict(type='dict', options=dict( url=dict(), display_type=dict(), user=dict(), password=dict(no_log=True), region=dict(), tenant=dict(), app_ident=dict(), datacenter=dict(), caching_enabled=dict(type='bool'), use_v4=dict(type='bool'), ovirt_quota=dict(), project=dict(), email=dict(), key_path=dict(no_log=False), zone=dict(), cloud=dict(choices=['azure', 'azureusgovernment', 'azurechina', 'azuregermancloud']), ssl_verify_peer=dict(type='bool'), set_console_password=dict(type='bool'), keyboard_layout=dict(choices=['ar', 'de-ch', 'es', 'fo', 'fr-ca', 'hu', 'ja', 'mk', 'no', 'pt-br', 'sv', 'da', 'en-gb', 'et', 'fr', 'fr-ch', 'is', 'lt', 'nl', 'pl', 'ru', 'th', 'de', 'en-us', 'fi', 'fr-be', 'hr', 'it', 'lv', 'nl-be', 'pt', 'sl', 'tr']), public_key=dict(), sub_id=dict(), ), mutually_exclusive=[['user', 'sub_id']], ), state=dict(type='str', default='present', choices=['present', 'absent', 'present_with_defaults']), ), required_if=( ['state', 'present_with_defaults', ['provider', 'provider_params']], ), ) if not module.desired_absent: 
if 'provider' in module.foreman_params: module.foreman_params['provider'], provider_param_keys = get_provider_info(provider=module.foreman_params['provider']) provider_params = module.foreman_params.pop('provider_params', {}) if module.foreman_params['provider'] == 'AzureRm' and 'user' in provider_params: provider_params['sub_id'] = provider_params.pop('user') for key in provider_param_keys: if key in provider_params: module.foreman_params[key] = provider_params.pop(key) if provider_params: module.fail_json(msg="Provider {0} does not support the following given parameters: {1}".format( module.foreman_params['provider'], list(provider_params.keys()))) with module.api_connection(): entity = module.lookup_entity('entity') if not module.desired_absent and 'provider' not in module.foreman_params and entity is None: module.fail_json(msg='To create a compute resource a valid provider must be supplied') module.run() if __name__ == '__main__': main()
gpl-3.0
-3,207,990,238,659,505,700
29.150515
158
0.592491
false
jlustigy/coronagraph
docs/conf.py
1
6174
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) import sys import os import shlex import coronagraph on_rtd = os.environ.get('READTHEDOCS', None) == 'True' sys.path.insert(0, os.path.abspath('..')) sys.path.insert(0, os.path.abspath('.')) import sphinx_rtd_theme # -- Project information ----------------------------------------------------- project = 'coronagraph' copyright = '2018, Jacob Lustig-Yaeger' author = 'Jacob Lustig-Yaeger' # The short X.Y version version = coronagraph.__version__ # The full version, including alpha/beta/rc tags release = coronagraph.__version__ # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.napoleon', 'nbsphinx', 'IPython.sphinxext.ipython_console_highlighting', 'm2r' ] nbsphinx_prolog = """ {% set docname = env.doc2path(env.docname, base=None) %} .. note:: This tutorial was generated from a Jupyter notebook that can be downloaded `here <https://github.com/jlustigy/coronagraph/blob/master/docs/{{ docname }}>`_. """ # Custom additions for exceptions plot_include_source = False plot_html_show_source_link = False plot_html_show_formats = False # Remove ipython notebook prompt numbers nbsphinx_prolog = """ .. raw:: html <style> .nbinput .prompt, .nboutput .prompt { display: none; } </style> """ napoleon_use_ivar = True # Make the order of the autodocs in the order they appear in the code autodoc_member_order = 'bysource' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] #source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] #html_favicon = "_static/favicon.png" html_logo = "_static/logo1.png" html_theme_options = {"logo_only": True} # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'coronagraphdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'coronagraph.tex', 'coronagraph Documentation', 'Jacob Lustig-Yaeger', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'coronagraph', 'coronagraph Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'coronagraph', 'coronagraph Documentation', author, 'coronagraph', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration -------------------------------------------------
mit
-1,955,281,090,895,551,200
29.264706
102
0.653385
false
clld/tsezacp
tsezacp/models.py
1
3804
from zope.interface import implementer from sqlalchemy import ( Column, Unicode, Integer, ForeignKey, ) from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declared_attr from clld import interfaces from clld.db.meta import Base, CustomModelMixin from clld.db.models.common import Sentence, Contribution, Unit, IdNameDescriptionMixin #----------------------------------------------------------------------------- # specialized common mapper classes #----------------------------------------------------------------------------- """ CREATE TABLE "texts_data_dictionary_entry" ( "id" integer NOT NULL PRIMARY KEY, "Number" integer NOT NULL, "Value" varchar(50) NOT NULL, "Part_of_Speech" varchar(50) NOT NULL, "Gloss" varchar(50) NOT NULL, "Notes" text NOT NULL, "Changed" date NOT NULL ); CREATE TABLE "texts_data_glossary" ( "id" integer NOT NULL PRIMARY KEY, "Number" integer NOT NULL, "Value" varchar(50) NOT NULL, "Part_of_Speech" varchar(50) NOT NULL, "Gloss" varchar(50) NOT NULL, "Notes" text NOT NULL, "Changed" date NOT NULL ); CREATE TABLE "texts_data_line" ( "id" integer NOT NULL PRIMARY KEY, "to_Text_id" integer NOT NULL REFERENCES "texts_data_text" ("id"), "Line_Position" integer NOT NULL, "Tsez_Line" varchar(400) NOT NULL, "English_Translation" varchar(400) NOT NULL, "Russian_Translation" varchar(400) NOT NULL ); CREATE TABLE "texts_data_morpheme" ( "id" integer NOT NULL PRIMARY KEY, "to_Word_id" integer NOT NULL REFERENCES "texts_data_word" ("id"), "Position" integer NOT NULL, "Value" varchar(10) NOT NULL, "Gloss" varchar(10) NOT NULL, "Part_of_Speech" varchar(10) NOT NULL ); CREATE TABLE "texts_data_text" ( "id" integer NOT NULL PRIMARY KEY, "Number" integer NOT NULL, "Title_in_Tsez" varchar(200) NOT NULL, "Title_in_English" varchar(200) NOT NULL, "Title_in_Russian" varchar(200) NOT NULL ); CREATE TABLE "texts_data_word" ( "id" integer NOT NULL PRIMARY KEY, "to_Line_id" integer NOT NULL REFERENCES "texts_data_line" ("id"), "Lex_Position" integer NOT NULL, "Word_in_Phrase" varchar(20) NOT NULL, "Word_Clear" varchar(15) NOT NULL ); """ @implementer(interfaces.IContribution) class Text(CustomModelMixin, Contribution): pk = Column(Integer, ForeignKey('contribution.pk'), primary_key=True) ord = Column(Integer, nullable=False) russian = Column(Unicode) @implementer(interfaces.ISentence) class Line(CustomModelMixin, Sentence): pk = Column(Integer, ForeignKey('sentence.pk'), primary_key=True) ord = Column(Integer, nullable=False) text_pk = Column(Integer, ForeignKey('text.pk')) russian = Column(Unicode) @declared_attr def text(cls): return relationship(Text, backref=backref('lines', order_by=cls.ord)) class WordInLine(Base, IdNameDescriptionMixin): line_pk = Column(Integer, ForeignKey('line.pk')) ord = Column(Integer, nullable=False) @declared_attr def line(cls): return relationship(Line, backref=backref('words', order_by=cls.ord)) @implementer(interfaces.IUnit) class Morpheme(CustomModelMixin, Unit): pk = Column(Integer, ForeignKey('unit.pk'), primary_key=True) pos = Column(Unicode) notes = Column(Unicode) class MorphemeInWord(Base, IdNameDescriptionMixin): word_pk = Column(Integer, ForeignKey('wordinline.pk')) ord = Column(Integer, nullable=False) pos = Column(Unicode) normgloss = Column(Unicode) morpheme_pk = Column(Integer, ForeignKey('morpheme.pk')) morpheme = relationship(Morpheme, backref='occurrences') @declared_attr def word(cls): return relationship(WordInLine, backref=backref('morphemes', order_by=cls.ord))
apache-2.0
-3,154,290,188,002,359,300
31.793103
87
0.667981
false
tswicegood/atx_restaurant_ratings
example/example_usage/tests/scraper.py
1
3188
import datetime import random import types from django.template.loader import render_to_string from django.test import TestCase from mock import patch, MagicMock from atx_restaurant_ratings import scraper def random_date(): now = datetime.date.today() r = random.randint(-1000, 1000) return (now - datetime.timedelta(days=r)).strftime('%m/%d/%Y') def generate_random_start_end_doc(start_date=None, end_date=None): if start_date is None: start_date = random_date() if end_date is None: end_date = random_date() start = MagicMock(text=start_date) end = MagicMock(text=end_date) find = MagicMock(return_value=[start, end]) return start_date, end_date, MagicMock(find=find) class TestOf_get_start_and_end_dates(TestCase): def test_first_value_is_the_start_date(self): start_date, _, doc = generate_random_start_end_doc() with patch.object(scraper, 'pq') as mock: mock.return_value = doc result = scraper.get_start_and_end_dates() self.assertEqual(result[0], start_date) def test_second_value_is_the_end_date(self): _, end_date, doc = generate_random_start_end_doc() with patch.object(scraper, 'pq') as mock: mock.return_value = doc result = scraper.get_start_and_end_dates() self.assertEqual(result[1], end_date) def test_strips_any_extranous_whitespace_off_of_values(self): a, b, doc = generate_random_start_end_doc(' Alice', 'Bob ') with patch.object(scraper, 'pq') as mock: mock.return_value = doc actual = scraper.get_start_and_end_dates() self.assertEqual(['Alice', 'Bob'], actual) class TestOf_get_all_rows_of_data(TestCase): def test_returns_all_matching_rows(self): r = random.randint(1, 10) content = render_to_string('for_testing/empty_rows.html', {'range': range(r)}) response = MagicMock(content=content) with patch.object(scraper, 'requests') as mock: mock.post.return_value = response rows = scraper.get_all_rows_of_data() self.assertEqual(r, len(rows)) def generator_fake_row(): def generate_fake_cell(): r = random.randint(1, 10) return MagicMock(text="Random Text %s" % r) row = MagicMock() row.findall.return_value = [generate_fake_cell() for i in range(6)] return row def generate_fake_rows(): r = random.randint(1, 10) return [generator_fake_row() for i in range(r)] class TestOf_extract_raw_data(TestCase): def test_returns_a_generator(self): result = scraper.extract_raw_data(generate_fake_rows()) self.assertTrue(type(result) is types.GeneratorType) def test_each_yielded_item_is_a_dictionary(self): result = scraper.extract_raw_data(generate_fake_rows()) for a in result: self.assertTrue(type(a) is dict) def test_keys_for_dict_are_the_labels(self): row = scraper.extract_raw_data(generate_fake_rows()).next() expected = sorted(scraper.LABELS) actual = sorted(row.keys()) self.assertEqual(expected, actual)
apache-2.0
1,051,943,836,252,412,900
31.865979
71
0.639272
false
nttks/edx-platform
cms/djangoapps/contentstore/views/user.py
1
8092
from django.core.exceptions import PermissionDenied from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.decorators.http import require_http_methods from django.utils.translation import ugettext as _ from django.views.decorators.http import require_POST from django.views.decorators.csrf import ensure_csrf_cookie from edxmako.shortcuts import render_to_response from xmodule.modulestore.django import modulestore from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locator import LibraryLocator from openedx.core.djangoapps.ga_optional.api import is_available from openedx.core.djangoapps.ga_optional.models import LIBRARY_OPTION_KEY from util.json_request import JsonResponse, expect_json from student.roles import CourseInstructorRole, CourseStaffRole, LibraryUserRole from course_creators.views import user_requested_access from student.auth import STUDIO_EDIT_ROLES, STUDIO_VIEW_USERS, get_user_permissions from student.models import CourseEnrollment from django.http import HttpResponseNotFound from student import auth __all__ = ['request_course_creator', 'course_team_handler'] @require_POST @login_required def request_course_creator(request): """ User has requested course creation access. """ user_requested_access(request.user) return JsonResponse({"Status": "OK"}) @login_required @ensure_csrf_cookie @require_http_methods(("GET", "POST", "PUT", "DELETE")) def course_team_handler(request, course_key_string=None, email=None): """ The restful handler for course team users. GET html: return html page for managing course team json: return json representation of a particular course team member (email is required). POST or PUT json: modify the permissions for a particular course team member (email is required, as well as role in the payload). DELETE: json: remove a particular course team member from the course team (email is required). """ course_key = CourseKey.from_string(course_key_string) if course_key_string else None # No permissions check here - each helper method does its own check. if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'): return _course_team_user(request, course_key, email) elif request.method == 'GET': # assume html return _manage_users(request, course_key) else: return HttpResponseNotFound() def user_with_role(user, role): """ Build user representation with attached role """ return { 'id': user.id, 'username': user.username, 'email': user.email, 'role': role } def _manage_users(request, course_key): """ This view will return all CMS users who are editors for the specified course """ # check that logged in user has permissions to this item user_perms = get_user_permissions(request.user, course_key) if not user_perms & STUDIO_VIEW_USERS: raise PermissionDenied() course_module = modulestore().get_course(course_key) instructors = set(CourseInstructorRole(course_key).users_with_role()) # the page only lists staff and assumes they're a superset of instructors. Do a union to ensure. 
staff = set(CourseStaffRole(course_key).users_with_role()).union(instructors) formatted_users = [] for user in instructors: formatted_users.append(user_with_role(user, 'instructor')) for user in staff - instructors: formatted_users.append(user_with_role(user, 'staff')) return render_to_response('manage_users.html', { 'context_course': course_module, 'show_transfer_ownership_hint': request.user in instructors and len(instructors) == 1, 'users': formatted_users, 'allow_actions': bool(user_perms & STUDIO_EDIT_ROLES), 'library_option': is_available(LIBRARY_OPTION_KEY, course_key) }) @expect_json def _course_team_user(request, course_key, email): """ Handle the add, remove, promote, demote requests ensuring the requester has authority """ # check that logged in user has permissions to this item requester_perms = get_user_permissions(request.user, course_key) permissions_error_response = JsonResponse({"error": _("Insufficient permissions")}, 403) if (requester_perms & STUDIO_VIEW_USERS) or (email == request.user.email): # This user has permissions to at least view the list of users or is editing themself pass else: # This user is not even allowed to know who the authorized users are. return permissions_error_response try: user = User.objects.get(email=email) except Exception: msg = { "error": _("Could not find user by email address '{email}'.").format(email=email), } return JsonResponse(msg, 404) is_library = isinstance(course_key, LibraryLocator) # Ordered list of roles: can always move self to the right, but need STUDIO_EDIT_ROLES to move any user left if is_library: role_hierarchy = (CourseInstructorRole, CourseStaffRole, LibraryUserRole) else: role_hierarchy = (CourseInstructorRole, CourseStaffRole) if request.method == "GET": # just return info about the user msg = { "email": user.email, "active": user.is_active, "role": None, } # what's the highest role that this user has? (How should this report global staff?) for role in role_hierarchy: if role(course_key).has_user(user): msg["role"] = role.ROLE break return JsonResponse(msg) # All of the following code is for editing/promoting/deleting users. # Check that the user has STUDIO_EDIT_ROLES permission or is editing themselves: if not ((requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id)): return permissions_error_response # can't modify an inactive user if not user.is_active: msg = { "error": _('User {email} has registered but has not yet activated his/her account.').format(email=email), } return JsonResponse(msg, 400) if request.method == "DELETE": new_role = None else: # only other operation supported is to promote/demote a user by changing their role: # role may be None or "" (equivalent to a DELETE request) but must be set. 
# Check that the new role was specified: if "role" in request.json or "role" in request.POST: new_role = request.json.get("role", request.POST.get("role")) else: return JsonResponse({"error": _("No `role` specified.")}, 400) old_roles = set() role_added = False for role_type in role_hierarchy: role = role_type(course_key) if role_type.ROLE == new_role: if (requester_perms & STUDIO_EDIT_ROLES) or (user.id == request.user.id and old_roles): # User has STUDIO_EDIT_ROLES permission or # is currently a member of a higher role, and is thus demoting themself auth.add_users(request.user, role, user) role_added = True else: return permissions_error_response elif role.has_user(user): # Remove the user from this old role: old_roles.add(role) if new_role and not role_added: return JsonResponse({"error": _("Invalid `role` specified.")}, 400) for role in old_roles: if isinstance(role, CourseInstructorRole) and role.users_with_role().count() == 1: msg = {"error": _("You may not remove the last Admin. Add another Admin first.")} return JsonResponse(msg, 400) auth.remove_users(request.user, role, user) if new_role and not is_library: # The user may be newly added to this course. # auto-enroll the user in the course so that "View Live" will work. CourseEnrollment.enroll(user, course_key) return JsonResponse()
agpl-3.0
-6,771,547,991,346,686,000
39.258706
125
0.673381
false
catapult-project/catapult-csm
devil/devil/android/crash_handler.py
3
1297
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging

from devil import base_error
from devil.android import device_errors

logger = logging.getLogger(__name__)


def RetryOnSystemCrash(f, device, retries=3):
  """Retries the given function on a device crash.

  If the provided function fails with a DeviceUnreachableError, this will
  wait for the device to come back online, then retry the function.

  Note that this uses the same retry scheme as timeout_retry.Run.

  Args:
    f: a unary callable that takes an instance of device_utils.DeviceUtils.
    device: an instance of device_utils.DeviceUtils.
    retries: the number of retries.
  Returns:
    Whatever f returns.
  """
  num_try = 1
  while True:
    try:
      return f(device)
    except device_errors.DeviceUnreachableError:
      if num_try > retries:
        logger.error('%d consecutive device crashes. No longer retrying.',
                     num_try)
        raise
      try:
        logger.warning('Device is unreachable. Waiting for recovery...')
        device.WaitUntilFullyBooted()
      except base_error.BaseError:
        logger.exception('Device never recovered. X(')
      num_try += 1
bsd-3-clause
6,952,356,882,612,483,000
29.162791
78
0.697764
false
gkc1000/pyscf
pyscf/nao/m_csphar.py
1
2662
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import numpy as np from pyscf.nao.m_fact import sgn, onedivsqrt4pi try: import numba from pyscf.nao.m_numba_utils import csphar_numba use_numba = True except: use_numba = False # # # def csphar(r,lmax): """ Computes (all) complex spherical harmonics up to the angular momentum lmax Args: r : Cartesian coordinates defining correct theta and phi angles for spherical harmonic lmax : Integer, maximal angular momentum Result: 1-d numpy array of complex128 elements with all spherical harmonics stored in order 0,0; 1,-1; 1,0; 1,+1 ... lmax,lmax, althogether 0 : (lmax+1)**2 elements. """ if use_numba: return csphar_numba(r, lmax) else: x=r[0] y=r[1] z=r[2] dd=np.sqrt(x*x+y*y+z*z) res = np.zeros(((lmax+1)**2), dtype=np.complex128) res[0] = onedivsqrt4pi if dd < 1.0e-10 : ll=(lmax+1)**2 return res if x == 0.0 : phi=0.5*np.pi if y<0.0: phi=-phi else: phi = np.arctan( y/x ) if x<0.0: phi=phi+np.pi ss=np.sqrt(x*x+y*y)/dd cc=z/dd if lmax<1 : return res for l in range(1,lmax+1): al=1.0*l il2=(l+1)**2-1 il1=l**2-1 res[il2] = -ss*np.sqrt((al-0.5)/al)*res[il1] res[il2-1] = cc*np.sqrt(2.0*al-1.0)*res[il1] if lmax>1: for m in range(lmax-1): if m<lmax: for l in range(m+1,lmax): ind=l*(l+1)+m aa=1.0*(l**2-m**2) bb=1.0*((l+1)**2-m**2) zz=(l+l+1.0)*cc*res[ind].real-np.sqrt(aa)*res[ind-2*l].real res[ind+2*(l+1)]=zz/np.sqrt(bb) for l in range(lmax+1): ll2=l*(l+1) rt2lp1=np.sqrt(l+l+1.0) for m in range(l+1): cs=np.sin(m*phi)*rt2lp1 cc=np.cos(m*phi)*rt2lp1 res[ll2+m]=np.complex(cc,cs)*res[ll2+m] res[ll2-m]=sgn[m]*np.conj(res[ll2+m]) return res;
apache-2.0
7,247,452,411,345,700,000
27.319149
163
0.572126
false
VCTLabs/openadams
_oatr_importwizard.py
1
7291
# -*- coding: utf-8 -*- # $Id$ # ------------------------------------------------------------------- # Copyright 2012 Achim K�hler # # This file is part of openADAMS. # # openADAMS is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 2 of the License, # or (at your option) any later version. # # openADAMS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with openADAMS. If not, see <http://www.gnu.org/licenses/>. # ------------------------------------------------------------------- from PyQt4 import QtGui, QtCore, QtSql from PyQt4.QtCore import Qt import filepicker import _oatr_tableview import _oatr_commons class cTestrunnerImportWizard(QtCore.QObject): def __init__(self, parent=None): super(cTestrunnerImportWizard, self).__init__() self.testsuiteWizardPage = cTestsuiteWizardPage() self.infoWizardPage = cInfoWizardPage() self.wizard = QtGui.QWizard() self.wizard.addPage(self.createImportDatabasePage()) self.wizard.addPage(self.testsuiteWizardPage) self.wizard.addPage(self.infoWizardPage) self.wizard.setWindowTitle(self.tr("Create new testrun")) self.wizard.currentIdChanged.connect(self.idChangedHandler) def idChangedHandler(self, currentId): if currentId == 0: # on first page, disable continue button when import filename is invalid self.wizard.button(QtGui.QWizard.NextButton).setEnabled(self.inputFilePicker.isValidFilename()) elif currentId == 1: self.testsuiteWizardPage.initTable(self.inputFilePicker.getFilename()) self.wizard.button(QtGui.QWizard.NextButton).setDisabled(self.testsuiteWizardPage.tableIsEmpty()) elif currentId == 2: self.wizard.button(QtGui.QWizard.FinishButton).setEnabled(False) # user has to enter fileInfo = QtCore.QFileInfo(self.inputFilePicker.getFilename()) fileInfo.setFile(fileInfo.dir(), fileInfo.baseName()+'.%s' % _oatr_commons.TR_FILE_SUFFIX) fileName = fileInfo.filePath() self.infoWizardPage.outputFilePicker.setFileName(fileName) else: pass def createImportDatabasePage(self): page = QtGui.QWizardPage() page.setTitle(self.tr("Select database with testsuite to run")) layout = QtGui.QHBoxLayout() self.inputFilePicker = filepicker.cFilePicker() self.inputFilePicker.sigValidFilename.connect(self.validImportFilename) widgets = self.inputFilePicker.getWidgets() widgets['label'].setText(self.tr("Database")) widgets['dialog'].setNameFilter(self.tr("Database files (*.db);;All files (*.*)")) widgets['dialog'].setFileMode(QtGui.QFileDialog.ExistingFile) map(layout.addWidget, [w for w in (widgets['label'], widgets['combobox'], widgets['button'])]) layout.setStretch(1, 10) page.setLayout(layout) return page def show(self): if self.wizard.exec_() == QtGui.QDialog.Accepted: return {'srcDatabase': self.inputFilePicker.getFilename(), 'destDatabase': self.infoWizardPage.outputFilePicker.getFilename(), 'testsuiteId': self.testsuiteWizardPage.getSelectedTestsuiteId(), 'title': unicode(self.infoWizardPage.leTitle.text()), 'description': unicode(self.infoWizardPage.teDescription.toPlainText())} else: return None def validImportFilename(self, isValid): self.wizard.button(QtGui.QWizard.NextButton).setEnabled(isValid) class cTestsuiteWizardPage(QtGui.QWizardPage): def __init__(self): super(cTestsuiteWizardPage, 
self).__init__() self.setTitle(self.tr("Select testsuite to run")) layout = QtGui.QHBoxLayout() self.tableView = _oatr_tableview.cTestsuiteTableView(self, model=None) layout.addWidget(self.tableView) self.setLayout(layout) def initTable(self, databaseName): self.database = QtSql.QSqlDatabase.addDatabase("QSQLITE", 'importconnection') self.database.setHostName("") self.database.setDatabaseName(databaseName) self.database.open() model = QtSql.QSqlTableModel(self, self.database) model.setTable('testsuites') self.tableView.setModel(model) hiddencols = (1,3,4,5,6,7) map(self.tableView.setColumnHidden, hiddencols, [True]*len(hiddencols)) self.tableView.setHeader() model.reset() model.select() if not self.tableIsEmpty(): self.tableView.selectRow(0) def tableIsEmpty(self): return self.tableView.model().rowCount() == 0 def getSelectedTestsuiteId(self): index = self.tableView.model().index(self.tableView.currentIndex().row(), 0) return self.tableView.model().data(index).toInt()[0] class cInfoWizardPage(QtGui.QWizardPage): def __init__(self): super(cInfoWizardPage, self).__init__() self.setTitle(self.tr("Enter testrun information")) layout = QtGui.QGridLayout() self.outputFilePicker = filepicker.cFilePicker() widgets = self.outputFilePicker.getWidgets() widgets['label'].setText(self.tr("Testrun file")) widgets['dialog'].setNameFilter(self.tr("Testrun files (*.%s);;All files (*.*)" % _oatr_commons.TR_FILE_SUFFIX)) widgets['dialog'].setFileMode(QtGui.QFileDialog.AnyFile) widgets['dialog'].setAcceptMode(QtGui.QFileDialog.AcceptSave) layout.addWidget(widgets['label'], 0, 0) layout.addWidget(widgets['combobox'], 0, 1) layout.addWidget(widgets['button'], 0, 2) layout.addWidget(QtGui.QLabel(self.tr("Title")), 1, 0) self.leTitle = QtGui.QLineEdit() self.registerField("title*", self.leTitle); # title is mandatory layout.addWidget(self.leTitle, 1, 1, 1, 2) layout.addWidget(QtGui.QLabel(self.tr("Description"), alignment=Qt.AlignTop), 2, 0) self.teDescription = QtGui.QTextEdit() layout.addWidget(self.teDescription, 2, 1, 1, 2) layout.setColumnStretch(1, 1) self.setLayout(layout) def validatePage(self): fileName = self.outputFilePicker.getFilename() if QtCore.QFile.exists(fileName): r = QtGui.QMessageBox.warning(self, self.tr("Overwrite file"), self.tr("File %s already exists. Okay to overwrite?" % fileName), QtGui.QMessageBox.Yes|QtGui.QMessageBox.No) if r == QtGui.QMessageBox.No: return False if not QtCore.QFile.remove(fileName): QtGui.QMessageBox.critical(self, self.tr("Failure"), self.tr("Failed to remove file %s" % fileName)) return False return True
gpl-2.0
7,267,512,760,369,657,000
46.032258
120
0.64124
false
Mirantis/stackalytics
stackalytics/dashboard/decorators.py
1
19240
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import cProfile import functools import json import operator import time import flask from oslo_config import cfg from oslo_log import log as logging import six from werkzeug import exceptions from stackalytics.dashboard import helpers from stackalytics.dashboard import parameters from stackalytics.dashboard import vault from stackalytics.processor import utils from stackalytics import version as stackalytics_version LOG = logging.getLogger(__name__) def _check_param_in(params, name, collection, allow_all=False): for single in (params.get(name) or []): single = single.lower() if allow_all and single == 'all': continue if single not in collection: params[name] = [] flask.abort(404) def _validate_params(params): vault_inst = vault.get_vault() memory_storage_inst = vault.get_memory_storage() _check_param_in(params, 'release', vault_inst['releases'], True) _check_param_in(params, 'project_type', vault_inst['project_types_index']) _check_param_in(params, 'module', vault_inst['module_id_index']) _check_param_in(params, 'company', memory_storage_inst.get_companies_lower()) _check_param_in(params, 'user_id', memory_storage_inst.get_user_ids()) _check_param_in(params, 'metric', parameters.METRIC_TO_RECORD_TYPE, True) def _get_single(params): if params: return params[0] return None def _prepare_params(kwargs, ignore): params = kwargs.get('_params') if not params: params = {'action': flask.request.path} for key in parameters.FILTER_PARAMETERS: params[key] = parameters.get_parameter(kwargs, key, key) if params['start_date']: params['start_date'] = [utils.round_timestamp_to_day( params['start_date'][0])] if params['end_date']: params['end_date'] = [utils.round_timestamp_to_day( params['end_date'][0])] _validate_params(params) kwargs['_params'] = params if ignore: return dict([(k, v if k not in ignore else []) for k, v in six.iteritems(params)]) else: return params def cached(ignore=None): def decorator(func): @functools.wraps(func) def prepare_params_decorated_function(*args, **kwargs): params = _prepare_params(kwargs, ignore) cache_inst = vault.get_vault()['cache'] key = json.dumps(params) value = cache_inst.get(key) if not value: value = func(*args, **kwargs) cache_inst[key] = value vault.get_vault()['cache_size'] += len(key) + len(value) LOG.debug('Cache size: %(size)d, entries: %(len)d', {'size': vault.get_vault()['cache_size'], 'len': len(cache_inst.keys())}) return value return prepare_params_decorated_function return decorator def record_filter(ignore=None): def decorator(f): def _filter_records_by_days(start_date, end_date, memory_storage_inst): if start_date: start_date = utils.date_to_timestamp_ext(start_date[0]) else: start_date = memory_storage_inst.get_first_record_day() if end_date: end_date = utils.date_to_timestamp_ext(end_date[0]) else: end_date = utils.date_to_timestamp_ext('now') start_day = utils.timestamp_to_day(start_date) end_day = utils.timestamp_to_day(end_date) return 
memory_storage_inst.get_record_ids_by_days( six.moves.range(start_day, end_day + 1)) def _filter_records_by_modules(memory_storage_inst, mr): selected = set([]) for m, r in mr: if r is None: selected |= memory_storage_inst.get_record_ids_by_modules( [m]) else: selected |= ( memory_storage_inst.get_record_ids_by_module_release( m, r)) return selected def _intersect(first, second): if first is not None: return first & second return second @functools.wraps(f) def record_filter_decorated_function(*args, **kwargs): memory_storage_inst = vault.get_memory_storage() record_ids = None params = _prepare_params(kwargs, ignore) release = params['release'] if release: if 'all' not in release: record_ids = ( memory_storage_inst.get_record_ids_by_releases( c.lower() for c in release)) project_type = params['project_type'] mr = None if project_type: mr = set(vault.resolve_modules(vault.resolve_project_types( project_type), release)) module = params['module'] if module: mr = _intersect(mr, set(vault.resolve_modules( module, release))) if mr is not None: record_ids = _intersect( record_ids, _filter_records_by_modules( memory_storage_inst, mr)) user_id = params['user_id'] user_id = [u for u in user_id if vault.get_user_from_runtime_storage(u)] if user_id: record_ids = _intersect( record_ids, memory_storage_inst.get_record_ids_by_user_ids(user_id)) company = params['company'] if company: record_ids = _intersect( record_ids, memory_storage_inst.get_record_ids_by_companies(company)) metric = params['metric'] if 'all' not in metric: for metric in metric: if metric in parameters.METRIC_TO_RECORD_TYPE: record_ids = _intersect( record_ids, memory_storage_inst.get_record_ids_by_types( parameters.METRIC_TO_RECORD_TYPE[metric])) blueprint_id = params['blueprint_id'] if blueprint_id: record_ids = _intersect( record_ids, memory_storage_inst.get_record_ids_by_blueprint_ids( blueprint_id)) start_date = params['start_date'] end_date = params['end_date'] if start_date or end_date: record_ids = _intersect( record_ids, _filter_records_by_days(start_date, end_date, memory_storage_inst)) kwargs['record_ids'] = record_ids kwargs['records'] = memory_storage_inst.get_records(record_ids) return f(*args, **kwargs) return record_filter_decorated_function return decorator def incremental_filter(result, record, param_id, context): result[getattr(record, param_id)]['metric'] += 1 def loc_filter(result, record, param_id, context): result[getattr(record, param_id)]['metric'] += record.loc def mark_filter(result, record, param_id, context): result_by_param = result[getattr(record, param_id)] value = 0 record_type = record.type if record_type == 'Code-Review': result_by_param['metric'] += 1 value = record.value elif record_type == 'Abandon': result_by_param['metric'] += 1 value = 'x' elif record.type == 'Workflow': if record.value == 1: value = 'A' else: value = 'WIP' result_by_param[value] = result_by_param.get(value, 0) + 1 if record.disagreement: result_by_param['disagreements'] = ( result_by_param.get('disagreements', 0) + 1) def mark_finalize(record): new_record = record.copy() positive = 0 numeric = 0 mark_distribution = [] for key in [-2, -1, 1, 2, 'A', 'x']: if key in record: if key in [1, 2]: positive += record[key] if key in [-2, -1, 1, 2]: numeric += record[key] mark_distribution.append(str(record[key])) else: mark_distribution.append('0') new_record[key] = 0 new_record['disagreements'] = record.get('disagreements', 0) if numeric: positive_ratio = '%.1f%%' % ( (positive * 100.0) / numeric) new_record['disagreement_ratio'] = 
'%.1f%%' % ( (record.get('disagreements', 0) * 100.0) / numeric) else: positive_ratio = helpers.INFINITY_HTML new_record['disagreement_ratio'] = helpers.INFINITY_HTML new_record['mark_ratio'] = ( '|'.join(mark_distribution) + ' (' + positive_ratio + ')') new_record['positive_ratio'] = positive_ratio return new_record def ci_filter(result, record, param_id, context): result_by_param = result[getattr(record, param_id)] result_by_param['metric'] += 1 key = 'success' if record.value else 'failure' result_by_param[key] = result_by_param.get(key, 0) + 1 def ci_finalize(record): new_record = record.copy() metric = record.get('metric') if metric: new_record['success_ratio'] = '%.1f%%' % ( (record.get('success', 0) * 100.0) / metric) else: new_record['success_rate'] = helpers.INFINITY_HTML return new_record def person_day_filter(result, record, param_id, context): day = utils.timestamp_to_day(record.date) # fact that record-days are grouped by days in some order is used if context.get('last_processed_day') != day: context['last_processed_day'] = day context['counted_user_ids'] = set() user_id = record.user_id value = getattr(record, param_id) if user_id not in context['counted_user_ids']: context['counted_user_ids'].add(user_id) result[value]['metric'] += 1 def generate_records_for_person_day(record_ids): memory_storage_inst = vault.get_memory_storage() id_dates = [] for record in memory_storage_inst.get_records(record_ids): id_dates.append((record.date, record.record_id)) id_dates.sort(key=operator.itemgetter(0)) for record in memory_storage_inst.get_records( record_id for date, record_id in id_dates): yield record def aggregate_filter(): def decorator(f): @functools.wraps(f) def aggregate_filter_decorated_function(*args, **kwargs): metric_param = (flask.request.args.get('metric') or parameters.get_default('metric')) metric = metric_param.lower() metric_to_filters_map = { 'commits': (None, None), 'loc': (loc_filter, None), 'marks': (mark_filter, mark_finalize), 'emails': (incremental_filter, None), 'bpd': (incremental_filter, None), 'bpc': (incremental_filter, None), 'filed-bugs': (incremental_filter, None), 'resolved-bugs': (incremental_filter, None), 'members': (incremental_filter, None), 'person-day': (person_day_filter, None), 'ci': (ci_filter, ci_finalize), 'patches': (None, None), 'translations': (loc_filter, None), } if metric not in metric_to_filters_map: metric = parameters.get_default('metric') kwargs['metric_filter'] = metric_to_filters_map[metric][0] kwargs['finalize_handler'] = metric_to_filters_map[metric][1] if metric == 'person-day': kwargs['records'] = generate_records_for_person_day( kwargs['record_ids']) return f(*args, **kwargs) return aggregate_filter_decorated_function return decorator def exception_handler(): def decorator(f): @functools.wraps(f) def exception_handler_decorated_function(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: if isinstance(e, exceptions.HTTPException): raise # ignore Flask exceptions LOG.error(e, exc_info=True) flask.abort(404) return exception_handler_decorated_function return decorator def templated(template=None, return_code=200): def decorator(f): @functools.wraps(f) def templated_decorated_function(*args, **kwargs): vault_inst = vault.get_vault() template_name = template if template_name is None: template_name = (flask.request.endpoint.replace('.', '/') + '.html') ctx = f(*args, **kwargs) if ctx is None: ctx = {} try: _prepare_params(kwargs, []) except Exception: if return_code == 200: raise # do not re-raise on error 
page # put parameters into template ctx['metric'] = parameters.get_single_parameter( kwargs, 'metric', use_default=True) ctx['metric_label'] = parameters.METRIC_LABELS.get(ctx['metric']) project_type = parameters.get_single_parameter( kwargs, 'project_type', use_default=True) ctx['project_type'] = project_type ctx['project_type_inst'] = vault.get_project_type(project_type) ctx['release'] = parameters.get_single_parameter( kwargs, 'release', use_default=True) company = parameters.get_single_parameter(kwargs, 'company') ctx['company'] = company if company: ctx['company_original'] = ( vault.get_memory_storage().get_original_company_name( ctx['company'])) module = parameters.get_single_parameter(kwargs, 'module') ctx['module'] = module if module and module in vault_inst['module_id_index']: ctx['module_inst'] = vault_inst['module_id_index'][module] ctx['user_id'] = parameters.get_single_parameter(kwargs, 'user_id') if ctx['user_id']: ctx['user_inst'] = vault.get_user_from_runtime_storage( ctx['user_id']) ctx['page_title'] = helpers.make_page_title( ctx['project_type_inst'], ctx.get('release'), ctx.get('module_inst'), ctx.get('company_original'), ctx.get('user_inst')) ctx['stackalytics_version'] = ( stackalytics_version.version_info.version_string()) ctx['stackalytics_release'] = ( stackalytics_version.version_info.release_string()) update_time = vault_inst['runtime_storage_update_time'] ctx['runtime_storage_update_time'] = update_time ctx['runtime_storage_update_time_str'] = helpers.format_datetime( update_time) if update_time else None # deprecated -- top mentor report ctx['review_nth'] = parameters.get_single_parameter( kwargs, 'review_nth') return flask.render_template(template_name, **ctx), return_code return templated_decorated_function return decorator def jsonify(root='data'): def decorator(func): @functools.wraps(func) def jsonify_decorated_function(*args, **kwargs): value = func(*args, **kwargs) if isinstance(value, tuple): result = dict([(root[i], value[i]) for i in six.moves.range(min(len(value), len(root)))]) else: result = {root: value} return json.dumps(result) return jsonify_decorated_function return decorator def profiler_decorator(func): @functools.wraps(func) def profiler_decorated_function(*args, **kwargs): profiler = None profile_filename = cfg.CONF.collect_profiler_stats if profile_filename: LOG.debug('Profiler is enabled') profiler = cProfile.Profile() profiler.enable() result = func(*args, **kwargs) if profile_filename: profiler.disable() profiler.dump_stats(profile_filename) LOG.debug('Profiler stats is written to file %s', profile_filename) return result return profiler_decorated_function def response(): def decorator(func): @functools.wraps(func) @profiler_decorator def response_decorated_function(*args, **kwargs): callback = flask.app.request.args.get('callback', False) data = func(*args, **kwargs) if callback: data = str(callback) + '(' + data + ')' mimetype = 'application/javascript' else: mimetype = 'application/json' resp = flask.current_app.response_class(data, mimetype=mimetype) update_time = vault.get_vault()['vault_next_update_time'] now = utils.date_to_timestamp('now') if now < update_time: max_age = update_time - now else: max_age = 0 resp.headers['cache-control'] = 'public, max-age=%d' % (max_age,) resp.headers['expires'] = time.strftime( '%a, %d %b %Y %H:%M:%S GMT', time.gmtime(vault.get_vault()['vault_next_update_time'])) resp.headers['access-control-allow-origin'] = '*' return resp return response_decorated_function return decorator def 
query_filter(query_param='query'): def decorator(f): @functools.wraps(f) def query_filter_decorated_function(*args, **kwargs): query = flask.request.args.get(query_param) if query: kwargs['query_filter'] = lambda x: x.lower().find(query) >= 0 else: kwargs['query_filter'] = lambda x: True return f(*args, **kwargs) return query_filter_decorated_function return decorator
apache-2.0
5,466,360,063,047,401,000
32.873239
79
0.553846
false
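The ci_filter/ci_finalize pair in the dashboard decorators above follows an accumulate-then-derive pattern: filters bump per-key counters record by record, and a finalize step turns the counters into display ratios. A minimal, dependency-free sketch of that pattern (the record fields and the infinity placeholder are stand-ins, not the real stackalytics helpers):

import collections

INFINITY_HTML = '&#x221E;'  # stand-in for helpers.INFINITY_HTML

def ci_filter_sketch(result, record):
    # accumulate one CI record into the bucket for its key
    bucket = result[record['key']]
    bucket['metric'] = bucket.get('metric', 0) + 1
    outcome = 'success' if record['value'] else 'failure'
    bucket[outcome] = bucket.get(outcome, 0) + 1

def ci_finalize_sketch(bucket):
    # derive a percentage once all records have been accumulated
    new_bucket = dict(bucket)
    metric = bucket.get('metric')
    if metric:
        new_bucket['success_ratio'] = '%.1f%%' % (bucket.get('success', 0) * 100.0 / metric)
    else:
        new_bucket['success_ratio'] = INFINITY_HTML
    return new_bucket

result = collections.defaultdict(dict)
for rec in [{'key': 'job-a', 'value': True}, {'key': 'job-a', 'value': False}]:
    ci_filter_sketch(result, rec)
print(ci_finalize_sketch(result['job-a']))  # success_ratio -> 50.0%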
nuagenetworks/tempest
tempest/lib/services/compute/networks_client.py
1
1218
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils as json from tempest.lib.common import rest_client class NetworksClient(rest_client.RestClient): def list_networks(self): resp, body = self.get("os-networks") body = json.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body) def show_network(self, network_id): resp, body = self.get("os-networks/%s" % network_id) body = json.loads(body) self.expected_success(200, resp.status) return rest_client.ResponseBody(resp, body)
apache-2.0
3,407,192,014,664,464,000
35.909091
78
0.704433
false
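A hedged usage sketch for the NetworksClient above: tempest normally supplies the constructor arguments (an authenticated auth_provider, the service name and a region) from its own configuration, so the values and the 'label' field used here are assumptions for illustration rather than a definitive recipe.

from tempest.lib.services.compute import networks_client

def list_network_labels(auth_provider, region='RegionOne'):
    # auth_provider is assumed to be an already-authenticated tempest auth provider
    client = networks_client.NetworksClient(auth_provider, 'compute', region)
    body = client.list_networks()  # ResponseBody behaves like the parsed JSON dict
    return [net.get('label') for net in body['networks']]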
puttarajubr/commcare-hq
corehq/apps/sms/backend/http_api.py
1
5139
import re from urllib import urlencode from urllib2 import urlopen import sys from corehq.apps.sms.mixin import SMSBackend, BackendProcessingException from corehq.apps.sms.forms import BackendForm from corehq.apps.reminders.forms import RecordListField from django.forms.fields import * from django.core.exceptions import ValidationError from dimagi.ext.couchdbkit import * from dimagi.utils.django.fields import TrimmedCharField from corehq.apps.sms.util import clean_phone_number, strip_plus from django.utils.translation import ugettext as _, ugettext_noop from crispy_forms import layout as crispy from django.conf import settings BANNED_URL_REGEX = ( r".*://.*commcarehq.org.*", r".*://10.*", r".*://172.16.*", r".*://192.168.*", r".*://127.0.0.1.*", r".*://.*localhost.*", ) class HttpBackendForm(BackendForm): url = TrimmedCharField( label=ugettext_noop("URL"), ) message_param = TrimmedCharField( label=ugettext_noop("Message Parameter"), ) number_param = TrimmedCharField( label=ugettext_noop("Phone Number Parameter"), ) include_plus = BooleanField( required=False, label=ugettext_noop("Include '+' in Phone Number"), ) method = ChoiceField( label=ugettext_noop("HTTP Request Method"), choices=( ("GET","GET"), ("POST","POST") ), ) additional_params = RecordListField( input_name="additional_params", label=ugettext_noop("Additional Parameters"), ) def __init__(self, *args, **kwargs): if "initial" in kwargs and "additional_params" in kwargs["initial"]: additional_params_dict = kwargs["initial"]["additional_params"] kwargs["initial"]["additional_params"] = [{"name" : key, "value" : value} for key, value in additional_params_dict.items()] super(HttpBackendForm, self).__init__(*args, **kwargs) def clean_url(self): value = self.cleaned_data.get("url") for regex in BANNED_URL_REGEX: if re.match(regex, value): raise ValidationError(_("Invalid URL.")) return value def clean_additional_params(self): value = self.cleaned_data.get("additional_params") result = {} for pair in value: name = pair["name"].strip() value = pair["value"].strip() if name == "" or value == "": raise ValidationError("Please enter both name and value.") if name in result: raise ValidationError("Parameter name entered twice: %s" % name) result[name] = value return result @property def gateway_specific_fields(self): return crispy.Fieldset( _("HTTP Settings"), 'url', 'method', 'message_param', 'number_param', 'include_plus', 'additional_params', ) class HttpBackend(SMSBackend): url = StringProperty() # the url to send to message_param = StringProperty() # the parameter which the gateway expects to represent the sms message number_param = StringProperty() # the parameter which the gateway expects to represent the phone number to send to include_plus = BooleanProperty(default=False) # True to include the plus sign in front of the number, False not to (optional, defaults to False) method = StringProperty(choices=["GET","POST"], default="GET") # "GET" or "POST" (optional, defaults to "GET") additional_params = DictProperty() # a dictionary of additional parameters that will be sent in the request (optional) @classmethod def get_api_id(cls): return "HTTP" @classmethod def get_generic_name(cls): return "HTTP" @classmethod def get_template(cls): return "sms/http_backend.html" @classmethod def get_form_class(cls): return HttpBackendForm def send(self, msg, *args, **kwargs): if self.additional_params is not None: params = self.additional_params.copy() else: params = {} phone_number = msg.phone_number if self.include_plus: phone_number = 
clean_phone_number(phone_number) else: phone_number = strip_plus(phone_number) try: text = msg.text.encode("iso-8859-1") except UnicodeEncodeError: text = msg.text.encode("utf-8") params[self.message_param] = text params[self.number_param] = phone_number url_params = urlencode(params) try: if self.method == "GET": response = urlopen("%s?%s" % (self.url, url_params), timeout=settings.SMS_GATEWAY_TIMEOUT).read() else: response = urlopen(self.url, url_params, timeout=settings.SMS_GATEWAY_TIMEOUT).read() except Exception as e: msg = "Error sending message from backend: '{}'\n\n{}".format(self.name, str(e)) raise BackendProcessingException(msg), None, sys.exc_info()[2]
bsd-3-clause
7,341,694,058,746,408,000
34.441379
148
0.610235
false
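HttpBackend.send() above reduces to url-encoding the gateway parameters and issuing a GET or POST. A standalone sketch of that request construction (Python 3 urllib; the gateway URL and parameter names are hypothetical, not taken from any real gateway):

from urllib.parse import urlencode

def build_gateway_url(base_url, message_param, number_param, message, number, extra=None):
    # mirror the ordering in HttpBackend.send(): additional params first, then message and number
    params = dict(extra or {})
    params[message_param] = message
    params[number_param] = number
    return '%s?%s' % (base_url, urlencode(params))

print(build_gateway_url('https://gateway.example.com/send', 'text', 'to',
                        'hello', '15551234567', {'apikey': 'XYZ'}))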
yahyanik/phd_research
detection.py
1
3222
import numpy as np import cv2 #import matplotlib.pyplot as plt from collections import deque import imutils import argparse import sys ap = argparse.ArgumentParser() ap.add_argument("-b", "--buffer", type=int, default=32, help="max buffer size") ap.add_argument("-v", "--video", default=0, help="path to the (optional) video file") args = vars(ap.parse_args()) counter = 0 #pts = deque(maxlen=args["buffer"]) b = args["buffer"] #gives the int value of the input command pts = [[0 for x in range(b)] for y in range(b)] #creating the 2 dimensional list for directions face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') #loading cascade files eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') #face_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml') #loading cascade files #eye_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml') cap = cv2.VideoCapture(0) #loading the video file while True: ret, img = cap.read() blurred = cv2.GaussianBlur(img, (11, 11), 0) #blur is useful to reduce the false positives and negatives gray = cv2.cvtColor(blurred,cv2.COLOR_BGR2GRAY) direction = "" faces = face_cascade.detectMultiScale(gray) #using the cascaded file count = 0 for (x,y,w,h) in faces: # for each person detected cv2.rectangle(img, (x,y),(x+w,y+h), (255,0,0),2) #blue and with width of 2 count = count +1 center = [(x+x+w)/2 , (y+y+h)/2] # pts.appendleft(center) del pts[(count-1)][-1] pts[(count-1)].insert(0,center) #pts is a list that needs 0 in the beginning, but counter is 1 if counter >= 10 and pts[0][-10] != 0 : # if counter >= 10 and pts[-10] != 0 : dX = (pts[0][-10])[0] - (pts[0][0])[0] #go back 10 entries in the list and then read the x dimension from it dY = (pts[0][-10])[1] - (pts[0][0])[1] # dX = (pts[-10])[0] - (pts[0])[0] #go back 10 entries in the list and then read the x dimension from it # dY = (pts[-10])[1] - (pts[0])[1] (dirX, dirY) = ("", "") #to detect the way the person moves if np.abs(dX) > 20: dirX = "East" if np.sign(dX) == 1 else "West" if np.abs(dY) > 20: dirY = "North" if np.sign(dY) == 1 else "South" if dirX != "" and dirY != "": direction = "{}-{}".format(dirY, dirX) else: direction = dirX if dirX != "" else dirY # roi_gray = gray[y:y+h,x:x+w] # roi_color = img[y:y+h,x:x+w] # eyes = eye_cascade.detectMultiScale(roi_gray) # for (ex, ey,ew,eh) in eyes: # cv2.rectangle(roi_color, (ex,ey),(ex+ew,ey+eh),(0,255,0), 2) font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX cv2.putText(img,str(count),(x,y), font, 1, (255,255,0),2, cv2.LINE_AA)#the first value is the font scale and the second number is the thickness cv2.putText(img, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,0.65, (0, 0, 255), 3) # to print on the picture how many people are there in the frame and their direction cv2.imshow('img',img) counter = counter+1 k = cv2.waitKey(4) if k == 2: break cap.release() cv2.destroyAllWindows()
mit
-7,583,451,804,171,879,000
44.380282
122
0.598076
false
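The direction logic in detection.py above only compares the oldest and newest tracked centre points against a pixel threshold. A self-contained version of that classification (same ±20 pixel threshold and sign convention, no OpenCV required):

import numpy as np

def classify_direction(old_center, new_center, threshold=20):
    # dX/dY follow the sign convention used in the detector loop above
    dX = old_center[0] - new_center[0]
    dY = old_center[1] - new_center[1]
    dirX = ('East' if np.sign(dX) == 1 else 'West') if np.abs(dX) > threshold else ''
    dirY = ('North' if np.sign(dY) == 1 else 'South') if np.abs(dY) > threshold else ''
    if dirX and dirY:
        return '{}-{}'.format(dirY, dirX)
    return dirX or dirY

print(classify_direction((200, 150), (120, 90)))  # -> 'North-East'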
KFPA/ScrapyNews
IPProxyPool-master/spider/HtmlDownloader.py
1
1429
# coding:utf-8 import random import config import json from db.DataStore import sqlhelper __author__ = 'qiye' import requests import chardet class Html_Downloader(object): @staticmethod def download(url): try: r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT) r.encoding = chardet.detect(r.content)['encoding'] if (not r.ok) or len(r.content) < 500: raise ConnectionError else: return r.text except Exception: count = 0 # retry counter proxylist = sqlhelper.select(10) if not proxylist: return None while count < config.RETRY_TIME: try: proxy = random.choice(proxylist) ip = proxy[0] port = proxy[1] proxies = {"http": "http://%s:%s" % (ip, port), "https": "http://%s:%s" % (ip, port)} r = requests.get(url=url, headers=config.get_header(), timeout=config.TIMEOUT, proxies=proxies) r.encoding = chardet.detect(r.content)['encoding'] if (not r.ok) or len(r.content) < 500: raise ConnectionError else: return r.text except Exception: count += 1 return None
apache-2.0
-3,644,093,127,243,470,300
29.234043
115
0.500352
false
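Html_Downloader.download() above falls back to randomly chosen proxies once a direct fetch fails. A trimmed-down sketch of that retry loop, with the proxy list passed in explicitly instead of coming from sqlhelper (requests is assumed to be available, as in the original):

import random
import requests

def download_with_proxies(url, proxylist, retries=3, timeout=5):
    # first try a direct fetch, mirroring the original behaviour
    try:
        r = requests.get(url, timeout=timeout)
        if r.ok and len(r.content) >= 500:
            return r.text
    except requests.RequestException:
        pass
    # then retry through random proxies until the budget is exhausted
    for _ in range(retries):
        ip, port = random.choice(proxylist)
        proxies = {'http': 'http://%s:%s' % (ip, port),
                   'https': 'http://%s:%s' % (ip, port)}
        try:
            r = requests.get(url, timeout=timeout, proxies=proxies)
            if r.ok and len(r.content) >= 500:
                return r.text
        except requests.RequestException:
            continue
    return None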
robertpyke/PyThesis
thesis/tests/mappable_point.py
1
5835
import unittest import transaction import os import csv from pyramid import testing from thesis.models import DBSession from sqlalchemy import create_engine from thesis.models import ( Base, MappablePoint, Layer ) class TestMappableItem(unittest.TestCase): def setUp(self): self.config = testing.setUp() engine = create_engine('postgresql+psycopg2://thesis_db_user:[email protected]:5432/thesis_test_db') DBSession.configure(bind=engine) Base.metadata.create_all(engine) with transaction.manager: # Add TestLayer1 test_layer_1 = Layer(name='TestLayer1') test_layer_1.mappable_points = [ MappablePoint('Point(30 10)'), MappablePoint('Point(20 10)'), ] DBSession.add(test_layer_1) # Add TestLayer2 test_layer_2 = Layer(name='TestLayer2') test_layer_2.mappable_points = [ MappablePoint('Point(10 15)'), MappablePoint('Point(10 15)'), MappablePoint('Point(30 15)'), ] DBSession.add(test_layer_2) # Add Emu Layer tests_path = os.path.dirname(os.path.abspath(__file__)) test_fixtures_path = os.path.join(tests_path, 'fixtures') emu_csv_path = os.path.join(test_fixtures_path, 'emu.csv') emu_layer = Layer(name='Emu') with open(emu_csv_path, 'rb') as csvfile: emu_reader = csv.reader(csvfile) rownum = 0 header = None for row in emu_reader: # Save header row. if rownum == 0: header = row else: colnum = 0 latitude = 0 longitude = 0 for col in row: column_label = header[colnum] if column_label == "LNGDEC": longitude = col elif column_label == "LATDEC": latitude = col # print '%-8s: %s' % (column_label, col) colnum += 1 if longitude and latitude: mappable_point = MappablePoint('Point(%s %s)' % (longitude, latitude)) emu_layer.mappable_points.append(mappable_point) rownum += 1 DBSession.add(emu_layer) def tearDown(self): DBSession.remove() testing.tearDown() engine = create_engine('postgresql+psycopg2://thesis_db_user:[email protected]:5432/thesis_test_db') DBSession.configure(bind=engine) # Drop all the models Base.metadata.drop_all(engine) def test_search_layers_by_name(self): test_layer_1 = DBSession.query(Layer).\ filter_by(name='TestLayer1').one() self.assertEqual(test_layer_1.name, 'TestLayer1') self.assertEqual(len(test_layer_1.mappable_points), 2) test_layer_2 = DBSession.query(Layer).\ filter_by(name='TestLayer2').one() self.assertEqual(test_layer_2.name, 'TestLayer2') self.assertEqual(len(test_layer_2.mappable_points), 3) def test_emu_fixure_loaded(self): test_emu_layer = DBSession.query(Layer).\ filter_by(name='Emu').one() self.assertGreater(len(test_emu_layer.mappable_points), 5) def test_get_layer_points_as_geo_json(self): test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one() test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one() q = MappablePoint.get_points_as_geojson(test_layer_1) result = q.all() self.assertEqual(len(result), 2) # self.assertEqual(result[0].locations, '{"type":"MultiPoint","coordinates":[[20,10]]}') # self.assertEqual(result[1].locations, '{"type":"MultiPoint","coordinates":[[30,10]]}') q2 = MappablePoint.get_points_as_geojson(test_layer_2) result2 = q2.all() self.assertEqual(len(result2), 2) self.assertEqual(result2[0].centroid, '{"type":"Point","coordinates":[10,15]}') self.assertEqual(result2[1].centroid, '{"type":"Point","coordinates":[30,15]}') # self.assertEqual(result2[0].locations, '{"type":"MultiPoint","coordinates":[[10,15],[10,15]]}') # self.assertEqual(result2[1].locations, '{"type":"MultiPoint","coordinates":[[30,15]]}') def test_get_layer_points_as_wkt(self): test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one() q = 
MappablePoint.get_points_as_wkt(test_layer_1) result = q.all() self.assertEqual(len(result), 2) self.assertEqual(result[0].centroid, 'POINT(20 10)') # self.assertEqual(result[0].locations, 'MULTIPOINT(20 10)') self.assertEqual(result[1].centroid, 'POINT(30 10)') # self.assertEqual(result[1].locations, 'MULTIPOINT(30 10)') # SELECT ST_AsGeoJSON(location) from mappable_points WHERE location && ST_MakeEnvelope(-20,-20,20,20); # Each individual point as GeoJSON # SELECT ST_AsGeoJSON(location) from mappable_points; # GeoJSON of clusters snapped to grid within an envelope # SELECT ST_AsGeoJSON(ST_COLLECT(location)) from mappable_points WHERE location && ST_MakeEnvelope(-20,-20,20,20) GROUP BY ST_SNAPTOGRID(location, 1); # Centroid of clusters snapped to grid # SELECT ST_AsText(ST_Centroid(ST_COLLECT(location))) from mappable_points GROUP BY ST_SNAPTOGRID(location, 1); # All points as one collection as GeoJSON # SELECT ST_AsGeoJSON(ST_Collect(location)) from mappable_points;
mit
6,776,243,312,608,989,000
35.698113
150
0.581662
false
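The emu.csv fixture loop above tracks the header row by hand to find the LNGDEC/LATDEC columns. The same mapping can be written more compactly with csv.DictReader; a runnable sketch with an inline two-row sample (the column names match the fixture, the data values are made up):

import csv
import io

sample = "NAME,LNGDEC,LATDEC\nemu-1,146.2,-26.5\nemu-2,,\n"

def rows_to_wkt(csv_text, lng_col='LNGDEC', lat_col='LATDEC'):
    points = []
    for row in csv.DictReader(io.StringIO(sample)):
        lng, lat = row.get(lng_col), row.get(lat_col)
        if lng and lat:  # skip rows without coordinates, as the original loop does
            points.append('Point(%s %s)' % (lng, lat))
    return points

print(rows_to_wkt(sample))  # -> ['Point(146.2 -26.5)']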
fnoeding/exoself
src/compiler/ast2llvm.py
1
36178
# # The BSD License # # Copyright (c) 2008, Florian Noeding # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # Neither the name of the of the author nor the names of its contributors may be # used to endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # from __future__ import with_statement import setuppaths from llvm import * from llvm.core import * from llvm.ee import * import os.path import re from esfunction import ESFunction from esvalue import ESValue from esvariable import ESVariable from estype import ESType from errors import * import astwalker from tree import Tree, TreeType import typeannotator import llvmdebug class ModuleTranslator(astwalker.ASTWalker): def _addHelperFunctionsPreTranslation(self): # int puts(char *); returnTypes = [self._findSymbol(name=u'int32', type_=ESType)] paramTypes = [self._findSymbol(name=u'int8', type_=ESType).derivePointer()] esType = ESType.createFunction(returnTypes, paramTypes) esFunc = ESFunction(u'puts', '', '', esType, [u's'], mangling='C', linkage='extern') self._addSymbol(name=u'puts', symbol=esFunc) type = esType.toLLVMType() func = self._module.add_function(type, 'puts') # void abort(); returnTypes = [self._findSymbol(name=u'void', type_=ESType)] paramTypes = [] esType = ESType.createFunction(returnTypes, paramTypes) esFunc = ESFunction(u'abort', '', '', esType, [], mangling='C', linkage='extern') type = esType.toLLVMType() func = self._module.add_function(type, 'abort') def _addHelperFunctionsPostTranslation(self): # if this module contains a main function emit code which will call it flist = self._findSymbol(name=u'main', type_=ESFunction, mayFail=True) if flist: assert(len(flist) == 1) esMain = flist[0] s = [] s.append('The main function defined in this module has an unsupported signature.') s.append('supported signatures:') s.append('\tdef main() as int32') s.append('\tdef main() as void') int32 = self._findSymbol(name=u'int32', type_=ESType) void = self._findSymbol(name=u'void', type_=ESType) validA = ESType.createFunction([int32], []) validB = ESType.createFunction([void], []) ok = False for x in [validA, validB]: if x.isEquivalentTo(esMain.esType, False): ok = True if not ok: self._raiseException(RecoverableCompileError, postText=s) # has arguments? 
if len(esMain.esType.getFunctionParameterTypes()) == 0: functionType= Type.function(Type.int(32), []) function = self._module.add_function(functionType, 'main') entryBB = function.append_basic_block('entry') BB = function.append_basic_block('bb') b = Builder.new(entryBB) if self._debugMode: dbgSubProg = self._debugInfoBuilder.addFunctionInfoStart(module=self._module, builder=b, lineNumber=0, name='main', displayName='main') b.branch(BB) b = Builder.new(BB) r = b.call(esMain.llvmRef, []) retTypes = esMain.esType.getFunctionReturnTypes() assert(len(retTypes) == 1) if retTypes[0].toLLVMType() != Type.void(): b.ret(r) else: b.ret(Constant.int(Type.int(32), 0)) if self._debugMode: self._debugInfoBuilder.addFunctionInfoEnd(module=self._module, builder=b, subprogram=dbgSubProg) else: # TODO implement version with parameters self._raiseException(RecoverableCompileError, postText=s) def _addModuleXTors(self): # create *appending* global_ctors and global_dtors variables ft = Type.pointer(Type.function(Type.void(), [])) st = Type.struct([Type.int(32), ft]) def addXTors(xtors, what): assert what in ['ctors', 'dtors'] if not xtors: return t = Type.array(st, len(xtors)) gvar = self._module.add_global_variable(t, 'llvm.global_%s' % what) gvar.linkage = LINKAGE_APPENDING elems = [] for x in xtors: prio = Constant.int(Type.int(32), 65535) func = x.llvmRef elems.append(Constant.struct([prio, func])) init = Constant.array(st, elems) gvar.initializer = init addXTors(self._moduleCTors, 'ctors') addXTors(self._moduleDTors, 'dtors') def _setupDebugInformation(self): if not self._debugMode: return self._debugInfoBuilder = llvmdebug.DebugInfoBuilder() self._debugInfoBuilder.setupModule(self._module, self._targetData) self._debugInfoBuilder.addGlobalInfo(self._module) self._debugInfoBuilder.addCompileUnitInfo(self._module, self._filename) def _findCurrentFunction(self): for x in reversed(self._nodes): if x.type == TreeType.DEFFUNC: return x.esFunction assert(0 and 'no function found - type checker should have prevented this!') def _onModuleStart(self, ast, packageName, moduleName, statements): self._errors = 0 self._warnings = 0 self._module = Module.new(ast.moduleName) self._moduleNode = ast self._moduleCTors = ast.moduleCTors self._moduleDTors = ast.moduleDTors # setup target and data layout self._targetData = TargetData.new('e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128')# FIXME; this is just the llvm-gcc default for x86_64-unknown-linux-gnu self._module.data_layout = str(self._targetData) # FIXME self._module.target = 'x86_64-unknown-linux-gnu' # FIXME # setup debug Info self._setupDebugInformation() # add some helper functions / prototypes / ... 
to the module self._addHelperFunctionsPreTranslation() # first add global variables for x in statements: if x.type == TreeType.DEFGLOBAL: self._dispatch(x) # imported global variables don't have llvmRef entries: add them for k, v in ast.symbolTable.getAllSymbols().iteritems(): if not isinstance(v, ESVariable): continue if getattr(v, 'llvmRef', None): continue # can't reuse _onDefGlobal at the moment, since we need to declare an "extern" global variable llvmType = v.toLLVMType() mangledName = v.mangleName() v.llvmRef = self._module.add_global_variable(llvmType, mangledName) # use default linkage: external # translate for x in statements: # skip globals, as they were already handled above if x.type == TreeType.DEFGLOBAL: continue try: self._dispatch(x) except RecoverableCompileError, e: print e.message.rstrip() self._errors += 1 except CompileError, e: print e.message.rstrip() self._errors += 1 break if self._errors: raise CompileError('errors occured during compilation: aborting') # set module ctors, dtors self._addModuleXTors() # finally add some more helper functions / prototypes / ... to the module self._addHelperFunctionsPostTranslation() def _onImportAll(self, ast, moduleName): pass def _onDefFunction(self, ast, modifierKeys, modifierValues, name, returnTypeName, parameterNames, parameterTypeNames, block): esFunction = ast.esFunction esType = esFunction.esType try: # make really sure there is no function with this name llvmRef = self._module.get_function_named(esFunction.mangledName) except LLVMException: llvmRef = None if llvmRef: if not llvmRef.is_declaration: s1 = 'mangled name already in use: %s' % esFunction.mangledName s2 ='This can be caused by defining a function with the same signature multiple times. If that\'s not the case please submit a bugreport with a testcase.' self._raiseException(CompileError, tree=ast.getChild(1), inlineText=s1, postText=s2) else: llvmRef = self._module.add_function(esType.toLLVMType(), esFunction.mangledName) esFunction.llvmRef = llvmRef # provide access through symbol table ast.llvmRef = llvmRef # provide direct access through ast node # set parameter names for i,x in enumerate(parameterNames): llvmRef.args[i].name = x.text if not block: return entryBB = llvmRef.append_basic_block('entry') bEntry = Builder.new(entryBB) if self._debugMode: dbgSubProg = self._debugInfoBuilder.addFunctionInfoStart(module=self._module, builder=bEntry, lineNumber=ast.line, name=esFunction.name, displayName=esFunction.name) ast.dbgSubProg = dbgSubProg # add variables for i,x in enumerate(parameterNames): var = self._findSymbol(name=x.text, type_=ESVariable) var.llvmRef = self._createAllocaForVar(x.text, var.toLLVMType(), llvmRef.args[i]) if self._debugMode: self._debugInfoBuilder.addLocalVariableInfo(module=self._module, builder=bEntry, llvmRef=var.llvmRef, esType=var.esType, subprogram=dbgSubProg, name=x.text, lineNumber=x.line, varType='arg') # branch from entry to real code block and dispatch function body bb = llvmRef.append_basic_block('bb') self._currentBuilder = Builder.new(bb) bEntry.branch(bb) self._dispatch(block) returnTypes = esFunction.esType.getFunctionReturnTypes() bb = self._currentBuilder.block if not (bb.instructions and bb.instructions[-1].is_terminator): if len(returnTypes) == 1 and returnTypes[0].toLLVMType() == Type.void(): self._currentBuilder.ret_void() else: s = self._generateContext(preText='warning:', postText='control flow possibly reaches end of non-void function. 
Inserting trap instruction...', lineBase1=block.line, numAfter=3) trapFunc = Function.intrinsic(self._module, INTR_TRAP, []); self._currentBuilder.call(trapFunc, []) self._currentBuilder.ret(Constant.int(Type.int(32), -1)) # and return, otherwise func.verify will fail if self._debugMode: self._debugInfoBuilder.addFunctionInfoEnd(module=self._module, builder=self._currentBuilder, subprogram=dbgSubProg) llvmRef.verify() def _onBlock(self, ast, blockContent): for x in blockContent: if self._debugMode: self._debugInfoBuilder.addStopPoint(self._module, self._currentBuilder, x.line, x.charPos) self._dispatch(x) def _onReturn(self, ast, expressions): esFunction = None for n in reversed(self._nodes): if n.type == TreeType.DEFFUNC: esFunction = n.esFunction break assert(esFunction) returnTypes = esFunction.esType.getFunctionReturnTypes() assert(len(returnTypes) == 1) if returnTypes[0].toLLVMType() == Type.void(): assert(not expressions) self._currentBuilder.ret_void() else: self._dispatch(expressions[0]) llvmValue = expressions[0].llvmValue self._currentBuilder.ret(llvmValue) def _onAssert(self, ast, expression): self._dispatch(expression) # TODO add a compiler switch to disable asserts, so they become noop's # TODO add a compiler switch to disable inclusion of context data # if value is statically available bail out now / warn # this does not work... investigate later #if value == Constant.int(Type.int(1), 0): # print 'assert is always False in %s:%d' % ('???', ast.line()) # find current function llvmFunc = self._findCurrentFunction().llvmRef # now implement an if thenBB = llvmFunc.append_basic_block('assert_true') # trap path elseBB = llvmFunc.append_basic_block('assert_false') cond = self._currentBuilder.not_(expression.llvmValue) self._currentBuilder.cbranch(cond, thenBB, elseBB) thenBuilder = Builder.new(thenBB) # build error string if ast.line: errorStringConst = 'assert failed! file %s line %d:\n' % (self._filename, ast.line) start = max(ast.line - 1 - 5, 0) stop = min(ast.line - 1 + 1, len(self._sourcecodeLines)) for i in range(start, stop): errorStringConst += '% 5d: %s' % (i + 1, self._sourcecodeLines[i]) if i != stop - 1: errorStringConst += '\n' errorStringConst += ' # <----- failed\n' else: errorStringConst = '(unknown) assert failed!' 
errorStringConst = Constant.stringz(errorStringConst); errorString = self._module.add_global_variable(errorStringConst.type, 'assertErrorString') errorString.initializer = errorStringConst errorString.global_constant = True word = self._findSymbol(name=u'word', type_=ESType).toLLVMType() idx = [Constant.int(word, 0), Constant.int(word, 0)] errorStringGEP = errorString.gep(idx) puts = self._module.get_function_named('puts') thenBuilder.call(puts, [errorStringGEP]) # emit abort abortFunc = self._module.get_function_named('abort') thenBuilder.call(abortFunc, []) thenBuilder.branch(elseBB) # we'll never get here - but create proper structure of IR self._currentBuilder = Builder.new(elseBB) def _onIf(self, ast, expressions, blocks, elseBlock): llvmFunc = self._findCurrentFunction().llvmRef mergeBB = llvmFunc.append_basic_block('if_merge') for i in range(len(expressions)): thenBB = llvmFunc.append_basic_block('if_then') elseBB = llvmFunc.append_basic_block('if_else') self._dispatch(expressions[i]) self._currentBuilder.cbranch(expressions[i].llvmValue, thenBB, elseBB) # generate code for then branch self._currentBuilder = Builder.new(thenBB) self._dispatch(blocks[i]) # branch to mergeBB, but only if there was no terminator instruction currentBB = self._currentBuilder.block if not (currentBB.instructions and currentBB.instructions[-1].is_terminator): self._currentBuilder.branch(mergeBB) # continue with next else if / else self._currentBuilder = Builder.new(elseBB) if elseBlock: self._dispatch(elseBlock) # close last elseBB currentBB = self._currentBuilder.block if not (currentBB.instructions and currentBB.instructions[-1].is_terminator): self._currentBuilder.branch(mergeBB) # continue in mergeBB self._currentBuilder = Builder.new(mergeBB) def _onFor(self, ast, variableName, rangeStart, rangeStop, rangeStep, block): if rangeStart: self._dispatch(rangeStart) start = rangeStart.llvmValue else: start = Constant.int(Type.int(32), 0) # FIXME allow other types self._dispatch(rangeStop) stop = rangeStop.llvmValue if rangeStep: self._dispatch(rangeStep) step = rangeStep.llvmValue else: step = Constant.int(Type.int(32), 1) # FIXME allow other types inductVar = self._findSymbol(fromTree=variableName, type_=ESVariable) if not hasattr(inductVar, 'llvmRef'): inductVar.llvmRef = self._createAllocaForVar(variableName.text, inductVar.esType.toLLVMType()) # setup loop by initializing induction variable self._currentBuilder.store(start, inductVar.llvmRef) # create blocks llvmFunc = self._findCurrentFunction().llvmRef headBB = llvmFunc.append_basic_block('head') # decide between Up and Down headDownBB = llvmFunc.append_basic_block('headDown') headUpBB = llvmFunc.append_basic_block('headUp') bodyBB = llvmFunc.append_basic_block('body') stepBB = llvmFunc.append_basic_block('step') # TODO: think about implementing an 'else' block, that gets called when the loop does not get executed mergeBB = llvmFunc.append_basic_block('merge') self._currentBuilder.branch(headBB) # setup continue / break targets ast.breakTarget = mergeBB ast.continueTarget = stepBB # count up or down? 
b = Builder.new(headBB) cond = b.icmp(IPRED_SGT, step, Constant.int(step.type, 0)) b.cbranch(cond, headUpBB, headDownBB) # count down check b = Builder.new(headDownBB) cond = b.icmp(IPRED_SGT, b.load(inductVar.llvmRef), stop) b.cbranch(cond, bodyBB, mergeBB) # count up check b = Builder.new(headUpBB) cond = b.icmp(IPRED_SLT, b.load(inductVar.llvmRef), stop) b.cbranch(cond, bodyBB, mergeBB) # build loop body self._currentBuilder = Builder.new(bodyBB) self._dispatch(block) # end loop body with branch to stepBB self._currentBuilder.branch(stepBB) # now increment inductVar and branch back to head for another round b = Builder.new(stepBB) r = b.add(b.load(inductVar.llvmRef), step) b.store(r, inductVar.llvmRef) b.branch(headBB) # done! continue outside loop body self._currentBuilder = Builder.new(mergeBB) def _onWhile(self, ast, expression, block): # create blocks llvmFunc = self._findCurrentFunction().llvmRef headBB = llvmFunc.append_basic_block('head') bodyBB = llvmFunc.append_basic_block('body') mergeBB = llvmFunc.append_basic_block('merge') # branch to headBB / enter loop self._currentBuilder.branch(headBB) # create test self._currentBuilder = Builder.new(headBB) self._dispatch(expression) self._currentBuilder.cbranch(expression.llvmValue, bodyBB, mergeBB) # build body self._currentBuilder = Builder.new(bodyBB) ast.breakTarget = mergeBB ast.continueTarget = headBB self._dispatch(block) self._currentBuilder.branch(headBB) # continue with mergeBB self._currentBuilder = Builder.new(mergeBB) def _onBreak(self, ast): target = None for n in reversed(self._nodes): if hasattr(n, 'breakTarget'): target = n.breakTarget break assert(target and 'type checker should make sure that there is a break target') self._currentBuilder.branch(target) def _onContinue(self, ast): target = None for n in reversed(self._nodes): if hasattr(n, 'continueTarget'): target = n.continueTarget break assert(target and 'type checker should make sure that there is a break target') self._currentBuilder.branch(target) def _onPass(self, ast): pass def _onIntegerConstant(self, ast, value, suffix): ast.llvmValue = Constant.int(ast.esType.toLLVMType(), value) def _onFloatConstant(self, ast, value, suffix): ast.llvmValue = Constant.real(ast.esType.toLLVMType(), str(value)) def _onStringConstant(self, ast, constant): # FIXME s = constant.text assert(s.startswith('ar"')) s = s[3:-1] stringConst = Constant.stringz(s) string = self._module.add_global_variable(stringConst.type, 'internalStringConstant') string.initializer = stringConst string.global_constant = True string.linkage = LINKAGE_INTERNAL word = self._findSymbol(name=u'word', type_=ESType).toLLVMType() idx = [Constant.int(word, 0), Constant.int(word, 0)] ast.llvmValue = string.gep(idx) def _onVariable(self, ast, variableName): # first try to find a function (which resolves to it's address), then a normal variable flist = self._findSymbol(fromTree=variableName, type_=ESFunction, mayFail=True) if flist: if len(flist) > 1: self._raiseException(RecoverableCompileError, tree=variableName, inlineText='taking the address of a overloaded function is not implemented, yet') f = flist[0] ast.llvmValue = f.llvmRef ast.llvmRef = f.llvmRef else: var = self._findSymbol(fromTree=variableName, type_=ESVariable) ast.llvmValue = self._currentBuilder.load(var.llvmRef) ast.llvmRef = var.llvmRef def _createAllocaForVar(self, name, llvmType, value=None): # FIXME if llvmType.kind == TYPE_INTEGER: defaultValue = Constant.int(llvmType, 0) elif llvmType.kind in [TYPE_FLOAT, TYPE_DOUBLE]: defaultValue = 
Constant.real(llvmType, 0) elif llvmType.kind == TYPE_POINTER: defaultValue = Constant.null(llvmType) elif llvmType.kind == TYPE_STRUCT: defaultValue= Constant.null(llvmType) else: assert(0 and 'unsupported variable type') if value == None: value = defaultValue # use the usual LLVM pattern to create mutable variables: use alloca # important: the mem2reg pass is limited to analyzing the entry block of functions, # so all variables must be defined there llvmFunc = self._findCurrentFunction().llvmRef entryBB = llvmFunc.get_entry_basic_block() entryBuilder = Builder.new(entryBB) entryBuilder.position_at_beginning(entryBB) ref = entryBuilder.alloca(llvmType, name) entryBuilder.store(value, ref) return ref def _onDefVariable(self, ast, variableName, typeName): var = self._findSymbol(fromTree=variableName, type_=ESVariable) var.llvmRef = self._createAllocaForVar(variableName.text, var.esType.toLLVMType()) if self._debugMode: # first find reference to dbgSubProg / enclosing function ast node dbgSubProg = None for n in reversed(self._nodes): if hasattr(n, 'dbgSubProg'): dbgSubProg = n.dbgSubProg break assert(dbgSubProg and '_onDefVariable works only inside functions') self._debugInfoBuilder.addLocalVariableInfo(module=self._module, builder=self._currentBuilder, llvmRef=var.llvmRef, esType=var.esType, subprogram=dbgSubProg, name=variableName.text, lineNumber=variableName.line, varType='auto') def _onDefGlobal(self, ast, variableName, typeName, expression): var = self._findSymbol(fromTree=variableName, type_=ESVariable) llvmType = var.toLLVMType() mangledName = var.mangleName() # FIXME use name mangling! var.llvmRef = self._module.add_global_variable(llvmType, mangledName) llvmRef = var.llvmRef #llvmRef.linkage = LINKAGE_COMMON if typeName: llvmRef.initializer = Constant.null(llvmType) else: try: self._dispatch(expression) llvmRef.initializer = expression.llvmValue except AttributeError, ae: assert('_currentBuilder' in str(ae)) # TODO replace with check based on AST self._raiseException(RecoverableCompileError, tree=expression, inlineText='expected trivial constant expression') def _onCallFunc(self, ast, calleeName, expressions): params = [] for x in expressions: self._dispatch(x) params.append(x.llvmValue) esFunction = ast.esFunction llvmFunc = getattr(esFunction, 'llvmRef', None) if not llvmFunc: # try to find function in this module try: llvmFunc = self._module.get_function_named(esFunction.mangledName) except LLVMException: llvmFunc = None if not llvmFunc: # was callee a function pointer? esVariable = self._findSymbol(fromTree=calleeName, type_=ESVariable, mayFail=True) if esVariable: llvmFunc = self._currentBuilder.load(esVariable.llvmRef) else: # function was not declared, yet... 
llvmFunc = self._module.add_function(esFunction.esType.toLLVMType(), esFunction.mangledName) ast.llvmValue = self._currentBuilder.call(llvmFunc, params) def _onBasicOperator(self, ast, op, arg1, arg2): tt = TreeType # arg1 is always valid, arg2 may be None self._dispatch(arg1) if arg2: self._dispatch(arg2) if op == tt.PLUS: if arg2: ast.llvmValue = self._currentBuilder.add(arg1.llvmValue, arg2.llvmValue) else: ast.llvmValue = arg1.llvmValue elif op == tt.MINUS: if arg2: ast.llvmValue = self._currentBuilder.sub(arg1.llvmValue, arg2.llvmValue) else: ast.llvmValue = self._currentBuilder.sub(Constant.null(arg1.llvmValue.type), arg1.llvmValue) elif op == tt.STAR: ast.llvmValue = self._currentBuilder.mul(arg1.llvmValue, arg2.llvmValue) elif op == tt.SLASH: if arg1.esType.isSignedInteger(): ast.llvmValue = self._currentBuilder.sdiv(arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isUnsignedInteger(): ast.llvmValue = self._currentBuilder.udiv(arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isFloatingPoint(): ast.llvmValue = self._currentBuilder.fdiv(arg1.llvmValue, arg2.llvmValue) else: raise NotImplementedError('FIXME? TODO?') elif op == tt.PERCENT: if arg1.esType.isSignedInteger(): ast.llvmValue = self._currentBuilder.srem(arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isUnsignedInteger(): ast.llvmValue = self._currentBuilder.urem(arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isFloatingPoint(): ast.llvmValue = self._currentBuilder.frem(arg1.llvmValue, arg2.llvmValue) else: raise NotImplementedError('TODO') elif op == tt.NOT: ast.llvmValue = self._currentBuilder.not_(arg1.llvmValue) elif op == tt.AND: ast.llvmValue = self._currentBuilder.and_(arg1.llvmValue, arg2.llvmValue) elif op == tt.OR: ast.llvmValue = self._currentBuilder.or_(arg1.llvmValue, arg2.llvmValue) elif op == tt.XOR: ast.llvmValue = self._currentBuilder.xor(arg1.llvmValue, arg2.llvmValue) elif op in [tt.LESS, tt.LESSEQUAL, tt.EQUAL, tt.NOTEQUAL, tt.GREATEREQUAL, tt.GREATER]: if arg1.esType.isSignedInteger() and arg2.esType.isSignedInteger(): preds = {} preds[tt.LESS] = IPRED_SLT preds[tt.LESSEQUAL] = IPRED_SLE preds[tt.EQUAL] = IPRED_EQ preds[tt.NOTEQUAL] = IPRED_NE preds[tt.GREATEREQUAL] = IPRED_SGE preds[tt.GREATER] = IPRED_SGT ast.llvmValue = self._currentBuilder.icmp(preds[op], arg1.llvmValue, arg2.llvmValue) elif (arg1.esType.isUnsignedInteger() and arg2.esType.isUnsignedInteger()) or (arg1.esType.isPointer() and arg2.esType.isPointer()): preds = {} preds[tt.LESS] = IPRED_ULT preds[tt.LESSEQUAL] = IPRED_ULE preds[tt.EQUAL] = IPRED_EQ preds[tt.NOTEQUAL] = IPRED_NE preds[tt.GREATEREQUAL] = IPRED_UGE preds[tt.GREATER] = IPRED_UGT ast.llvmValue = self._currentBuilder.icmp(preds[op], arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isBoolean() and arg2.esType.isBoolean(): preds = {} preds[tt.EQUAL] = IPRED_EQ preds[tt.NOTEQUAL] = IPRED_NE ast.llvmValue = self._currentBuilder.icmp(preds[op], arg1.llvmValue, arg2.llvmValue) elif arg1.esType.isFloatingPoint() and arg2.esType.isFloatingPoint(): # TODO think about ordered and unordered comparisions... 
# for now ordered preds = {} preds[tt.LESS] = RPRED_OLT preds[tt.LESSEQUAL] = RPRED_OLE preds[tt.EQUAL] = RPRED_OEQ preds[tt.NOTEQUAL] = RPRED_ONE preds[tt.GREATEREQUAL] = RPRED_OGE preds[tt.GREATER] = RPRED_OGT ast.llvmValue = self._currentBuilder.fcmp(preds[op], arg1.llvmValue, arg2.llvmValue) else: print arg1.esType, arg2.esType raise NotImplementedError('TODO') elif op == tt.DOUBLESTAR: if arg2.llvmValue.type.kind == TYPE_INTEGER: # powi powiFunc = Function.intrinsic(self._module, INTR_POWI, [arg1.llvmValue.type]) ast.llvmValue = self._currentBuilder.call(powiFunc, [arg1.llvmValue, arg2.llvmValue]) else: # pow raise NotImplementedError('TODO') else: raise NotImplementedError('operator not implemented: %s / "%s"' % (op, ast.text)) def _simpleAssignment(self, var, llvmValue): if not hasattr(var, 'llvmRef'): # does not have an associated alloca, yet # we MUST NOT pass a value to _createAllocaForVar! That value is not available in the entry BB! var.llvmRef = self._createAllocaForVar(var.name, var.esType.toLLVMType()) if self._debugMode: # first find reference to dbgSubProg / enclosing function ast node dbgSubProg = None for n in reversed(self._nodes): if hasattr(n, 'dbgSubProg'): dbgSubProg = n.dbgSubProg break assert(dbgSubProg and '_onDefVariable works only inside functions') self._debugInfoBuilder.addLocalVariableInfo(module=self._module, builder=self._currentBuilder, llvmRef=var.llvmRef, esType=var.esType, subprogram=dbgSubProg, name=var.name, lineNumber=0, varType='auto') # FIXME fix line number self._currentBuilder.store(llvmValue, var.llvmRef) def _onAssign(self, ast, assigneeExpr, expression): self._dispatch(expression) # FIXME if assigneeExpr.type == TreeType.VARIABLE: variableName = assigneeExpr.children[0] var = self._findSymbol(fromTree=variableName, type_=ESVariable) self._simpleAssignment(var, expression.llvmValue) elif assigneeExpr.type == TreeType.DEREFERENCE: self._dispatch(assigneeExpr) #variableName = assigneeExpr.children[0] #var = self._findSymbol(fremTree=variableName, type_=ESVariable) self._currentBuilder.store(expression.llvmValue, assigneeExpr.llvmRef) else: assert(0 and 'FIXME? 
TODO?') def _onListAssign(self, ast, variableNames, expressions): # use a very simple aproach: # copy source variables into temporary variables # copy data from temporary variables to destination variables # this avoids difficult cases like: a,b = b,a or a,b,c = b,b,b # but a,b = c,d is a bit slower - but the optimizer should transform that to an efficient version # copy source -> temp temps = [] n = len(variableNames) assert(n == len(expressions)) for i in range(n): self._dispatch(expressions[i]) ref = self._currentBuilder.alloca(expressions[i].esType.toLLVMType(), u'listassign_tmp') self._currentBuilder.store(expressions[i].llvmValue, ref) esVar = ESVariable(u'listassign_tmp', '__local', '__local', expressions[i].esType) # TODO insert real pkg / module names esVar.llvmRef = ref temps.append(esVar) # copy temp -> destination # this is a simple assignment for i in range(n): if variableNames[i].type == TreeType.VARIABLE: var = self._findSymbol(fromTree=variableNames[i].children[0], type_=ESVariable) value = self._currentBuilder.load(temps[i].llvmRef) self._simpleAssignment(var, value) else: assert(0 and 'TODO') def _onCast(self, ast, expression, typeName): self._dispatch(expression) bool = self._findSymbol(name=u'bool', type_=ESType) targetT = ast.esType sourceT = expression.esType if targetT.isEquivalentTo(sourceT, True):# may be really the same or only structurally # FIXME TODO is this correct??? ast.llvmValue = expression.llvmValue return bad = False if targetT.isEquivalentTo(bool, False): if sourceT.isSignedInteger() or sourceT.isUnsignedInteger(): ast.llvmValue = self._currentBuilder.icmp(IPRED_NE, expression.llvmValue, Constant.int(expression.llvmValue.type, 0)) elif sourceT.isFloatingPoint(): # TODO think about ordered and unordered # for now use ordered ast.llvmValue = self._currentBuilder.fcmp(RPRED_ONE, expression.llvmValue, Constant.real(expression.llvmValue.type, '0')) else: bad = True elif targetT.isSignedInteger(): if sourceT.isEquivalentTo(bool, False): ast.llvmValue = self._currentBuilder.zext(expression.llvmValue, targetT.toLLVMType()) elif sourceT.isSignedInteger(): t = targetT.toLLVMType() s = sourceT.toLLVMType() tBits = t.width sBits = s.width if sBits > tBits: ast.llvmValue = self._currentBuilder.trunc(expression.llvmValue, t) elif sBits < tBits: ast.llvmValue = self._currentBuilder.sext(expression.llvmValue, t) else: assert(0 and 'dead code path; should have been caught by other checks!') elif sourceT.isFloatingPoint(): ast.llvmValue = self._currentBuilder.fptosi(expression.llvmValue, targetT.toLLVMType()) else: bad = True elif targetT.isUnsignedInteger(): if sourceT.isEquivalentTo(bool, False): bad = True # FIXME elif sourceT.isUnsignedInteger(): t = targetT.toLLVMType() s = sourceT.toLLVMType() tBits = t.width sBits = s.width if sBits > tBits: ast.llvmValue = self._currentBuilder.trunc(expression.llvmValue, t) elif sBits < tBits: ast.llvmValue = self._currentBuilder.zext(expression.llvmValue, t) else: assert(0 and 'dead code path; should have been caught by other checks!') elif sourceT.isSignedInteger(): t = targetT.toLLVMType() s = sourceT.toLLVMType() tBits = t.width sBits = s.width if sBits > tBits: raise NotImplementedError() elif sBits < tBits: ast.llvmValue = self._currentBuilder.sext(expression.llvmValue, t) else: # FIXME??? 
ast.llvmValue = expression.llvmValue else: bad = True elif targetT.isFloatingPoint(): if sourceT.isSignedInteger(): ast.llvmValue = self._currentBuilder.sitofp(expression.llvmValue, targetT.toLLVMType()) elif sourceT.isUnsignedInteger(): ast.llvmValue = self._currentBuilder.uitofp(expression.llvmValue, targetT.toLLVMType()) else: bad = True elif targetT.isPointer(): if sourceT.isPointer(): ast.llvmValue = self._currentBuilder.bitcast(expression.llvmValue, targetT.toLLVMType()) #ast.llvmValue = expression.llvmValue else: bad = True else: bad = True if bad: raise NotImplementedError('cast from %s to %s is not yet supported' % (sourceT, targetT)) def _onDereference(self, ast, expression, indexExpression): self._dispatch(expression) # we have a problem: The derefencing is ambiguous # either we want to load a value from memory --> we need ast.llvmValue # or we want to store a value to memory --> we need ast.llvmRef # when storing data to memory the load is wasteful - but it's result never get's used # so the optimizer will remove it # for now stay stay with the inefficient code... word = self._findSymbol(name=u'word', type_=ESType).toLLVMType() esType = expression.esType if esType.isPointer(): if indexExpression: self._dispatch(indexExpression) if indexExpression.llvmValue.type != word: llvmValue = indexExpression.llvmValue llvmType = llvmValue.type if llvmType.kind != TYPE_INTEGER: self._raiseException(RecoverableCompileError, tree=indexExpression, inlineText='index type must be integer') if llvmType.width == 32 and word.width == 64: llvmValue = self._currentBuilder.sext(llvmValue, word) elif llvmType.width == 64 and word.width == 32: self._raise(RecoverableCompileError, tree=indexExpression, inlineText='the target architecture only supports 32 bit indices') else: assert(0 and 'FIXME or should this never happen --> dead code path?') else: llvmValue = indexExpression.llvmValue idx = [llvmValue] else: idx = [Constant.int(word, 0)] toDeref = expression.llvmValue elif esType.isStruct(): if indexExpression.type == TreeType.NAME: memberIdx = esType.getStructMemberIndexByName(indexExpression.text) idx = [Constant.int(word, 0), Constant.int(Type.int(32), memberIdx)] else: raise NotImplementedError('TODO') toDeref = expression.llvmRef else: assert(0 and 'dead code path') # every variable is an alloca --> first get the real memory address realAddrWithOffset = self._currentBuilder.gep(toDeref, idx) ast.llvmRef = realAddrWithOffset # now load data from it ast.llvmValue = self._currentBuilder.load(realAddrWithOffset) def _onAlias(self, ast, name, typeName): pass def _onTypedef(self, ast, name, typeName): pass def _onAddressOf(self, ast, expression): self._dispatch(expression) # see _onDereference for the ambigous use of this instruction ast.llvmRef = expression.llvmRef ast.llvmValue = expression.llvmRef def _onNew(self, ast, typeName, numExpr): if numExpr: self._dispatch(numExpr) numElements = numExpr.llvmValue else: numElements = Constant.int(Type.int(32), 1) ast.llvmValue = self._currentBuilder.malloc_array(ast.esType.dereference().toLLVMType(), numElements) def _onDefStruct(self, ast, name, members): self._module.add_type_name(name.text, ast.esType.toLLVMType()) def _onNoneConstant(self, ast): ast.llvmValue = Constant.null(Type.pointer(Type.int(8))) def _onBooleanConstant(self, ast, value): ast.llvmValue = Constant.int(Type.int(1), value) def walkAST(self, ast, absFilename, sourcecode='', debugMode=False): assert(ast.type == TreeType.MODULESTART) self._module = None self._debugMode = debugMode 
astwalker.ASTWalker.walkAST(self, ast, absFilename, sourcecode) self._module.verify() return self._module def run(module, function): mp = ModuleProvider.new(module) ee = ExecutionEngine.new(mp) return ee.run_function(function, [])
bsd-3-clause
6,913,820,680,705,112,000
31.330652
238
0.71842
false
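The code generator above leans on the classic pattern of placing every mutable variable's alloca in the function entry block so that mem2reg can promote it. The original uses the long-deprecated llvm-py bindings; the sketch below shows the same pattern with llvmlite instead, purely as an illustration of the idea, not of the exoself compiler itself:

from llvmlite import ir

module = ir.Module(name='demo')
fnty = ir.FunctionType(ir.IntType(32), [ir.IntType(32)])
func = ir.Function(module, fnty, name='inc')

entry = func.append_basic_block('entry')
body = func.append_basic_block('bb')

# allocas live in the entry block, just as _createAllocaForVar arranges above
b = ir.IRBuilder(entry)
slot = b.alloca(ir.IntType(32), name='x')
b.store(func.args[0], slot)
b.branch(body)

b = ir.IRBuilder(body)
value = b.load(slot, name='x.load')
b.ret(b.add(value, ir.Constant(ir.IntType(32), 1)))

print(module)  # textual LLVM IR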
sinaptix/learnpython
testers/lab00/lab00_tester.py
1
1734
#!/usr/bin/env python3 import subprocess import sys import os import filecmp mycmd = "python" myarg = "hello.py" student_out = 'student_out' student_err = 'student_err' golden = 'golden.txt' f_student_out = open(student_out, 'w') f_student_err = open(student_err, 'w') # Execute program try: retcode = subprocess.call([mycmd, myarg], stdout=f_student_out, stderr=f_student_err) if retcode < 0: print("Child was terminated by signal", -retcode, file=f_student_err); else: if retcode != 0: print("Child returned", retcode, file=f_student_err); except OSError as e: print("Execution failed:", e, file=f_student_err); f_student_out.close() f_student_err.close() #sys.stdout.close() #sys.stderr.close() # Clean pre-existing pass/fail files try: os.remove('pass') except OSError: pass try: os.remove('fail') except OSError: pass # Check outputs if retcode != 0: passfail = 'fail' else: f_student_out = open(student_out, 'r') f_golden = open('golden.txt', 'r') set_student = set(f_student_out) set_golden = set(f_golden) same = set_student.intersection(set_golden); same.discard('\n') #diff = set_student.difference(set_golden).union(set_golden.difference(set_student)) # Writes in order {Golden, Student} diff = set_golden.difference(set_student).union(set_student.difference(set_golden)) diff.discard('\n') f_results = open('results.txt', 'w') print("Sames(%d):%s"%(len(same), same), file = f_results) print("Diffs(%d):%s"%(len(diff), diff), file = f_results) if (len(diff) == 0): passfail = 'pass' else: passfail = 'fail' f_passfail = open(passfail, 'w') f_passfail.close()
gpl-2.0
8,573,284,218,161,393,000
22.753425
94
0.643022
false
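The pass/fail decision in lab00_tester.py above is a set comparison between the student output and a golden file. That check can be isolated into a small function that is easy to test without any subprocess plumbing:

def compare_outputs(student_lines, golden_lines):
    student, golden = set(student_lines), set(golden_lines)
    same = student & golden
    diff = (golden - student) | (student - golden)
    same.discard('\n')
    diff.discard('\n')
    return ('pass' if not diff else 'fail'), same, diff

verdict, same, diff = compare_outputs(['hello world\n'], ['hello world\n'])
print(verdict)  # -> pass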
jkarabas/snarkx
snarkx/io/miscs.py
1
6382
import os import re import networkx as nx import string from os.path import abspath, expanduser, isdir, join, isfile, splitext from os import walk from random import randint from snarkx.common import FrozenDic from snarkx.io import GraphReaderBA, GraphReaderG6, GraphWriterBA, GraphWriterG6, GraphReaderS6, GraphWriterS6 __author__ = 'Jan Karabas' __project__ = 'snarkx' __all__ = [ 'IO_READERS', 'IO_WRITERS', 'file_name_hint', 'graph_string', 'traverse_directory', 'walk_path', ] IO_READERS = FrozenDic({ 'ba': GraphReaderBA, 'g6': GraphReaderG6, 's6': GraphReaderS6, }) IO_WRITERS = FrozenDic({ 'ba': GraphWriterBA, 'g6': GraphWriterG6, 's6': GraphWriterS6, }) def graph_string(gph: nx.Graph) -> str: ret = '' for u in gph.nodes_iter(): ret += ('{0}: '.format(u)) ret += ('{0}\n'.format(', '.join(['{0}'.format(v) for v in gph.neighbors(u)]))) return ret def traverse_directory(path: str): """ :param path: :return: """ root_dir = abspath(expanduser(path)) files = [] if isdir(root_dir): for dirName, _, fileList in walk(root_dir): files.extend(fileList) break return [join(root_dir, f) for f in files] def random_name(n=6) -> str: """ Return a pseudorandom string of characters of `ascii_letters` and `digits` without a repetition. :param n: the length of the returned string (default: 6) :type n: int :return: a string of given length :rtype: str :raise AssertionError: if `n <= 0` """ assert n > 0 # FIXME: must raise if n > len(characters) characters = string.ascii_letters + string.digits _l = len(characters) _ret = '' _used = set() for i in range(n): _idx = randint(0, _l - 1) _c = characters[_idx] if not (_c in _used): _ret += _c _used.add(_c) return _ret def file_probably_ba(path: str) -> bool: """ Tests whether the the file on `path` exists and if it has one of expacted extensions (``.ba``, ``.ALL``, or at least two decimal digits). :param path: the file path to test :type path: str :return: `True` if the path points to a file in BA format, `False` otherwise """ if isfile(path): _regexps = [ re.compile('^\.\d\d\d*$', re.IGNORECASE), re.compile('^\.ALL$', re.IGNORECASE), re.compile('^\.BA$', re.IGNORECASE) ] _, _ext = splitext(path) for _re in _regexps: if not (_re.match(_ext) is None): return True return False def file_name_hint(path: str, reader: object) -> bool: """ The function test whether the path points to a file and if yes, it test whether it has adequate extension. Two file kinds of readers are accepted at the moment: `BA format` and `Graph6`. :type path: str :param path: the file path to test :type reader: object :param reader: either the reader class or a string defining the reader. Two literals are supported :return: ``True`` if the file exist and the ``path`` has one of the admissible extensions, ``False`` otherwise. Note that if the reader is recognised and the path points to a file, ``True`` is returned. """ if isfile(path): a_reader = None if isinstance(reader, str) and reader.lower() in IO_READERS: a_reader = IO_READERS[reader.lower()] else: a_reader = reader _, _ext = splitext(path) _regexps = None if not (hasattr(a_reader, 'kind')): pass elif a_reader.kind() == 'ba': _regexps = [ re.compile('^\.\d\d\d*$', re.IGNORECASE), re.compile('^\.ALL$', re.IGNORECASE), re.compile('^\.BA$', re.IGNORECASE) ] elif a_reader.kind() == 'g6': _regexps = [ re.compile('^\.g6$', re.IGNORECASE), ] elif a_reader.kind() == 's6': _regexps = [ re.compile('^\.s6$', re.IGNORECASE), ] else: # shall we log this; a warning should be thrown? 
pass if _regexps is None: return True # we have no limit on file name, because reader kind is unknown else: for _re in _regexps: if not (_re.match(_ext) is None): return True return False def walk_path(path: (str, bytes), recurse=False, hint=None): """ A generator returning absolute paths to the files following the ``path``. If the object at ``path`` does not exist, returns the empty generator, if the object at ``path`` is a file, one-element generator is returned, traverses through the directory at ``path`` otherwise :param path: the path-like object :type path: str, bytes :param recurse: search recursively, or (if `False`) list the directory only :type recurse: bool :param hint: the value accepted by ``file_name_hint`` function. At this moment use either ``"g6"`` or ``"ba"``. Other values do not affect the generator (all files regardless of the type hint are listed). :return: the generator object """ _path = os.path.abspath(os.path.expandvars(os.path.expanduser(path))) if not (isinstance(_path, (str, bytes))): return if os.path.isdir(_path): if recurse: ret = [] for _root, _, _filenames in os.walk(_path): for _name in _filenames: _p = os.path.join(_root, _name) if file_name_hint(_p, hint): yield _p # ret.append(_p) # return ret else: for _name in os.listdir(_path): _p = os.path.join(_path, _name) if file_name_hint(_p, hint): yield _p # return [os.path.join(_path, _x) for _x in os.listdir(_path) if # file_name_hint(os.path.join(_path, _x), hint)] else: if file_name_hint(_path, hint): # return [_path] yield _path if __name__ == '__main__': for _e in walk_path('~/.snarkx/snarkbase/whatever'): print(_e) print(walk_path('~/.snarkx/snarkbase'))
bsd-3-clause
421,645,369,970,011,000
28.141553
115
0.558446
false
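file_name_hint() above decides purely from filename extensions. The BA-format regexes can be exercised on their own to see which names would be accepted; a small runnable check (the sample file names are invented):

import re
from os.path import splitext

BA_EXT_REGEXPS = [
    re.compile(r'^\.\d\d\d*$', re.IGNORECASE),  # numeric extensions such as .05 or .123
    re.compile(r'^\.ALL$', re.IGNORECASE),
    re.compile(r'^\.BA$', re.IGNORECASE),
]

def looks_like_ba(name):
    _, ext = splitext(name)
    return any(rx.match(ext) for rx in BA_EXT_REGEXPS)

for name in ['snarks.05', 'snarks.ALL', 'graphs.g6']:
    print(name, looks_like_ba(name))  # True, True, False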
bmaupin/junkpile
python/wordpress-mu/wpmu_usage_data.py
1
6008
#!/usr/bin/env python import MySQLdb import os import sys import wpmu home = os.getenv('HOME') daily_filename = '%s/Desktop/daily.txt' % (home) total_filename = '%s/Desktop/total.txt' % (home) daily_users_filename = '%s/Desktop/daily_users.txt' % (home) total_users_filename = '%s/Desktop/total_users.txt' % (home) blog_deletions = {'2009-06-03': 2350, '2009-07-01': 22, '2009-08-03': 9, '2009-09-01': 26, '2009-10-01': 47, '2009-11-05': 21} def main(): # output_daily_user_registrations() # output_total_users() output_daily_registrations() output_total_registrations() def get_daily_blog_registrations(): daily_blog_registrations = {} try: wpmu.cursor.execute("SELECT date_registered FROM wp_registration_log") result = wpmu.cursor.fetchall() for record in result: date_registered = record[0].strftime('%Y-%m-%d') if date_registered not in daily_blog_registrations: daily_blog_registrations[date_registered] = 0 daily_blog_registrations[date_registered] += 1 except MySQLdb.Error, error: # if there's an error sys.stderr.write('ERROR:\t%s\n' % (error)) # write to the error log sys.exit(1) # exit the script return daily_blog_registrations def get_daily_user_registrations(): daily_user_registrations = {} try: wpmu.cursor.execute("SELECT user_registered FROM wp_users") result = wpmu.cursor.fetchall() for record in result: user_registered = record[0].strftime('%Y-%m-%d') if user_registered not in daily_user_registrations: daily_user_registrations[user_registered] = 0 daily_user_registrations[user_registered] += 1 except MySQLdb.Error, error: # if there's an error sys.stderr.write('ERROR:\t%s\n' % (error)) # write to the error log sys.exit(1) # exit the script return daily_user_registrations def get_daily_registrations(): """The key for daily_registrations is the day, the value is a list where the first element is the number of blog registrations that day and the second is the number of user registrations that day """ daily_registrations = {} daily_blog_registrations = get_daily_blog_registrations() daily_user_registrations = get_daily_user_registrations() # update daily registrations with blog data for day in daily_blog_registrations: daily_registrations[day] = [] daily_registrations[day].append(daily_blog_registrations[day]) # update daily registrations with user data for day in daily_user_registrations: if day not in daily_registrations: daily_registrations[day] = [0] daily_registrations[day].append(daily_user_registrations[day]) # insert 0 values for missing user data for day in daily_registrations: if len(daily_registrations[day]) < 2: daily_registrations[day].append(0) return daily_registrations def output_daily_registrations(): daily_registrations = get_daily_registrations() daily_file = open(daily_filename, 'w') for day in sorted(daily_registrations): daily_file.write('%s\t%s\t%s\n' % (day, daily_registrations[day][0], daily_registrations[day][1])) daily_file.close() def output_total_registrations(): total_registrations = {} daily_registrations = get_daily_registrations() blog_total = 0 user_total = 0 for day in sorted(daily_registrations): total_registrations[day] = [blog_total + daily_registrations[day][0], user_total + daily_registrations[day][1]] if day in blog_deletions: total_registrations[day][0] = total_registrations[day][0] - blog_deletions[day] blog_total = total_registrations[day][0] user_total = total_registrations[day][1] total_file = open(total_filename, 'w') for day in sorted(total_registrations): total_file.write('%s\t%s\t%s\n' % (day, total_registrations[day][0], 
total_registrations[day][1])) total_file.close() def output_daily_user_registrations(): daily_user_registrations = get_daily_user_registrations() daily_users_file = open(daily_users_filename, 'w') for day in sorted(daily_user_registrations): daily_users_file.write('%s\t%s\n' % (day, daily_user_registrations[day])) daily_users_file.close() def output_total_users(): total_users = {} daily_user_registrations = get_daily_user_registrations() current_total = 0 for day in sorted(daily_user_registrations): total_users[day] = current_total + daily_user_registrations[day] current_total = total_users[day] total_users_file = open(total_users_filename, 'w') for day in sorted(total_users): total_users_file.write('%s\t%s\n' % (day, total_users[day])) total_users_file.close() def output_daily_blog_registrations(): daily_blog_registrations = get_daily_blog_registrations() daily_users_file = open(daily_users_filename, 'w') for day in sorted(daily_user_registrations): daily_users_file.write('%s\t%s\n' % (day, daily_user_registrations[day])) daily_users_file.close() def output_total_blogs(): total_users = {} daily_user_registrations = get_daily_user_registrations() current_total = 0 for day in sorted(daily_user_registrations): total_users[day] = current_total + daily_user_registrations[day] current_total = total_users[day] total_users_file = open(total_users_filename, 'w') for day in sorted(total_users): total_users_file.write('%s\t%s\n' % (day, total_users[day])) total_users_file.close() # calls the main() function if __name__=='__main__': main()
mit
-1,525,131,400,748,672,000
33.337143
91
0.633822
false
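The heart of the script above is the running-total bookkeeping in output_total_registrations(). A minimal self-contained sketch of that logic (an editorial illustration, not part of the original record), with made-up dates and counts standing in for rows fetched from wp_registration_log and wp_users:

# Illustration only: cumulative blog/user totals with hypothetical sample data.
daily_registrations = {
    '2009-06-01': [120, 95],   # [blog signups, user signups] for the day
    '2009-06-02': [80, 60],
    '2009-06-03': [50, 40],
}
blog_deletions = {'2009-06-03': 30}    # deletions reduce the blog total that day

blog_total = 0
user_total = 0
for day in sorted(daily_registrations):
    blogs, users = daily_registrations[day]
    blog_total += blogs - blog_deletions.get(day, 0)
    user_total += users
    print('%s\t%s\t%s' % (day, blog_total, user_total))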
sourcepole/qgis-openlayers-plugin
openlayers/plugin_upload.py
1
2703
#!/usr/bin/env python # This script uploads a plugin package on the server # # Author: A. Pasotti, V. Picavet import xmlrpclib, sys, os import getpass from optparse import OptionParser # Configuration PROTOCOL='https' SERVER='plugins.qgis.org' PORT='443' ENDPOINT='/plugins/RPC2/' VERBOSE=False def main(options, args): address = "%s://%s:%s@%s:%s%s" % (PROTOCOL, options.username, options.password, options.server, options.port, ENDPOINT) print "Connecting to: %s" % hidepassword(address) server = xmlrpclib.ServerProxy(address, verbose=VERBOSE) try: plugin_id, version_id = server.plugin.upload(xmlrpclib.Binary(open(args[0]).read())) print "Plugin ID: %s" % plugin_id print "Version ID: %s" % version_id except xmlrpclib.ProtocolError, err: print "A protocol error occurred" print "URL: %s" % hidepassword(err.url, 0) print "HTTP/HTTPS headers: %s" % err.headers print "Error code: %d" % err.errcode print "Error message: %s" % err.errmsg except xmlrpclib.Fault, err: print "A fault occurred" print "Fault code: %d" % err.faultCode print "Fault string: %s" % err.faultString def hidepassword(url, start = 6): """Returns the http url with password part replaced with '*'.""" passdeb = url.find(':', start) + 1 passend = url.find('@') return "%s%s%s" % (url[:passdeb], '*' * (passend - passdeb), url[passend:]) if __name__ == "__main__": parser = OptionParser(usage="%prog [options] plugin.zip") parser.add_option("-w", "--password", dest="password", help="Password for plugin site", metavar="******") parser.add_option("-u", "--username", dest="username", help="Username of plugin site", metavar="user") parser.add_option("-p", "--port", dest="port", help="Server port to connect to", metavar="80") parser.add_option("-s", "--server", dest="server", help="Specify server name", metavar="plugins.qgis.org") (options, args) = parser.parse_args() if len(args) != 1: print "Please specify zip file.\n" parser.print_help() sys.exit(1) if not options.server: options.server = SERVER if not options.port: options.port = PORT if not options.username: # interactive mode username = getpass.getuser() print "Please enter user name [%s] :"%username, res = raw_input() if res != "": options.username = res else: options.username = username if not options.password: # interactive mode options.password = getpass.getpass() main(options, args)
gpl-2.0
2,325,613,006,822,997,500
34.103896
92
0.610433
false
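The uploader above is normally invoked as `python plugin_upload.py -u <user> -w <password> plugin.zip`. The XML-RPC call it wraps can also be issued directly; a sketch (Python 2, like the script itself, with placeholder credentials and zip path):

# Illustration only: the same plugin.upload RPC the script performs.
import xmlrpclib

address = 'https://user:[email protected]:443/plugins/RPC2/'
server = xmlrpclib.ServerProxy(address)
plugin_id, version_id = server.plugin.upload(
    xmlrpclib.Binary(open('openlayers.zip').read()))
print('Plugin ID: %s, Version ID: %s' % (plugin_id, version_id))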
mouradmourafiq/data-analysis
quora/feed_optimizer.py
1
11042
# -*- coding: utf-8 -*- """ Created on Jan 11, 2013 @author: Mourad Mourafiq About: This is an attempt to solve the Quora challenge Feed Optimizer. """ import itertools import copy import math from random import choice, random BRUTE_FORCE = 1 ANNEALING_SIMULATED = 2 class Story(object): """ Story object @type _cpt: int @param _cpt: counts the number of instance created. @type _height: int @param _height: The stroy's height. @type _time: int @param _time: The time of publication. @type _id: int @param _id: The story's id. @type _score: int @param _score: The story's score. @type _height: int @param _height: The stroy's height. @type _proportioned_score: float @param _proportioned_score: The stroy's _score proportioned to height. """ __cpt = 0 def __init__(self, time=-1, score=-1, height=-1): self._id = Story.__cpt self._time = time self._score = score self._height = height self._proportioned_score = float(score) / height Story.__cpt += 1 def __repr__(self): return "id: %s, time: %s" % (self._id, self._time) def __gt__(self, story): if (self._proportioned_score > story._proportioned_score): return True if (self._proportioned_score < story._proportioned_score): return False if (self._id < story._id): return True return False def _better_score(self, story): if (self._score > story._score): return True if (self._score < story._score): return False if (self._id < story._id): return True return False class Solution(object): """ Potential solution for the upcoming reload @type _stories: list @param _stories: The list of potential items. @type _len_stories : int @param _len_stories: The length of the list of stories. @type _score: int @param _score: The current solution's score. @type _height: int @param _height: The current solution's height. """ def __init__(self): self._stories = [] self._len_stories = 0 self._score = 0 self._height = 0 def __repr__(self): return "%s %s %s" % ( self._score, self._len_stories, ' '.join(sorted([str(story._id) for story in self._stories]))) def __gt__(self, solution): # check who's score is better if self._score > solution._score: return True if self._score < solution._score: return False # same score; check who has less stories if self._len_stories < solution._len_stories: return True if self._len_stories > solution._len_stories: return False #same score, same number of stories; check who has smaller lexicographically if sorted([story._id for story in self._stories]) <= sorted([story._id for story in solution._stories]): return True else: return False @classmethod def clone(cls, solution): clone_solution = cls() clone_solution._stories = copy.copy(solution._stories) clone_solution._len_stories = solution._len_stories clone_solution._score = solution._score clone_solution._height = solution._height return clone_solution def add(self, story): """ add story to the solution """ self._stories.append(story) self._score += story._score self._height += story._height self._len_stories += 1 def remove(self, story): """ remove story from the solution """ self._stories.remove(story) self._score -= story._score self._height -= story._height self._len_stories -= 1 class Optimizer(object): """ Keep track of stories that can potentially make a solution. The stories should be sorted by time of publication. @type _stories: list @param stories: The list of stories that can potentially make a solution. @type _len_stories : int @param _len_stories: The length of the list of stories. @type __height: int @param window: The height of the browser. 
@type __window: int @param window: The window of recent stories. @type _best_story: Stroy @param _best_story: The best story so far. """ __height = 0 __window = 0 def __init__(self, window, height): self._stories = [] self._len_stories = 0 Optimizer.__window = window Optimizer.__height = height self._best_story = Story() def _purge_old_stories(self, current_time): """ remove old stories form the current list of stories """ # check if the oldest stories can still be part of the solution to_be_removed = [] for old_story in self._stories: if (current_time - old_story._time) <= Optimizer.__window: break else: to_be_removed.append(old_story) for old_story in to_be_removed: self._stories.remove(old_story) self._len_stories -= 1 def _brute_force(self): """ check all possibilities: 1) best solution for combination of 2 stories (if it exists). 2) best solution for combination of 3 stories (if it exists). . . l-1) best solution for combination of l-1 stories (if it exists). l : being the length of the current stories. """ best_solution = Solution() best_solution.add(self._best_story) for i in xrange(2, self._len_stories + 1): for tuple_stories in itertools.combinations(self._stories, i): if self.addable(tuple_stories): current_solution = Solution() for story in tuple_stories: current_solution.add(story) if current_solution > best_solution: best_solution = current_solution return best_solution def _annealing_simulated(self, T=1000.0, cool=0.35): """ perform the annealing simulated algorithm: 1) start with a random solution. 2) move to a neighbour solution. (favors better solutions, and accepts worst solutions with a certain probabilities to avoid local minimum until the temperature is totally down) """ # order stories based on their proportioned score ordered_stories = sorted(self._stories, reverse=True) # produce a random solution current_solution, stories_in_current = self.random_solution(ordered_stories, self._len_stories) best_solution = Solution.clone(current_solution) while (T > 0.1): temp_solution = Solution.clone(current_solution) stories_in_temp = copy.copy(stories_in_current) stories_at_true = [i for i in xrange(self._len_stories) if stories_in_temp[i]] #check if there is still stories if len(stories_at_true) == self._len_stories: break #choose a story and remove it if stories_at_true: indice = choice(stories_at_true) stories_in_temp[indice] = False temp_solution.remove(ordered_stories[indice]) else: indice = -1 #add any number of other stories available for i in xrange(indice + 1, self._len_stories): if stories_in_temp[i]: continue story = ordered_stories[i] if self.addable((story,), temp_solution): stories_in_temp[i] = True temp_solution.add(story) elif temp_solution._height == self.__height: break #compare temp and current solutions if temp_solution > current_solution: current_solution = temp_solution stories_in_current = stories_in_temp #also since temp is better than current, compare it to best if current_solution > best_solution: best_solution = Solution.clone(current_solution) #current solution is better than temp #the algorithm states that we can still give it a try depending on a probability else: #since temp solution score is < current solution score #this probability will be near one at the beginning where T is high #but will get lower and lower as T cool down #hence will accept less and less bad solution p = pow(math.e, float(temp_solution._score - current_solution._score) / T) if p > random(): current_solution = temp_solution stories_in_current = stories_in_temp #decrease the 
temperature T = T * cool return best_solution def add(self, story): # check if the story's height is within the browser's height if story._height <= Optimizer.__height: self._stories.append(story) self._len_stories += 1 if (story > self._best_story): self._best_story = story def produce_solution(self, current_time, solution=BRUTE_FORCE): self._purge_old_stories(current_time) if solution == BRUTE_FORCE: return self._brute_force() elif solution == ANNEALING_SIMULATED: return self._annealing_simulated() @classmethod def addable(cls, tuple_stories, solution=Solution()): total_height = solution._height for story in tuple_stories: total_height += story._height if total_height <= cls.__height: return True return False @classmethod def random_solution(cls, list_stories, length_stories): """ produce a random solution """ stories_in = [False] * length_stories solution = Solution() for i in xrange(length_stories): story = list_stories[i] if cls.addable((story,), solution): solution.add(story) stories_in[i] = True elif solution._height == cls.__height: break return solution, stories_in N, W, H = [int(x) for x in raw_input().split()] p = Optimizer(W, H) while (N): command = raw_input().split() if command[0] == "S": # story t, s, h = [int(x) for x in command[1:]] p.add(Story(t, s, h)) elif command[0] == "R": # Reload tr = int(command[1]) print p.produce_solution(tr, solution=ANNEALING_SIMULATED) N -= 1
bsd-2-clause
-8,738,647,890,914,487,000
32.978462
112
0.562851
false
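The script is driven from stdin ("N W H" on the first line, then "S time score height" to add a story and "R time" to request a reload). A sketch of driving the classes directly instead; it assumes Story, Optimizer and BRUTE_FORCE are importable, which in practice means guarding the raw_input() block at the bottom of the file behind `if __name__ == '__main__':` first. The numbers are made up.

# Illustration only: a tiny feed with three stories and one reload.
opt = Optimizer(window=10, height=100)       # 10 time units, 100-pixel browser
opt.add(Story(time=1, score=50, height=30))
opt.add(Story(time=2, score=40, height=30))
opt.add(Story(time=3, score=10, height=50))

# Best feed for a reload at t=4, exhaustive search:
# prints "<total score> <number of stories> <story ids>"
print(opt.produce_solution(4, solution=BRUTE_FORCE))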
004helix/bybankpy
contrib/insync-add-shortcut.py
1
1123
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals from six.moves import input import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), "..")) import insync def main(): i = insync.client(os.path.expanduser('~/lib/insync.db')) i.login() i.desktop() for prodtype in ('ACCOUNT', 'DEPOSIT', 'CREDIT'): for item in i.products(prodtype)['items']: if 'onDesktop' not in item or item['onDesktop']: continue print('------------------------------------') print(' title:', item['info']['title']) print(' type/id:', item['type'], item['id']) if 'amount' in item['info']: print(' balance:', item['info']['amount']['amount'], item['info']['amount']['currency']) print() type_ = input('Enter item TYPE: ') id_ = input('Enter item ID: ') i.debug = True i.add_product_shortcut(type_.strip(), id_.strip()) i.debug = False i.logout() if __name__ == '__main__': main()
gpl-3.0
4,726,185,302,800,300,000
25.738095
62
0.51024
false
Harvard-ATG/HarvardCards
flash/decorators.py
1
1485
from functools import wraps

from django.core.exceptions import PermissionDenied

from flash.models import Deck
from flash.services import has_role_with_request


def check_role(roles, entity_type):
    """ A decorator that checks to see if a user has the required role
    in a collection. Allows the user to enter the function if the user
    has the role. Raises a PermissionDenied exception if the user
    doesn't have the role.

    Input: a list of roles allowed for this function
    Output: the function if user has role, else a PermissionDenied
    """
    def decorator(func):
        def inner_decorator(request, *args, **kwargs):
            entity_id = None
            if request.GET:
                entity_id = request.GET.get('deck_id','') if entity_type == 'deck' else request.GET.get('collection_id','')
            elif not entity_id and request.POST:
                entity_id = request.POST.get('deck_id','') if entity_type == 'deck' else request.POST.get('collection_id','')
            else:
                raise PermissionDenied
            entity_id = int(entity_id)
            if entity_type == 'deck':
                deck = Deck.objects.get(id=entity_id)
                entity_id = deck.collection.id
            if has_role_with_request(request, roles, entity_id):
                return func(request, *args, **kwargs)
            raise PermissionDenied
        return wraps(func)(inner_decorator)
    return decorator
bsd-3-clause
7,790,181,930,741,742,000
39.135135
125
0.617508
false
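A sketch of how check_role is meant to be used on a Django view (an illustration, not from the original project). The role names and the view are hypothetical; the real role constants come from the project's flash.services role handling.

# Illustration only: guard a deck-editing view behind a role check.
from django.http import HttpResponse
from flash.decorators import check_role

@check_role(['owner', 'admin'], 'deck')      # hypothetical role list
def edit_deck(request):
    # Only reached when the user holds one of the roles on the collection
    # that owns request.GET['deck_id']; otherwise PermissionDenied was raised.
    return HttpResponse('ok')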
twilio/twilio-python
tests/integration/events/v1/test_event_type.py
1
6848
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from tests import IntegrationTestCase from tests.holodeck import Request from twilio.base.exceptions import TwilioException from twilio.http.response import Response class EventTypeTestCase(IntegrationTestCase): def test_list_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.events.v1.event_types.list() self.holodeck.assert_has_request(Request( 'get', 'https://events.twilio.com/v1/Types', )) def test_read_empty_response(self): self.holodeck.mock(Response( 200, ''' { "types": [], "meta": { "page": 0, "page_size": 10, "first_page_url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0", "previous_page_url": null, "url": "https://events.twilio.com/v1/Types?PageSize=10&Page=0", "next_page_url": null, "key": "types" } } ''' )) actual = self.client.events.v1.event_types.list() self.assertIsNotNone(actual) def test_read_results_response(self): self.holodeck.mock(Response( 200, ''' { "types": [ { "date_created": "2020-08-13T13:28:20Z", "date_updated": "2020-08-13T13:28:20Z", "type": "com.twilio.messaging.message.delivered", "schema_id": "Messaging.MessageStatus", "public": true, "description": "Messaging- delivered message", "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered", "links": { "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions" } }, { "date_created": "2020-08-13T13:28:19Z", "date_updated": "2020-08-13T13:28:19Z", "type": "com.twilio.messaging.message.failed", "schema_id": "Messaging.MessageStatus", "public": true, "description": "Messaging- failed message", "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed", "links": { "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions" } } ], "meta": { "page": 0, "page_size": 20, "first_page_url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0", "previous_page_url": null, "url": "https://events.twilio.com/v1/Types?PageSize=20&Page=0", "next_page_url": null, "key": "types" } } ''' )) actual = self.client.events.v1.event_types.list() self.assertIsNotNone(actual) def test_read_results_with_schema_id_response(self): self.holodeck.mock(Response( 200, ''' { "types": [ { "date_created": "2020-08-13T13:28:20Z", "date_updated": "2020-08-13T13:28:20Z", "type": "com.twilio.messaging.message.delivered", "schema_id": "Messaging.MessageStatus", "public": true, "description": "Messaging- delivered message", "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered", "links": { "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions" } }, { "date_created": "2020-08-13T13:28:19Z", "date_updated": "2020-08-13T13:28:19Z", "type": "com.twilio.messaging.message.failed", "schema_id": "Messaging.MessageStatus", "public": true, "description": "Messaging- failed message", "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.failed", "links": { "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions" } } ], "meta": { "page": 0, "page_size": 20, "first_page_url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0", "previous_page_url": null, "url": "https://events.twilio.com/v1/Types?SchemaId=Messaging.MessageStatus&PageSize=20&Page=0", "next_page_url": null, "key": "types" } } ''' )) actual = self.client.events.v1.event_types.list() self.assertIsNotNone(actual) def 
test_fetch_request(self): self.holodeck.mock(Response(500, '')) with self.assertRaises(TwilioException): self.client.events.v1.event_types("type").fetch() self.holodeck.assert_has_request(Request( 'get', 'https://events.twilio.com/v1/Types/type', )) def test_fetch_response(self): self.holodeck.mock(Response( 200, ''' { "date_created": "2020-08-13T13:28:20Z", "date_updated": "2020-08-13T13:28:20Z", "type": "com.twilio.messaging.message.delivered", "schema_id": "Messaging.MessageStatus", "public": true, "description": "Messaging- delivered message", "url": "https://events.twilio.com/v1/Types/com.twilio.messaging.message.delivered", "links": { "schema": "https://events.twilio.com/v1/Schemas/Messaging.MessageStatus/Versions" } } ''' )) actual = self.client.events.v1.event_types("type").fetch() self.assertIsNotNone(actual)
mit
3,487,979,031,603,684,400
37.256983
127
0.457214
false
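The holodeck fixtures above mock the Events API; against a real account the same calls look like the sketch below. Credentials are placeholders, and the instance attribute names (type, schema_id, description) are inferred from the JSON fields in the fixtures.

# Illustration only: live counterparts of the mocked list/fetch calls.
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')

for event_type in client.events.v1.event_types.list():
    print(event_type.type, event_type.schema_id)

delivered = client.events.v1.event_types('com.twilio.messaging.message.delivered').fetch()
print(delivered.description)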
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/static_routes/static/state/__init__.py
1
26575
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/state. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Operational state data for static routes """ __slots__ = ("_path_helper", "_extmethods", "__prefix", "__set_tag") _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__prefix = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", is_config=False, ) self.__set_tag = YANGDynClass( base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"}, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "static-routes", "static", "state", ] def _get_prefix(self): """ Getter method for prefix, mapped from 
YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/prefix (inet:ip-prefix) YANG Description: Destination prefix for the static route, either IPv4 or IPv6. """ return self.__prefix def _set_prefix(self, v, load=False): """ Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/prefix (inet:ip-prefix) If this variable is read-only (config: false) in the source YANG file, then _set_prefix is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix() directly. YANG Description: Destination prefix for the static route, either IPv4 or IPv6. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prefix must be of a type compatible with inet:ip-prefix""", "defined-type": "inet:ip-prefix", "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=False)""", } ) self.__prefix = t if hasattr(self, "_set"): self._set() def _unset_prefix(self): self.__prefix = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", 
is_config=False, ) def _get_set_tag(self): """ Getter method for set_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/set_tag (oc-pt:tag-type) YANG Description: Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols. """ return self.__set_tag def _set_set_tag(self, v, load=False): """ Setter method for set_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/set_tag (oc-pt:tag-type) If this variable is read-only (config: false) in the source YANG file, then _set_set_tag is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_set_tag() directly. YANG Description: Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?" }, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """set_tag must be of a type compatible with oc-pt:tag-type""", "defined-type": "oc-pt:tag-type", "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'}),], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-pt:tag-type', is_config=False)""", } ) self.__set_tag = t if hasattr(self, "_set"): self._set() def _unset_set_tag(self): self.__set_tag = YANGDynClass( base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"}, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) prefix = __builtin__.property(_get_prefix) set_tag = __builtin__.property(_get_set_tag) _pyangbind_elements = OrderedDict([("prefix", prefix), ("set_tag", set_tag)]) class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/state. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Operational state data for static routes """ __slots__ = ("_path_helper", "_extmethods", "__prefix", "__set_tag") _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__prefix = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", is_config=False, ) self.__set_tag = YANGDynClass( base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"}, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "static-routes", "static", "state", ] def _get_prefix(self): """ Getter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/prefix (inet:ip-prefix) YANG Description: Destination prefix for the static route, either IPv4 or IPv6. """ return self.__prefix def _set_prefix(self, v, load=False): """ Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/prefix (inet:ip-prefix) If this variable is read-only (config: false) in the source YANG file, then _set_prefix is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prefix() directly. YANG Description: Destination prefix for the static route, either IPv4 or IPv6. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prefix must be of a type compatible with inet:ip-prefix""", "defined-type": "inet:ip-prefix", "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=False)""", } ) self.__prefix = t if hasattr(self, "_set"): self._set() def _unset_prefix(self): self.__prefix = YANGDynClass( base=[ RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))" }, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))" }, ), ], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="inet:ip-prefix", is_config=False, ) def _get_set_tag(self): """ Getter method for set_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/set_tag (oc-pt:tag-type) YANG Description: Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols. """ return self.__set_tag def _set_set_tag(self, v, load=False): """ Setter method for set_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/state/set_tag (oc-pt:tag-type) If this variable is read-only (config: false) in the source YANG file, then _set_set_tag is considered as a private method. 
Backends looking to populate this variable should do so via calling thisObj._set_set_tag() directly. YANG Description: Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={ "pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?" }, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """set_tag must be of a type compatible with oc-pt:tag-type""", "defined-type": "oc-pt:tag-type", "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'}),], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-pt:tag-type', is_config=False)""", } ) self.__set_tag = t if hasattr(self, "_set"): self._set() def _unset_set_tag(self): self.__set_tag = YANGDynClass( base=[ RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), RestrictedClassType( base_type=six.text_type, restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"}, ), ], is_leaf=True, yang_name="set-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-pt:tag-type", is_config=False, ) prefix = __builtin__.property(_get_prefix) set_tag = __builtin__.property(_get_set_tag) _pyangbind_elements = OrderedDict([("prefix", prefix), ("set_tag", set_tag)])
apache-2.0
8,400,137,908,949,543,000
42.998344
866
0.50175
false
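A small sketch of interacting with the generated binding (illustration only). Note the file defines two classes both named state (one per source YANG module), so the second definition is what an import binds to; both leaves are config: false, so prefix and set_tag are read-only properties normally filled by pyangbind deserialisation rather than set by hand. The import path is assumed to mirror the file path above.

# Illustration only: instantiate the binding and inspect its (empty) leaves.
from napalm_yang.models.openconfig.network_instances.network_instance.protocols.protocol.static_routes.static.state import state

s = state()
print(s._path())        # ['network-instances', ..., 'static-routes', 'static', 'state']
print(repr(s.prefix))   # empty ip-prefix value until operational data is loaded
print(repr(s.set_tag))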
sehoonha/optskills
optskills/problems/gp_jump.py
1
6468
import numpy as np from numpy.linalg import norm from sim_problem import SimProblem, STR import phase_controller import math class GPJump(SimProblem): def __init__(self): super(GPJump, self).__init__('urdf/BioloidGP/BioloidGP.URDF', fps=1000.0) self.__init__simulation__() self.dim = 6 self.eval_counter = 0 # Well, increasing when simulated self.params = None def __init__simulation__(self): self.init_state = self.skel().x self.init_state[0] = -0.50 * 3.14 self.init_state[4] = 0.230 self.init_state[5] = 0.230 self.reset() self.controller = phase_controller.PhaseController(self.world) self.controller.callback = self.balance # self.set_params(np.array([0.14, 0.3, -0.15, -0.1, -0.2, 0.2, 0, 0])) self.set_params(np.array([0.14, 0.3, -0.15, -0.1, -0.2, 0.2])) self.controller.reset() def balance(self): Cd = self.skel().Cdot bal = Cd[2] l0 = self.skel().dof_index('l_thigh') r0 = self.skel().dof_index('r_thigh') l2 = self.skel().dof_index('l_heel') r2 = self.skel().dof_index('r_heel') qhat = np.array(self.controller.phase().target) # bal *= 1.0 bal *= 0.7 # print('t: %0.4f bal: %.6f' % (self.world.t, bal)) qhat[l0] -= bal * 1.0 qhat[r0] -= bal * 1.0 qhat[l2] -= bal * 1.0 qhat[r2] -= bal * 1.0 self.controller.pd.target = qhat def simulate(self, sample): self.eval_counter += 1 self.set_params(sample) self.reset() while not self.terminated(): self.step() return self.collect_result() def evaluate(self, result, task): w = task # Calculate the validity of C C = np.array(result['C']) C[1] = result['maxCy'] lo = np.array([0.0, 0.22, 0.00]) hi = np.array([0.0, 0.27, 0.00]) C_hat = lo * (1 - w) + hi * w weight = np.array([0.1, 10.0, 0.0]) obj = norm((C - C_hat) * weight) ** 2 if result['maxCy'] < 0.2: obj += 10.0 * (0.2 - result['maxCy']) b_penalty = 0.0 if result['fallen']: t = result['t'] b_penalty += 0.5 * (2.0 - t) # Calculate parameter penalty params = result['params'] penalty = 0.0 if params is not None: for i in range(self.dim): v = params[i] penalty += max(0.0, v - 1.0) ** 2 penalty += min(0.0, v - (-1.0)) ** 2 return obj + b_penalty + penalty # return b_penalty def set_random_params(self): pass def set_params(self, x): self.params = x w = (x - (-1.0)) / 2.0 # Change to 0 - 1 Scale lo = np.array([-0.5, -1.5, -0.5, -1.0, -200, -200]) hi = np.array([1.5, 0.0, 1.5, 1.0, 0, 0]) params = lo * (1 - w) + hi * w (q0, q1, q2, q3, f0, f1) = params (q4, q5) = (-0.7, 0.0) # print 'q:', q0, q1, q2, q3, f0, f1 # (q0, q1, q2, q3, f0, f1) = (0.65, -1.3, 0.55, 0.2, -150, -70) # (q0, q1, q2, q3, f0, f1) = (0.65, -1.3, 0.55, 0.2, -100, -40) # print 'q:', q0, q1, q2, q3, f0, f1 # (q0, q1, q2, q3, q4) = (0.16, -0.8, -1.0, 0.60, -0.5) self.reset() self.controller.clear_phases() # The first phase - balance phase = self.controller.add_phase_from_now(0.7) phase.set_target('l_thigh', q0) # 0.65 phase.set_target('r_thigh', q0) # 0.65 phase.set_target('l_shin', q1) # -1.3 phase.set_target('r_shin', q1) # -1.3 phase.set_target('l_heel', q2) # 0.6 phase.set_target('r_heel', q2) # 0.6 phase.set_target('l_shoulder', q4) # -0.7 phase.set_target('r_shoulder', q4) # -0.7 # The second phase - swing back phase = self.controller.add_phase_from_now('no_contact') phase.add_virtual_force(['l_foot', 'r_foot'], np.array([0, f0, f1])) # 0, -150, -55 # The third phase - swing forward phase = self.controller.add_phase_from_now(0.8) phase.set_target('l_shoulder', 0.3) # 0.3 phase.set_target('r_shoulder', 0.3) # 0.3 phase.set_target('l_thigh', q3) # 0.2 phase.set_target('r_thigh', q3) # 0.2 phase.add_target_offset('l_shin', q5) # 0.2 
phase.add_target_offset('r_shin', q5) # 0.2 # For the final production phase.terminae = 0.5 phase = self.controller.add_phase_from_now(0.8) phase.add_target_offset('l_heel', -0.1) # 0.2 phase.add_target_offset('r_heel', -0.1) # 0.2 # print('num phases: %d' % len(self.controller.phases)) def collect_result(self): res = {} res['params'] = None if self.params is None else np.array(self.params) res['C'] = np.array(self.skel().C) # T = self.skel().COP # res['T'] = np.array(T) if T is not None else np.zeros(3) res['maxCy'] = max([C[1] for C in self.com_trajectory]) res['t'] = self.world.t res['fallen'] = self.fallen # print 'result: ', res return res def reset_hook(self): self.fallen = False def terminated(self): C = self.skel().C Hy = self.skel().body('torso').C[1] if C[1] < 0.12 or math.fabs(C[0]) > 0.06 or \ Hy < C[1]: # May check |Cx| > 0.04 self.fallen = True return True # return (self.world.t > 1.7) return (self.world.t > 3.0) # For final production def __str__(self): res = self.collect_result() status = "" status += '[GPJump at %.4f' % self.world.t status += '(%d)' % self.controller.phase_index for key, value in self.collect_result().iteritems(): if key in set(['params']): continue if hasattr(value, '__len__'): status += ' %s : %s' % (key, STR(value, 3)) else: status += ' %s : %s' % (key, value) status += ' value = {' tasks = np.linspace(0.0, 1.0, 6) values = [self.evaluate(res, t) for t in tasks] status += ' '.join(['%.4f' % v for v in values]) status += '}]' return status def __repr__(self): return 'problems.GPJump()'
mit
991,349,405,432,925,200
32.863874
78
0.493816
false
strazzere/pfp
setup.py
1
1092
 #!/usr/bin/env python
# encoding: utf-8

import os, sys
from setuptools import setup

setup(
    # metadata
    name='pfp',
    description='An 010 template interpreter for Python',
    long_description="""
        pfp is an 010 template interpreter for Python. It accepts an
        input data stream and an 010 template and returns a modifiable
        DOM of the parsed data. Extensions have also been added to the
        010 template syntax to allow for linked fields (e.g. checksums,
        length calculations, etc), sub structures in compressed data,
        etc.
    """,
    license='MIT',
    version='0.1.11',
    author='James Johnson',
    maintainer='James Johnson',
    author_email='[email protected]',
    url='https://github.com/d0c-s4vage/pfp',
    platforms='Cross Platform',
    download_url="https://github.com/d0c-s4vage/pfp/tarball/v0.1.11",
    install_requires = open(os.path.join(os.path.dirname(__file__), "requirements.txt")).read().split("\n"),
    classifiers = [
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',],
    packages=['pfp', 'pfp.native'],
)
mit
4,640,449,585,166,833,000
33.125
105
0.67674
false
nguy/pyparticleprobe
pyparticleprobe/dsd_calcs/attenuation.py
1
3961
""" pyparticleprobe.dsd_calcs.attenuation ========================= A grouping of functions that calcuates attenuation characteristics 7 Feb 2014 - Adapted by Nick Guy NOAA/NSSL/WRDD, NRC """ # HISTORY:: # 3 Feb 2014 - Nick Guy. NOAA/NSSL, NRC ([email protected]) # # NOTES:: # Arrays seem to be able to be passed, but make sure they are float arrays # (e.g. created with numpy) and not lists # # FUNCTIONS:: # abs_coeff - Absorption coefficient # scat_coeff - Scattering coefficient # ext_coeff - Extinction coefficient # spec_atten - Specific attenuation #------------------------------------------------------------------- # Load the needed packages import numpy as np #=============================================================== # BEGIN FUNCTIONS #=============================================================== def abs_coeff(D,lam,m): """Absorption coefficient of a spherical particle From Doviak and Zrnic (1993), Eqn 3.14a or Battan (1973), Eqn 6.6 INPUT:: D = Particle diameter [m] lam = Radar wavelength [m] m = Complex refractive index [unitless] OUTPUT:: Qa = Absorption coefficient USAGE:: Qa = abs_coeff(D,lam,m) NOTES:: The default is for a dielectric factor value for water. This can be changed by the user, e.g. K=0.208 for particle sizes of equivalent melted diameters or K=0.176 for particle sizes of equivalent ice spheres. """ #--------------------------------------- Km = (m**2 - 1) / (m**2 + 2) Qa = (np.pi**2 * D**3 / lam) * np.imag(-1 * Km) return Qa #==================================================== def scat_coeff(D,lam,m): """Scattering coefficient of a spherical particle From Doviak and Zrnic (1993), Eqn 3.14b or Battan (1973), Eqn 6.5 INPUT:: D = Particle diameter [m] lam = Radar wavelength [m] m = Complex refractive index [unitless] OUTPUT:: Qs = Scattering coefficient USAGE:: Qs = scat_coeff(D,lam,m) """ #--------------------------------------- Km = (m**2 - 1) / (m**2 + 2) Qs = (2 * np.pi**5 * D**6 / (3 * lam**4) * (np.absolute(Km))**2) return Qs #==================================================== def ext_coeff(D,lam,m): """Extinction coefficient of a spherical particle From Doviak and Zrnic (1993), Eqn 3.14b or Battan (1973), Eqn 6.5 INPUT:: D = Particle diameter [m] lam = Radar wavelength [m] m = Complex refractive index [unitless] OUTPUT:: Qe = Scattering coefficient USAGE:: Qe = ext_coeff(D,lam,m) NOTES:: The default is for a dielectric factor value for water. This can be changed by the user, e.g. K=0.208 for particle sizes of equivalent melted diameters or K=0.176 for particle sizes of equivalent ice spheres. """ #--------------------------------------- Qa = abs_coeff(D,lam,m) Qs = scat+coeff(D,lam,m) Qe = Qa + Qs return Qe #==================================================== def spec_atten(Nd,Diam,lam,m): """Extinction coefficient of a spherical particle From Doviak and Zrnic (1993), Eqn 3.15 INPUT:: Nd = Drop concentration as a function of drop size [m^-3] Diam = Drop size diameter [mm] lam = Radar wavelength [m] m = Complex refractive index [unitless] OUTPUT:: K = Specific attenuation [dB/km] USAGE:: K = spec_atten(Nd,Diam,lam,m) NOTES:: The default is for a dielectric factor value for water. This can be changed by the user, e.g. K=0.208 for particle sizes of equivalent melted diameters or K=0.176 for particle sizes of equivalent ice spheres. 
""" #--------------------------------------- Qa = abs_coeff(Diam,lam,m) Qs = scat_coeff(Diam,lam,m) Qe = Qa + Qs # Calculate specific attenuation K = 4.34e3 * Nd * Qe return Qe #====================================================
lgpl-2.1
6,669,627,535,984,001,000
31.735537
76
0.533199
false
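A worked sketch of the functions above for a small drop population (illustration only). The wavelength, refractive index and drop spectrum are placeholder values; a real application would take the refractive index of water at the radar wavelength and temperature from a lookup. Diameters are kept in metres to match abs_coeff/scat_coeff, even though the spec_atten docstring quotes mm.

# Illustration only: specific attenuation for a hypothetical exponential DSD.
import numpy as np
from pyparticleprobe.dsd_calcs import attenuation

lam = 0.10                                   # radar wavelength [m] (S band)
m = complex(8.0, 1.0)                        # rough stand-in for water's refractive index
D = np.linspace(0.5e-3, 5.0e-3, 10)          # drop diameters [m]
Nd = 8000.0 * np.exp(-4.1 * D * 1.0e3)       # made-up concentrations [m^-3]

Qe = attenuation.ext_coeff(D, lam, m)        # extinction cross section per size
K = attenuation.spec_atten(Nd, D, lam, m)    # specific attenuation [dB/km] per size
print(K)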
dishbreak/bluesync
python/modules/bluetoothdevice.py
1
12044
from contrib import bglib from collections import namedtuple from modules import types import time class BleHandles(): """docstring for BleHandles""" def __init__( self, connection = -1, service_start = -1, service_end = -1, timestamp = -1, trigger_scanning = -1, ref_time = -1, sequence_number = -1, standard_flag = -1 ): self.connection = connection self.service_start = service_start self.service_end = service_end self.timestamp = timestamp self.trigger_scanning = trigger_scanning self.ref_time = ref_time self.sequence_number = sequence_number self.standard_flag = standard_flag class TimestampData(): def __init(): self.timestamp = 0 self.ref_time = 0 self.sequence_number = 0 self.standard_flag = 0 def to_string(self): return "Timestamp: {} Seq_Num: {} Ref_Time: {} Std_Flag: {}".format( self.timestamp, self.sequence_number, self.ref_time, self.standard_flag ) class BluetoothDevice(): """docstring for BluetoothDevice""" def __init__(self, bglib, serial, device_info, logfile, retry_count = 3): self.bglib = bglib self.serial = serial self.device_info = device_info self.retry_count = retry_count self.retries_remaining = retry_count self.handles = BleHandles() self.service_discovery_in_progress = False self.connection_in_progress = False self.connection_failed = False self.sequence_number = 1 self.handle_being_read = -1 self.time_data = 0 self.last_command = 0 self.logfile = logfile print("constructor called") def on_connect_rsp(self, sender, args): if args['result'] == 0: self.handles.connection = args['connection_handle'] print("{} got connection handle {}".format( self.device_info.addr_str, self.handles.connection) ) else: print("Connection error: {0} ({0:04X})".format(args['result'])) def __reconnect(self): self.retries_remaining -= 1 if self.retries_remaining > 0: print("Retrying, {} retry attempts remaining".format(self.retries_remaining)) self.connect(self.retries_remaining, True) else: print("Couldn't connect.") self.connection_failed = True def on_connection_status_evt(self, sender, args): if(args['flags'] == 0x05): print("Connected (I think).") self.connection_in_progress = False else: print("Got flags value 0x{:02X}...reconnecting") self.__reconnect() def on_disconnection_event(self, sender, args): print("Got disconnection event with reason {0} ({0:04X})".format(args['reason'])) if args['reason'] == 0x023E: self.__reconnect() def connect(self, retry_count = -1, is_retry = False): if retry_count == -1: retry_count = self.retry_count print("attempting a connection to {}".format(self.device_info.addr_str)) if not is_retry: self.bglib.ble_rsp_gap_connect_direct += self.on_connect_rsp self.bglib.ble_evt_connection_status += self.on_connection_status_evt self.connection_in_progress = True self.retries_remaining = retry_count self.bglib.send_command( self.serial, self.bglib.ble_cmd_gap_connect_direct( self.device_info.addr, 0, 60, 76, 100, 0 ) ) #sorry for the magic numbers self.bglib.check_activity(self.serial, 1) timeout = time.time() + 5 while self.connection_in_progress: time.sleep(0.1) self.bglib.check_activity(self.serial) if time.time() > timeout: break; if self.connection_in_progress: print("Connection timeout. 
attempting a reconnect.") self.__reconnect() self.bglib.ble_evt_connection_status -= self.on_connection_status_evt self.bglib.ble_rsp_gap_connect_direct -= self.on_connect_rsp def disconnect(self): print("disconnecting from device {}".format( self.handles.connection) ) self.device_info = types.Device( self.device_info.addr_str, self.device_info.addr, time.time()) self.bglib.send_command( self.serial, self.bglib.ble_cmd_connection_disconnect(self.handles.connection) ) self.bglib.check_activity(self.serial, 1) def on_service_characteristic_rsp(self, sender, args): print("responded with code {:0X}".format(args['result'])) def on_service_discovered(self, sender, args): if args['uuid'] == types.bluesync_uuid: self.handles.service_start = args['start'] self.handles.service_end = args['end'] print("Found BlueSync service on device {} [{}-{}]".format( self.handles.connection, self.handles.service_start, self.handles.service_end) ) if args['end'] == 0xFFFF: self.service_discovery_in_progress = False def on_characteristic_discovered(self, sender, args): print("Discovered handle {}".format(args['chrhandle'])) if args['uuid'] == types.timestamp_uuid: self.handles.timestamp = args['chrhandle'] elif args['uuid'] == types.trigger_scanning_uuid: self.handles.trigger_scanning = args['chrhandle'] elif args['uuid'] == types.reference_time_uuid: self.handles.ref_time = args['chrhandle'] elif args['uuid'] == types.sequence_number_uuid: self.handles.sequence_number = args['chrhandle'] elif args['uuid'] == types.standard_flag_uuid: self.handles.standard_flag = args['chrhandle'] def on_characteristics_end(self, sender, args): self.service_discovery_in_progress = False print("timestamp:{} trigger_scanning:{} ref_time:{} seq_num:{} std:{}".format( self.handles.timestamp, self.handles.trigger_scanning, self.handles.ref_time, self.handles.sequence_number, self.handles.standard_flag ) ) def on_characteristic_read(self, sender, args): # print("read characteristic {}: {} ({})".format(args['atthandle'], # args['value'], types.array_to_integer(args['value']))) if args['atthandle'] == self.handle_being_read: self.handle_being_read = -1 read_value = types.array_to_integer(args['value']) if args['atthandle'] == self.handles.timestamp: self.time_data.timestamp = read_value elif args['atthandle'] == self.handles.standard_flag: self.time_data.standard_flag = read_value elif args['atthandle'] == self.handles.ref_time: self.time_data.ref_time = read_value elif args['atthandle'] == self.handles.sequence_number: self.time_data.sequence_number = read_value def on_characteristic_read_rsp(self, sender, args): print("command response {:04X}".format(args['result'])) if args['result'] == 0x0186: print("attempting a reconnect") self.connect() print('resending last command...') self.bglib.send_command( self.serial, self.bglib.ble_cmd_attclient_read_by_handle( self.handles.connection, self.handle_being_read) ) self.bglib.check_activity(self.serial, 1) def read_timestamp_data(self): self.bglib.ble_evt_attclient_attribute_value += self.on_characteristic_read self.bglib.ble_rsp_attclient_read_by_handle += self.on_characteristic_read_rsp print("timestamp:{} trigger_scanning:{} ref_time:{}".format( self.handles.timestamp, self.handles.trigger_scanning, self.handles.ref_time ) ) handles = [ self.handles.timestamp, self.handles.ref_time, self.handles.sequence_number, self.handles.standard_flag ] self.time_data = TimestampData() for handle in handles: self.handle_being_read = handle # print("reading handle {}".format(handle)) self.last_command = 
self.bglib.ble_cmd_attclient_read_by_handle( self.handles.connection, handle ) self.bglib.send_command( self.serial, self.last_command ) # print("waiting for response...") # Catch response self.bglib.check_activity(self.serial, 3) # print("waiting for event...") while self.handle_being_read != -1: self.bglib.check_activity(self.serial) time.sleep(0.1) print("RESULT device {} data: {}".format(self.handles.connection, self.time_data.to_string())) self.logfile.write("RESULT device {} data: {}".format(self.handles.connection, self.time_data.to_string())) self.bglib.ble_evt_attclient_attribute_value -= self.on_characteristic_read self.bglib.ble_rsp_attclient_read_by_handle -= self.on_characteristic_read_rsp return self.time_data def discover_services(self): self.service_discovery_in_progress = True self.bglib.ble_rsp_attclient_find_information += self.on_service_characteristic_rsp self.bglib.ble_evt_attclient_group_found += self.on_service_discovered self.bglib.send_command( self.serial, self.bglib.ble_cmd_attclient_read_by_group_type( self.handles.connection, 1, 65535, [0x00, 0x28] ) ) self.bglib.check_activity(self.serial, 1) while self.service_discovery_in_progress: time.sleep(0.1) self.bglib.check_activity(self.serial) if self.handles.service_start == -1: print("Didn't discover uuid for BlueSync") self.bglib.ble_evt_attclient_group_found -= self.on_service_discovered self.bglib.ble_rsp_attclient_find_information += self.on_service_characteristic_rsp self.service_discovery_in_progress = True self.bglib.ble_evt_attclient_find_information_found += self.on_characteristic_discovered self.bglib.ble_evt_attclient_procedure_completed += self.on_characteristics_end self.bglib.ble_rsp_attclient_find_information += self.on_service_characteristic_rsp self.bglib.send_command(self.serial, self.bglib.ble_cmd_attclient_find_information( self.handles.connection, self.handles.service_start, self.handles.service_end, ) ) self.bglib.check_activity(self.serial, 1) while self.service_discovery_in_progress: self.bglib.check_activity(self.serial) time.sleep(0.1) self.bglib.ble_evt_attclient_find_information_found -= self.on_characteristic_discovered self.bglib.ble_evt_attclient_procedure_completed -= self.on_characteristics_end self.bglib.ble_rsp_attclient_find_information -= self.on_service_characteristic_rsp def trigger_scanning(self): self.bglib.send_command(self.serial, self.bglib.ble_cmd_attclient_attribute_write( self.handles.connection, self.handles.trigger_scanning, [0x01] ) ) self.bglib.check_activity(self.serial, 1)
bsd-2-clause
-8,836,920,886,961,241,000
35.49697
115
0.583029
false
dreid/yunomi
yunomi/tests/test_meter.py
1
2107
from __future__ import division, absolute_import

import mock

from unittest2 import TestCase

from yunomi.compat import xrange
from yunomi.core.meter import Meter


class MeterTests(TestCase):

    def test_a_blankmeter(self):
        self.meter = Meter("test")
        self.assertEqual(self.meter.get_count(), 0)
        self.assertAlmostEqual(self.meter.get_mean_rate(), 0.0)

    def test_meter_with_three_events(self):
        self.meter = Meter("test")
        self.meter.mark(3)
        self.assertEqual(self.meter.get_count(), 3)

    @mock.patch("yunomi.core.meter.time")
    def test_mean_rate_one_per_second(self, time_mock):
        time_mock.return_value = 0.0
        self.meter = Meter("test")
        for i in xrange(10):
            self.meter.mark()
            time_mock.return_value += 1
            self.meter._tick()

        self.assertAlmostEqual(self.meter.get_mean_rate(), 1)

    @mock.patch("yunomi.stats.ewma.time")
    def test_meter_EWMA_rates(self, time_mock):
        time_mock.return_value = 0.0
        self.meter = Meter("test")
        self.meter.mark(3)
        time_mock.return_value += 5

        for one, five, fifteen in [(0.6, 0.6, 0.6),
                                   (0.22072766, 0.49123845, 0.56130419),
                                   (0.08120117, 0.40219203, 0.52510399),
                                   (0.02987224, 0.32928698, 0.49123845),
                                   (0.01098938, 0.26959738, 0.45955700),
                                   (0.00404277, 0.22072766, 0.42991879),
                                   (0.00148725, 0.18071653, 0.40219203),
                                   (0.00054713, 0.14795818, 0.37625345),
                                   (0.00020128, 0.12113791, 0.35198773),
                                   (0.00007405, 0.09917933, 0.32928698)]:
            self.assertAlmostEqual(self.meter.get_one_minute_rate(), one)
            self.assertAlmostEqual(self.meter.get_five_minute_rate(), five)
            self.assertAlmostEqual(self.meter.get_fifteen_minute_rate(), fifteen)
            time_mock.return_value += 60
mit
7,697,591,247,821,383,000
38.754717
81
0.548647
false
OpenReliability/OpenReliability
veusz/embed.py
1
19768
# A module for embedding Veusz within another python program # Copyright (C) 2005 Jeremy S. Sanders # Email: Jeremy Sanders <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################## """This module allows veusz to be embedded within other Python programs. For example: import time import numpy import veusz.embed as veusz g = veusz.Embedded('new win') g.To( g.Add('page') ) g.To( g.Add('graph') ) g.SetData('x', numpy.arange(20)) g.SetData('y', numpy.arange(20)**2) g.Add('xy') g.Zoom(0.5) time.sleep(60) g.Close() More than one embedded window can be opened at once """ from __future__ import division import atexit import sys import os import os.path import struct import socket import subprocess import time import uuid import functools import types # python3 compatibility try: import cPickle as pickle except ImportError: import pickle # check remote process has this API version API_VERSION = 2 def findOnPath(cmd): """Find a command on the system path, or None if does not exist.""" path = os.getenv('PATH', os.path.defpath) pathparts = path.split(os.path.pathsep) for dirname in pathparts: cmdtry = os.path.join(dirname, cmd) if os.path.isfile(cmdtry): return cmdtry return None class Embedded(object): """An embedded instance of Veusz. This embedded instance supports all the normal veusz functions """ remote = None def __init__(self, name='Veusz', copyof=None, hidden=False): """Initialse the embedded veusz window. name is the name of the window to show. copyof duplicates a view of the document in the Embedded instance given hidden makes a hidden window (useful for batch scripting) """ if not Embedded.remote: Embedded.startRemote() if not copyof: retval = self.sendCommand( (-1, '_NewWindow', (name,), {'hidden': hidden}) ) else: retval = self.sendCommand( (-1, '_NewWindowCopy', (name, copyof.winno), {'hidden': hidden}) ) self.winno, cmds = retval # add methods corresponding to Veusz commands for name, doc in cmds: func = functools.partial(self.runCommand, name) func.__doc__ = doc # set docstring func.__name__ = name # make name match what it calls method = types.MethodType(func, self) setattr(self, name, method) # assign to self # check API version is same try: remotever = self._apiVersion() except AttributeError: remotever = 0 if remotever != API_VERSION: raise RuntimeError("Remote Veusz instance reports version %i of" " API. This embed.py supports version %i." % (remotever, API_VERSION)) # define root object self.Root = WidgetNode(self, 'widget', '/') def StartSecondView(self, name = 'Veusz'): """Provides a second view onto the document of this window. 
Returns an Embedded instance """ return Embedded(name=name, copyof=self) def WaitForClose(self): """Wait for the window to close.""" # this is messy, polling for closure, but cleaner than doing # it in the remote client while not self.IsClosed(): time.sleep(0.1) @classmethod def makeSockets(cls): """Make socket(s) to communicate with remote process. Returns string to send to remote process """ if ( hasattr(socket, 'AF_UNIX') and hasattr(socket, 'socketpair') ): # convenient interface cls.sockfamily = socket.AF_UNIX sock, socket2 = socket.socketpair(cls.sockfamily, socket.SOCK_STREAM) # socket is closed on popen in Python 3.4+ without this (PEP 446) try: os.set_inheritable(socket2.fileno(), True) except AttributeError: pass sendtext = 'unix %i\n' % socket2.fileno() cls.socket2 = socket2 # prevent socket being destroyed waitaccept = False else: # otherwise mess around with internet sockets # * This is required for windows, which doesn't have AF_UNIX # * It is required where socketpair is not supported cls.sockfamily = socket.AF_INET sock = socket.socket(cls.sockfamily, socket.SOCK_STREAM) sock.bind( ('localhost', 0) ) interface, port = sock.getsockname() sock.listen(1) sendtext = 'internet %s %i\n' % (interface, port) waitaccept = True return (sock, sendtext.encode('ascii'), waitaccept) @classmethod def makeRemoteProcess(cls): """Try to find veusz process for remote program.""" # here's where to look for embed_remote.py thisdir = os.path.dirname(os.path.abspath(__file__)) # build up a list of possible command lines to start the remote veusz if sys.platform == 'win32': # windows is a special case # we need to run embed_remote.py under pythonw.exe, not python.exe # look for the python windows interpreter on path findpython = findOnPath('pythonw.exe') if not findpython: # if it wasn't on the path, use sys.prefix instead findpython = os.path.join(sys.prefix, 'pythonw.exe') # look for veusz executable on path findexe = findOnPath('veusz.exe') if not findexe: try: # add the usual place as a guess :-( findexe = os.path.join(os.environ['ProgramFiles'], 'Veusz', 'veusz.exe') except KeyError: pass # here is the list of commands to try possiblecommands = [ [findpython, os.path.join(thisdir, 'veusz_main.py')], [findexe] ] else: executable = sys.executable # try embed_remote.py in this directory, veusz in this directory # or veusz on the path in order possiblecommands = [ [executable, os.path.join(thisdir, 'veusz_main.py')], [os.path.join(thisdir, 'veusz')], [findOnPath('veusz')] ] # cheat and look for Veusz app for MacOS under the standard application # directory. 
I don't know how else to find it :-( if sys.platform == 'darwin': findbundle = findOnPath('Veusz.app') if findbundle: possiblecommands += [ [findbundle+'/Contents/MacOS/Veusz'] ] else: possiblecommands += [[ '/Applications/Veusz.app/Contents/MacOS/Veusz' ]] possiblecommands += [[ os.path.expanduser('~/Applications/Veusz.app/Contents/MacOS/Veusz')]] for cmd in possiblecommands: # only try to run commands that exist as error handling # does not work well when interfacing with OS (especially Windows) if ( None not in cmd and False not in [os.path.isfile(c) for c in cmd] ): try: # we don't use stdout below, but works around windows bug # http://bugs.python.org/issue1124861 cls.remote = subprocess.Popen(cmd + ['--embed-remote'], shell=False, bufsize=0, close_fds=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE) return except OSError: pass raise RuntimeError('Unable to find a veusz executable on system path') @classmethod def startRemote(cls): """Start remote process.""" cls.serv_socket, sendtext, waitaccept = cls.makeSockets() cls.makeRemoteProcess() stdin = cls.remote.stdin # send socket number over pipe stdin.write( sendtext ) # accept connection if necessary if waitaccept: cls.serv_socket, address = cls.serv_socket.accept() # Send a secret to the remote program by secure route and # check it comes back. This is to check that no program has # secretly connected on our port, which isn't really useful # for AF_UNIX sockets. secret = (str(uuid.uuid4()) + '\n').encode('ascii') stdin.write(secret) secretback = cls.readLenFromSocket(cls.serv_socket, len(secret)) if secret != secretback: raise RuntimeError("Security between client and server broken") # packet length for command bytes cls.cmdlen = struct.calcsize('<I') atexit.register(cls.exitQt) @staticmethod def readLenFromSocket(socket, length): """Read length bytes from socket.""" s = b'' while len(s) < length: s += socket.recv(length-len(s)) return s @staticmethod def writeToSocket(socket, data): count = 0 while count < len(data): count += socket.send(data[count:]) @classmethod def sendCommand(cls, cmd): """Send the command to the remote process.""" # note: protocol 2 for python2 compat outs = pickle.dumps(cmd, 2) cls.writeToSocket( cls.serv_socket, struct.pack('<I', len(outs)) ) cls.writeToSocket( cls.serv_socket, outs ) backlen = struct.unpack('<I', cls.readLenFromSocket(cls.serv_socket, cls.cmdlen))[0] rets = cls.readLenFromSocket( cls.serv_socket, backlen ) retobj = pickle.loads(rets) if isinstance(retobj, Exception): raise retobj else: return retobj def runCommand(self, cmd, *args, **args2): """Execute the given function in the Qt thread with the arguments given.""" return self.sendCommand( (self.winno, cmd, args[1:], args2) ) @classmethod def exitQt(cls): """Exit the Qt thread.""" try: cls.sendCommand( (-1, '_Quit', (), {}) ) cls.serv_socket.shutdown(socket.SHUT_RDWR) cls.serv_socket.close() except socket.error: pass cls.serv_socket, cls.from_pipe = -1, -1 ############################################################################ # Tree-based interface to Veusz widget tree below class Node(object): """Represents an element in the Veusz widget-settinggroup-setting tree.""" def __init__(self, ci, wtype, path): self._ci = ci self._type = wtype self._path = path def __eq__(self, other): """Is this the same node?""" return self._ci is other._ci and self._path == other._path def __ne__(self, other): """Is this a different node?""" return self._ci is not other._ci or self._path != other._path def __repr__(self): """Text description""" 
return "<%s at %s (type %s)>" % (self.__class__.__name__, repr(self._path), self._type) def fromPath(self, path): """Return a new Node for the path given.""" wtype = self._ci.NodeType(path) if wtype == 'widget': return WidgetNode(self._ci, wtype, path) elif wtype == 'setting': return SettingNode(self._ci, wtype, path) else: return SettingGroupNode(self._ci, wtype, path) @property def path(self): """Veusz full path to node""" return self._path @property def type(self): """Type of node: 'widget', 'settinggroup', or 'setting'""" return self._type def _joinPath(self, child): """Return new path of child.""" if self._path == '/': return '/' + child else: return self._path + '/' + child def __getitem__(self, key): """Return a child widget, settinggroup or setting.""" if self._type != 'setting': try: return self.fromPath(self._joinPath(key)) except ValueError: pass raise KeyError("%s does not have key or child '%s'" % ( self.__class__.__name__, key)) def __getattr__(self, attr): """Return a child widget, settinggroup or setting.""" if self._type == 'setting': pass elif attr[:2] != '__': try: return self.fromPath(self._joinPath(attr)) except ValueError: pass raise AttributeError("%s does not have attribute or child '%s'" % ( self.__class__.__name__, attr)) # boring ways to get children of nodes @property def children(self): """Generator to get children as Nodes.""" for c in self._ci.NodeChildren(self._path): yield self.fromPath(self._joinPath(c)) @property def children_widgets(self): """Generator to get child widgets as Nodes.""" for c in self._ci.NodeChildren(self._path, types='widget'): yield self.fromPath(self._joinPath(c)) @property def children_settings(self): """Generator to get child settings as Nodes.""" for c in self._ci.NodeChildren(self._path, types='setting'): yield self.fromPath(self._joinPath(c)) @property def children_settinggroups(self): """Generator to get child settingsgroups as Nodes.""" for c in self._ci.NodeChildren(self._path, types='settinggroup'): yield self.fromPath(self._joinPath(c)) @property def childnames(self): """Get names of children.""" return self._ci.NodeChildren(self._path) @property def childnames_widgets(self): """Get names of children widgets.""" return self._ci.NodeChildren(self._path, types='widget') @property def childnames_settings(self): """Get names of child settings.""" return self._ci.NodeChildren(self._path, types='setting') @property def childnames_settinggroups(self): """Get names of child setting groups""" return self._ci.NodeChildren(self._path, types='settinggroup') @property def parent(self): """Return parent of node.""" if self._path == '/': raise TypeError("Cannot get parent node of root node""") p = self._path.split('/')[:-1] if p == ['']: newpath = '/' else: newpath = '/'.join(p) return self.fromPath(newpath) @property def name(self): """Get name of node.""" if self._path == '/': return self._path else: return self._path.split('/')[-1] class SettingNode(Node): """A node which is a setting.""" def _getVal(self): """The value of a setting.""" if self._type == 'setting': return self._ci.Get(self._path) raise TypeError("Cannot get value unless is a setting""") def _setVal(self, val): if self._type == 'setting': self._ci.Set(self._path, val) else: raise TypeError("Cannot set value unless is a setting.""") val = property(_getVal, _setVal) @property def isreference(self): """Is this setting set to a reference to another setting?.""" ref = self._ci.ResolveReference(self._path) return bool(ref) def resolveReference(self): """If this is set to a 
reference to a setting, return a new SettingNode to the original setting. If there are a chain of references, follow them to the target. Returns None if this setting is not set to a reference. """ real = self._ci.ResolveReference(self._path) if not real: return None return self.fromPath(real) def setToReference(self, othernode): """Make this setting point to another setting, by creating a reference. References can be chained. Note that the absolute path is used to specify a reference, so moving affected widgets around will destroy the link.""" if not isinstance(othernode, SettingNode): raise ValueError("othernode is not a SettingNode") self._ci.SetToReference(self._path, othernode._path) @property def settingtype(self): """Get the type of setting, which is a string.""" return self._ci.SettingType(self._path) class SettingGroupNode(Node): """A node containing a group of settings.""" pass class WidgetNode(Node): """A node pointing to a widget.""" @property def widgettype(self): """Get Veusz type of widget.""" return self._ci.WidgetType(self.path) def WalkWidgets(self, widgettype=None): """Generator to walk widget tree and get this widget and the widgets below this WidgetNode of type given. widgettype is a Veusz widget type name or None to get all widgets.""" if widgettype is None or self._ci.WidgetType(self._path) == widgettype: yield self for child in self.children_widgets: for w in child.WalkWidgets(widgettype=widgettype): yield w def Add(self, widgettype, *args, **args_opt): """Add a widget of the type given, returning the Node instance. """ args_opt['widget'] = self._path name = self._ci.Add(widgettype, *args, **args_opt) return WidgetNode( self._ci, 'widget', self._joinPath(name) ) def Rename(self, newname): """Renames widget to name given.""" if self._path == '/': raise RuntimeError("Cannot rename root widget") self._ci.Rename(self._path, newname) self._path = '/'.join( self._path.split('/')[:-1] + [newname] ) def Action(self, action): """Applies action on widget.""" self._ci.Action(action, widget=self._path) def Remove(self): """Removes a widget and its children.""" self._ci.Remove(self._path) def Clone(self, newparent, newname=None): """Clone widget, placing at newparent. Uses newname if given. Returns new node.""" path = self._ci.CloneWidget(self._path, newparent._path, newname=newname) return WidgetNode( self._ci, 'widget', path )
gpl-2.0
-839,175,214,798,296,300
33.319444
89
0.565055
false
jzellman/webpy-crust
crust/auth.py
1
3655
from uuid import uuid4

import web


def AuthorizationProcessor(session, user_field="user_id", require_login=None):
    """
    Authorization processor useful for authorizing URL paths.

    Example:

    session = web.session.Session(app, session_store,
                                  initializer={"user_id": None})
    app = web.application(urls, globals())

    def require_login_for_account(path):
        return path.startswith("/account")

    app.add_processor(
        AuthorizationProcessor(session,
                               require_login=require_login_for_account))

    This will cause authorization for any URL starting with "/account"
    """
    def auth_app_processor(handle):
        path = web.ctx.path
        if require_login and require_login(path):
            uri = web.ctx.environ.get('REQUEST_URI', path)
            session.back = uri
            user_id = session.get(user_field, None)
            if user_id:
                setattr(web.ctx, user_field, user_id)
        return handle()
    return auth_app_processor


class Protector:
    """
    Contains a set of common decorators useful for action level authorization.

    Example:

    def current_user():
        return db.load_some_user()

    session = web.session.Session(app, session_store,
                                  initializer={"user_id": None})
    protect = Protector(session, current_user)
    template_funs = { 'csrf_token': protect.csrf_token }
    render = web.template.render('templates/', globals=template_funs)

    class account:
        @protect.login_required
        def GET(self):
            pass

        @protect.csrf_protected
        def POST(self):

    Then in the account form template

    <form method="POST">
        <input type="hidden" name="csrf-token" value="$csrf_token()">
    </form>
    """

    def __init__(self, session, user_loader, **kwargs):
        self.session = session
        self.user_loader = user_loader
        self.user_field = kwargs.get("user_field", "user_id")
        self.login_path = kwargs.get("login_path", "/login")

    def login_required(self, f):
        def decorated(*args, **kwargs):
            self._verify_session_user()
            return f(*args, **kwargs)
        return decorated

    def admin_required(self, f):
        def decorated(*args, **kwargs):
            self._verify_session_user(web.notfound)
            user = self.user_loader()
            if not user.is_admin:
                raise web.notfound()
            return f(*args, **kwargs)
        return decorated

    def _verify_session_user(self, redirect=None):
        if not self.session.get(self.user_field, None):
            self.session.back = web.ctx.path
            if redirect:
                raise redirect()
            else:
                raise web.seeother(self.login_path)

    def csrf_protected(self, f):
        def decorated(*args, **kwargs):
            inp = web.input()
            stored_token = self.session.pop('csrf_token', None)
            if ('csrf_token' not in inp or
                    inp.csrf_token != stored_token):
                raise web.HTTPError(
                    "400 Bad request",
                    {'content-type': 'text/html'},
                    """Cross-site request forgery (CSRF) attempt (or stale browser form).
<a href="">Back to the form</a>.""")
            return f(*args, **kwargs)
        return decorated

    def csrf_token(self):
        if 'csrf_token' not in self.session:
            self.session.csrf_token = uuid4().hex
        return self.session.csrf_token
mit
-677,115,743,829,445,000
30.508621
89
0.560055
false
minrk/sympy
sympy/polys/polyroots.py
1
18245
"""Algorithms for computing symbolic roots of polynomials. """ from sympy.core.symbol import Dummy from sympy.core.add import Add from sympy.core.mul import Mul from sympy.core import S, I, Basic from sympy.core.sympify import sympify from sympy.core.numbers import Rational, igcd from sympy.ntheory import divisors, isprime, nextprime from sympy.functions import exp, sqrt, re, im from sympy.polys.polytools import Poly, cancel, factor, gcd_list from sympy.polys.specialpolys import cyclotomic_poly from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded, DomainError from sympy.simplify import simplify from sympy.utilities import all, default_sort_key from sympy.core.compatibility import reduce import math def roots_linear(f): """Returns a list of roots of a linear polynomial.""" r = -f.nth(0)/f.nth(1) dom = f.get_domain() if not dom.is_Numerical: if dom.is_Composite: r = factor(r) else: r = simplify(r) return [r] def roots_quadratic(f): """Returns a list of roots of a quadratic polynomial.""" a, b, c = f.all_coeffs() dom = f.get_domain() def _simplify(expr): if dom.is_Composite: return factor(expr) else: return simplify(expr) if c is S.Zero: r0, r1 = S.Zero, -b/a if not dom.is_Numerical: r1 = _simplify(r1) elif b is S.Zero: r = -c/a if not dom.is_Numerical: R = sqrt(_simplify(r)) else: R = sqrt(r) r0 = R r1 = -R else: d = b**2 - 4*a*c if dom.is_Numerical: D = sqrt(d) r0 = (-b + D) / (2*a) r1 = (-b - D) / (2*a) else: D = sqrt(_simplify(d)) A = 2*a E = _simplify(-b/A) F = D/A r0 = E + F r1 = E - F return sorted([r0, r1], key=default_sort_key) def roots_cubic(f): """Returns a list of roots of a cubic polynomial.""" _, a, b, c = f.monic().all_coeffs() if c is S.Zero: x1, x2 = roots([1,a,b], multiple = True) return [x1, S.Zero, x2] p = b - a**2/3 q = c - a*b/3 + 2*a**3/27 pon3 = p/3 aon3 = a/3 if p is S.Zero: if q is S.Zero: return [-aon3]*3 else: u1 = q**Rational(1, 3) elif q is S.Zero: y1, y2 = roots([1, 0, p], multiple=True) return [tmp - aon3 for tmp in [y1, S.Zero, y2]] else: u1 = (q/2 + sqrt(q**2/4 + pon3**3))**Rational(1, 3) coeff = S.ImaginaryUnit*sqrt(3)/2 u2 = u1*(-S.Half + coeff) u3 = u1*(-S.Half - coeff) soln = [ -u1 + pon3/u1 - aon3, -u2 + pon3/u2 - aon3, -u3 + pon3/u3 - aon3 ] return soln def roots_quartic(f): r""" Returns a list of roots of a quartic polynomial. There are many references for solving quartic expressions available [1-5]. This reviewer has found that many of them require one to select from among 2 or more possible sets of solutions and that some solutions work when one is searching for real roots but don't work when searching for complex roots (though this is not always stated clearly). The following routine has been tested and found to be correct for 0, 2 or 4 complex roots. The quasisymmetric case solution [6] looks for quartics that have the form `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`. Although there is a general solution, simpler results can be obtained for certain values of the coefficients. In all cases, 4 roots are returned: 1) `f = c + a*(a**2/8 - b/2) == 0` 2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0` 3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then a) `p == 0` b) `p != 0` **Examples** >>> from sympy import Poly, symbols, I >>> from sympy.polys.polyroots import roots_quartic >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20')) >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I >>> sorted(str(tmp.evalf(n=2)) for tmp in r) ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I'] **References** 1. 
http://mathforum.org/dr.math/faq/faq.cubic.equations.html 2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method 3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html 4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf 5. http://www.albmath.org/files/Math_5713.pdf 6. http://www.statemaster.com/encyclopedia/Quartic-equation """ _, a, b, c, d = f.monic().all_coeffs() if not d: return [S.Zero] + roots([1, a, b, c], multiple=True) elif (c/a)**2 == d: x, m = f.gen, c/a g = Poly(x**2 + a*x + b - 2*m, x) z1, z2 = roots_quadratic(g) h1 = Poly(x**2 - z1*x + m, x) h2 = Poly(x**2 - z2*x + m, x) r1 = roots_quadratic(h1) r2 = roots_quadratic(h2) return r1 + r2 else: a2 = a**2 e = b - 3*a2/8 f = c + a*(a2/8 - b/2) g = d - a*(a*(3*a2/256 - b/16) + c/4) aon4 = a/4 ans = [] if f is S.Zero: y1, y2 = [tmp**S.Half for tmp in roots([1, e, g], multiple = True)] return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]] if g is S.Zero: y = [S.Zero] + roots([1, 0, e, f], multiple = True) return [tmp - aon4 for tmp in y] else: p = -e**2/12 - g q = -e**3/108 + e*g/3 - f**2/8 TH = Rational(1, 3) if p is S.Zero: y = -5*e/6 - q**TH else: # with p !=0 then u below is not 0 root = sqrt(q**2/4 + p**3/27) r = -q/2 + root # or -q/2 - root u = r**TH # primary root of solve(x**3-r, x) y = -5*e/6 + u - p/u/3 w = sqrt(e + 2*y) arg1 = 3*e + 2*y arg2 = 2*f/w for s in [-1, 1]: root = sqrt(-(arg1 + s*arg2)) for t in [-1, 1]: ans.append((s*w - t*root)/2 - aon4) return ans def roots_binomial(f): """Returns a list of roots of a binomial polynomial.""" n = f.degree() a, b = f.nth(n), f.nth(0) alpha = (-cancel(b/a))**Rational(1, n) if alpha.is_number: alpha = alpha.expand(complex=True) roots, I = [], S.ImaginaryUnit for k in xrange(n): zeta = exp(2*k*S.Pi*I/n).expand(complex=True) roots.append((alpha*zeta).expand(power_base=False)) return sorted(roots, key=default_sort_key) def _inv_totient_estimate(m): """ Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``. **Examples** >>> from sympy.polys.polyroots import _inv_totient_estimate >>> _inv_totient_estimate(192) (192, 840) >>> _inv_totient_estimate(400) (400, 1750) """ primes = [ d + 1 for d in divisors(m) if isprime(d + 1) ] a, b = 1, 1 for p in primes: a *= p b *= p - 1 L = m U = int(math.ceil(m*(float(a)/b))) P = p = 2 primes = [] while P <= U: p = nextprime(p) primes.append(p) P *= p P //= p b = 1 for p in primes[:-1]: b *= p - 1 U = int(math.ceil(m*(float(P)/b))) return L, U def roots_cyclotomic(f, factor=False): """Compute roots of cyclotomic polynomials. 
""" L, U = _inv_totient_estimate(f.degree()) for n in xrange(L, U+1): g = cyclotomic_poly(n, f.gen, polys=True) if f == g: break else: # pragma: no cover raise RuntimeError("failed to find index of a cyclotomic polynomial") roots = [] if not factor: for k in xrange(1, n+1): if igcd(k, n) == 1: roots.append(exp(2*k*S.Pi*I/n).expand(complex=True)) else: g = Poly(f, extension=(-1)**Rational(1, n)) for h, _ in g.factor_list()[1]: roots.append(-h.TC()) return sorted(roots, key=default_sort_key) def roots_rational(f): """Returns a list of rational roots of a polynomial.""" domain = f.get_domain() if domain.is_QQ: _, f = f.clear_denoms() elif domain.is_ZZ: f = f.set_domain('QQ') else: return [] LC_divs = divisors(int(f.LC())) EC_divs = divisors(int(f.EC())) if not f.eval(S.Zero): zeros = [S.Zero] else: zeros = [] for p in LC_divs: for q in EC_divs: zero = Rational(p, q) if not f.eval(zero): zeros.append(zero) if not f.eval(-zero): zeros.append(-zero) return sorted(zeros, key=default_sort_key) def _integer_basis(poly): """Compute coefficient basis for a polynomial over integers. """ monoms, coeffs = zip(*poly.terms()) monoms, = zip(*monoms) coeffs = map(abs, coeffs) if coeffs[0] < coeffs[-1]: coeffs = list(reversed(coeffs)) else: return None monoms = monoms[:-1] coeffs = coeffs[:-1] divs = reversed(divisors(gcd_list(coeffs))[1:]) try: div = divs.next() except StopIteration: return None while True: for monom, coeff in zip(monoms, coeffs): if coeff % div**monom != 0: try: div = divs.next() except StopIteration: return None else: break else: return div def preprocess_roots(poly): """Try to get rid of symbolic coefficients from ``poly``. """ coeff = S.One try: _, poly = poly.clear_denoms(convert=True) except DomainError: return coeff, poly poly = poly.primitive()[1] poly = poly.retract() if poly.get_domain().is_Poly and all(c.is_monomial for c in poly.rep.coeffs()): poly = poly.inject() strips = zip(*poly.monoms()) gens = list(poly.gens[1:]) base, strips = strips[0], strips[1:] for gen, strip in zip(list(gens), strips): reverse = False if strip[0] < strip[-1]: strip = reversed(strip) reverse = True ratio = None for a, b in zip(base, strip): if not a and not b: continue elif not a or not b: break elif b % a != 0: break else: _ratio = b // a if ratio is None: ratio = _ratio elif ratio != _ratio: break else: if reverse: ratio = -ratio poly = poly.eval(gen, 1) coeff *= gen**(-ratio) gens.remove(gen) if gens: poly = poly.eject(*gens) if poly.is_univariate and poly.get_domain().is_ZZ: basis = _integer_basis(poly) if basis is not None: n = poly.degree() def func(k, coeff): return coeff//basis**(n-k[0]) poly = poly.termwise(func) coeff *= basis return coeff, poly def roots(f, *gens, **flags): """ Computes symbolic roots of a univariate polynomial. Given a univariate polynomial f with symbolic coefficients (or a list of the polynomial's coefficients), returns a dictionary with its roots and their multiplicities. Only roots expressible via radicals will be returned. To get a complete set of roots use RootOf class or numerical methods instead. By default cubic and quartic formulas are used in the algorithm. To disable them because of unreadable output set `cubics=False` or `quartics=False` respectively. To get roots from a specific domain set the `filter` flag with one of the following specifiers: Z, Q, R, I, C. By default all roots are returned (this is equivalent to setting `filter='C'`). By default a dictionary is returned giving a compact result in case of multiple roots. 
However to get a tuple containing all those roots set the `multiple` flag to True. **Examples** >>> from sympy import Poly, roots >>> from sympy.abc import x, y >>> roots(x**2 - 1, x) {-1: 1, 1: 1} >>> p = Poly(x**2-1, x) >>> roots(p) {-1: 1, 1: 1} >>> p = Poly(x**2-y, x, y) >>> roots(Poly(p, x)) {-y**(1/2): 1, y**(1/2): 1} >>> roots(x**2 - y, x) {-y**(1/2): 1, y**(1/2): 1} >>> roots([1, 0, -1]) {-1: 1, 1: 1} """ flags = dict(flags) auto = flags.pop('auto', True) cubics = flags.pop('cubics', True) quartics = flags.pop('quartics', True) multiple = flags.pop('multiple', False) filter = flags.pop('filter', None) predicate = flags.pop('predicate', None) if isinstance(f, list): if gens: raise ValueError('redundant generators given') x = Dummy('x') poly, i = {}, len(f)-1 for coeff in f: poly[i], i = sympify(coeff), i-1 f = Poly(poly, x, field=True) else: try: f = Poly(f, *gens, **flags) except GeneratorsNeeded: if multiple: return [] else: return {} if f.is_multivariate: raise PolynomialError('multivariate polynomials are not supported') def _update_dict(result, root, k): if root in result: result[root] += k else: result[root] = k def _try_decompose(f): """Find roots using functional decomposition. """ factors, roots = f.decompose(), [] for root in _try_heuristics(factors[0]): roots.append(root) for factor in factors[1:]: previous, roots = list(roots), [] for root in previous: g = factor - Poly(root, f.gen) for root in _try_heuristics(g): roots.append(root) return roots def _try_heuristics(f): """Find roots using formulas and some tricks. """ if f.is_ground: return [] if f.is_monomial: return [S(0)]*f.degree() if f.length() == 2: if f.degree() == 1: return map(cancel, roots_linear(f)) else: return roots_binomial(f) result = [] for i in [-1, 1]: if not f.eval(i): f = f.quo(Poly(f.gen - i, f.gen)) result.append(i) break n = f.degree() if n == 1: result += map(cancel, roots_linear(f)) elif n == 2: result += map(cancel, roots_quadratic(f)) elif f.is_cyclotomic: result += roots_cyclotomic(f) elif n == 3 and cubics: result += roots_cubic(f) elif n == 4 and quartics: result += roots_quartic(f) return result (k,), f = f.terms_gcd() if not k: zeros = {} else: zeros = {S(0) : k} coeff, f = preprocess_roots(f) if auto and f.get_domain().has_Ring: f = f.to_field() result = {} if not f.is_ground: if not f.get_domain().is_Exact: for r in f.nroots(): _update_dict(result, r, 1) elif f.degree() == 1: result[roots_linear(f)[0]] = 1 elif f.degree() == 2: for r in roots_quadratic(f): _update_dict(result, r, 1) elif f.length() == 2: for r in roots_binomial(f): _update_dict(result, r, 1) else: _, factors = Poly(f.as_expr()).factor_list() if len(factors) == 1 and factors[0][1] == 1: for root in _try_decompose(f): _update_dict(result, root, 1) else: for factor, k in factors: for r in _try_heuristics(Poly(factor, f.gen, field=True)): _update_dict(result, r, k) if coeff is not S.One: _result, result, = result, {} for root, k in _result.iteritems(): result[coeff*root] = k result.update(zeros) if filter not in [None, 'C']: handlers = { 'Z' : lambda r: r.is_Integer, 'Q' : lambda r: r.is_Rational, 'R' : lambda r: r.is_real, 'I' : lambda r: r.is_imaginary, } try: query = handlers[filter] except KeyError: raise ValueError("Invalid filter: %s" % filter) for zero in dict(result).iterkeys(): if not query(zero): del result[zero] if predicate is not None: for zero in dict(result).iterkeys(): if not predicate(zero): del result[zero] if not multiple: return result else: zeros = [] for zero, k in result.iteritems(): 
zeros.extend([zero]*k) return sorted(zeros, key=default_sort_key) def root_factors(f, *gens, **args): """ Returns all factors of a univariate polynomial. **Examples** >>> from sympy.abc import x, y >>> from sympy.polys.polyroots import root_factors >>> root_factors(x**2-y, x) [x - y**(1/2), x + y**(1/2)] """ args = dict(args) filter = args.pop('filter', None) F = Poly(f, *gens, **args) if not F.is_Poly: return [f] if F.is_multivariate: raise ValueError('multivariate polynomials not supported') x = F.gens[0] zeros = roots(F, filter=filter) if not zeros: factors = [F] else: factors, N = [], 0 for r, n in zeros.iteritems(): factors, N = factors + [Poly(x-r, x)]*n, N + n if N < F.degree(): G = reduce(lambda p,q: p*q, factors) factors.append(F.quo(G)) if not isinstance(f, Poly): return [ f.as_expr() for f in factors ] else: return factors
bsd-3-clause
9,109,196,948,957,987,000
25.064286
91
0.509455
false
JoshBradshaw/Arterial-BP-MRI-Triggering-Unit
monitoring_tool/realtimePlot.py
1
8458
from __future__ import division import os import ui_trigger import sys import numpy from PyQt4 import QtCore, QtGui, Qt import PyQt4.Qwt5 as Qwt import serial import serial.tools.list_ports import logging import logging.handlers from datetime import datetime from pprint import pprint TRIGGER_PULSE_CODE = 100000 SIXTEEN_BIT_TO_COUNTS = 19859 # 2^16 counts / 5 V = 13107.2 counts / volt SERIAL_BAUDRATE = 115200 log_dir = "logs" # larger x-axis -> slower progression of the line accross the plot speeds = { 'Slowest': 40, 'Slow': 20, 'Medium': 10, 'Fast': 4 } ## setup gui app = QtGui.QApplication(sys.argv) ### SET-UP WINDOWS win_plot = ui_trigger.QtGui.QMainWindow() gui = ui_trigger.Ui_win_trigger() gui.setupUi(win_plot) gui.bpPlot.setAxisScale(0, 0, 5, 5) gui.bpPlot.setAxisScale(1, 0, 4, 4) gui.bpPlot.setAxisTitle(0, "BP Signal (V)") # times the plot refresh gui.timer = QtCore.QTimer() # line on blood pressure graph bp_curve=Qwt.QwtPlotCurve() bp_curve.attach(gui.bpPlot) bp_curve.setPaintAttribute(Qwt.QwtPlotCurve.PaintFiltered, False) bp_curve.setPaintAttribute(Qwt.QwtPlotCurve.ClipPolygons, True) bp_curve.setPen(Qt.QPen(Qt.Qt.green)) # line on triggering graph trigger_curve = Qwt.QwtPlotCurve() trigger_curve.attach(gui.bpPlot) trigger_curve.setPaintAttribute(Qwt.QwtPlotCurve.PaintFiltered, False) trigger_curve.setPaintAttribute(Qwt.QwtPlotCurve.ClipPolygons, True) sympen = Qt.QPen(Qt.Qt.red) sympen.setWidth(5) trigger_curve.setStyle(-1) # make the heart beats appear as little red triangles trigger_curve.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.VLine, Qt.QBrush(), sympen, Qt.QSize(3, 3))) trigger_curve.setPen(Qt.QPen(Qt.Qt.red)) # if its a windows 7 machine clean up the blinkyness by running anti aliasing # if its a windows xp or mac, do not run anti aliasing because it will lag # if its a osx or linux box this call will not work anti_alias = False try: major_verion = sys.getwindowsversion().major if major_verion >= 6: anti_alias = True except: pass if anti_alias: trigger_curve.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased) # prettier, but laggy on slow computers bp_curve.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased) # prettier, but laggy on slow computers if not os.path.exists(log_dir): os.makedirs(log_dir) log_filename = os.path.join(log_dir, 'bp_triggering.log') # Set up a specific logger with our desired output level logger = logging.getLogger('triggeringLogger') logger.setLevel(logging.INFO) handler = logging.handlers.RotatingFileHandler(log_filename, backupCount=200) logger.addHandler(handler) def open_log_directory(): os.startfile(log_dir) # only works on windows, but that's of little consequence class Teensy(object): """Teensy is the microcontroller that drives the BP triggering unit. While it runs it it sends its current sensor levels out over serial. This serial communication is strictly one-way. 
The teensy has two codes it sends continuously: Trigger sent -> 100000 Sensor Value -> [0, 65536] value read by the 16 bit ADC every 16 ms """ def __init__(self): self.port_options = {} # populate the dropdown menu of serial ports on the form for port, description, details in serial.tools.list_ports.comports(): port = str(port) self.port_options[description] = port gui.serialPortSelector.addItem(description) self.ser = serial.Serial() def start(self): """begin Serial communications with the teensy""" if self.ser.isOpen(): self.ser.close() # must be the same baudrate as the one used in Serial.begin in the microcontroller program self.ser.baudrate = SERIAL_BAUDRATE self.ser.timeout = 1 # 1 second just in case readline() ever hangs self.ser.port = self.get_serial_port() self.ser.open() def stop(self): """end Serial communications with the teensy, freeing the serial port for other uses like reprogramming""" if self.ser.isOpen(): self.ser.close() def get_sensor_val(self): """read one line of data over serial and parse it""" try: serial_line = self.ser.readline() sampleval, trigger = serial_line.split() sampleval = int(sampleval) trigger = int(trigger) except ValueError: print "Failed to parse input, ensure that the serial port selector is set to Teensy USB Serial" return None return sampleval, trigger def get_serial_port(self): """get the port which is currently selected in the form""" return self.port_options[str(gui.serialPortSelector.currentText())] class plotData(object): """Updates the plot animations with the most recent sensor data, and rescales the axis as required. """ def __init__(self): self.select_speed() self.logging = gui.logDataButton.isChecked() self.teensy = Teensy() self.redraw_period = 2 # redraw once every 5 samples at 250Hz for 50fps redraws self.redraw_count = 0 self.trigger_pulse_width = 4 self.trigger_count = 0 def update_curves(self): """shifts the lines on the chart animation by one points, and adds the new point to the rightmost edge""" sensor_values = self.teensy.get_sensor_val() if sensor_values is None: return sampleval, trigger = sensor_values # trigger pulses are not marked immediately, instead they are marked when the next sensor # value is recieved for the sake of staying in perfect synch # shift the curves one point self.ys=numpy.roll(self.ys, -1) self.ts=numpy.roll(self.ts, -1) # 16 bit ADC value range 0-65536, want to reduce to 0-5V for human readability self.ys[self.last_point] = sampleval / SIXTEEN_BIT_TO_COUNTS # mark trigger pulse if trigger and self.trigger_count > self.trigger_pulse_width: # make the trigger marker appear just above the BP plot line self.ts[self.last_point] = (sampleval / SIXTEEN_BIT_TO_COUNTS) + 0.003 self.trigger_count = 0 else: self.ts[self.last_point] = -1 # -1 will place these points outside the plot's viewable area self.trigger_count += 1 if self.redraw_count < self.redraw_period: self.redraw_count += 1 else: self.redraw_plot() self.redraw_count = 0 # log the sample logger.info("{}: {} {}".format(datetime.now().strftime('%Y-%m-%d-%H-%M-%f'), self.ys[self.last_point], self.ts[self.last_point])) def redraw_plot(self): bp_curve.setData(self.xs, self.ys) gui.bpPlot.replot() trigger_curve.setData(self.xs, self.ts+0.3) def select_speed(self): """get the speed selected on the dropdown and setup the axis scales accordingly""" self.speed = speeds[str(gui.speedSelect.currentText())] self.xs = numpy.arange(0, self.speed, 0.012) self.numPoints = len(self.xs) self.last_point = self.numPoints-1 self.ys = numpy.zeros(self.numPoints) self.ts = 
numpy.zeros(self.numPoints) self.ts.fill(-1) self.trigger = False def start_stop(self): """start and stop the animation and logging (this has no effect on the actual triggering unit)""" if gui.startBtn.isChecked(): handler.doRollover() self.teensy.start() gui.timer.start(0.25) # poll the serial port every 1ms, 1 byte is expected every 4ms win_plot.connect(gui.timer, QtCore.SIGNAL('timeout()'), self.update_curves) else: gui.timer.stop() self.teensy.stop() if __name__ == '__main__': plots = plotData() # connect the buttons and dropdowns to there handler functions win_plot.connect(gui.speedSelect, QtCore.SIGNAL('activated(QString)'), plots.select_speed) win_plot.connect(gui.startBtn, QtCore.SIGNAL('released()'), plots.start_stop) win_plot.connect(gui.logDataButton, QtCore.SIGNAL('released()'), open_log_directory) ### DISPLAY WINDOWS win_plot.show() #WAIT UNTIL QT RETURNS EXIT CODE sys.exit(app.exec_())
mit
-1,731,087,471,367,237,400
35.304721
137
0.657839
false
lovetox/gajim
src/chat_control.py
1
88922
# -*- coding:utf-8 -*- ## src/chat_control.py ## ## Copyright (C) 2006 Dimitur Kirov <dkirov AT gmail.com> ## Copyright (C) 2006-2014 Yann Leboulanger <asterix AT lagaule.org> ## Copyright (C) 2006-2008 Jean-Marie Traissard <jim AT lapin.org> ## Nikos Kouremenos <kourem AT gmail.com> ## Travis Shirk <travis AT pobox.com> ## Copyright (C) 2007 Lukas Petrovicky <lukas AT petrovicky.net> ## Julien Pivotto <roidelapluie AT gmail.com> ## Copyright (C) 2007-2008 Brendan Taylor <whateley AT gmail.com> ## Stephan Erb <steve-e AT h3c.de> ## Copyright (C) 2008 Jonathan Schleifer <js-gajim AT webkeks.org> ## ## This file is part of Gajim. ## ## Gajim is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 3 only. ## ## Gajim is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Gajim. If not, see <http://www.gnu.org/licenses/>. ## import os import time from gi.repository import Gtk from gi.repository import Gdk from gi.repository import GdkPixbuf from gi.repository import Pango from gi.repository import GLib import gtkgui_helpers import gui_menu_builder import message_control import dialogs from common import gajim from common import helpers from common import exceptions from common import ged from common import i18n from common.stanza_session import EncryptedStanzaSession, ArchivingStanzaSession from common.contacts import GC_Contact from common.logger import constants from nbxmpp.protocol import NS_XHTML, NS_XHTML_IM, NS_FILE, NS_MUC from nbxmpp.protocol import NS_RECEIPTS, NS_ESESSION from nbxmpp.protocol import NS_JINGLE_RTP_AUDIO, NS_JINGLE_RTP_VIDEO from nbxmpp.protocol import NS_JINGLE_ICE_UDP, NS_JINGLE_FILE_TRANSFER from nbxmpp.protocol import NS_CHATSTATES from common.connection_handlers_events import MessageOutgoingEvent from common.exceptions import GajimGeneralException from command_system.implementation.hosts import ChatCommands try: import gtkspell HAS_GTK_SPELL = True except (ImportError, ValueError): HAS_GTK_SPELL = False from chat_control_base import ChatControlBase ################################################################################ class ChatControl(ChatControlBase): """ A control for standard 1-1 chat """ ( JINGLE_STATE_NULL, JINGLE_STATE_CONNECTING, JINGLE_STATE_CONNECTION_RECEIVED, JINGLE_STATE_CONNECTED, JINGLE_STATE_ERROR ) = range(5) TYPE_ID = message_control.TYPE_CHAT old_msg_kind = None # last kind of the printed message # Set a command host to bound to. Every command given through a chat will be # processed with this command host. 
COMMAND_HOST = ChatCommands def __init__(self, parent_win, contact, acct, session, resource=None): ChatControlBase.__init__(self, self.TYPE_ID, parent_win, 'chat_control', contact, acct, resource) self.gpg_is_active = False self.last_recv_message_id = None self.last_recv_message_marks = None self.last_message_timestamp = None # for muc use: # widget = self.xml.get_object('muc_window_actions_button') self.actions_button = self.xml.get_object('message_window_actions_button') id_ = self.actions_button.connect('clicked', self.on_actions_button_clicked) self.handlers[id_] = self.actions_button self._formattings_button = self.xml.get_object('formattings_button') self._add_to_roster_button = self.xml.get_object( 'add_to_roster_button') id_ = self._add_to_roster_button.connect('clicked', self._on_add_to_roster_menuitem_activate) self.handlers[id_] = self._add_to_roster_button self._audio_button = self.xml.get_object('audio_togglebutton') id_ = self._audio_button.connect('toggled', self.on_audio_button_toggled) self.handlers[id_] = self._audio_button # add a special img gtkgui_helpers.add_image_to_button(self._audio_button, 'gajim-mic_inactive') self._video_button = self.xml.get_object('video_togglebutton') id_ = self._video_button.connect('toggled', self.on_video_button_toggled) self.handlers[id_] = self._video_button # add a special img gtkgui_helpers.add_image_to_button(self._video_button, 'gajim-cam_inactive') self._send_file_button = self.xml.get_object('send_file_button') # add a special img for send file button pixbuf = gtkgui_helpers.get_icon_pixmap('document-send', quiet=True) img = Gtk.Image.new_from_pixbuf(pixbuf) self._send_file_button.set_image(img) id_ = self._send_file_button.connect('clicked', self._on_send_file_menuitem_activate) self.handlers[id_] = self._send_file_button self._convert_to_gc_button = self.xml.get_object( 'convert_to_gc_button') id_ = self._convert_to_gc_button.connect('clicked', self._on_convert_to_gc_menuitem_activate) self.handlers[id_] = self._convert_to_gc_button self._contact_information_button = self.xml.get_object( 'contact_information_button') id_ = self._contact_information_button.connect('clicked', self._on_contact_information_menuitem_activate) self.handlers[id_] = self._contact_information_button compact_view = gajim.config.get('compact_view') self.chat_buttons_set_visible(compact_view) self.widget_set_visible(self.xml.get_object('banner_eventbox'), gajim.config.get('hide_chat_banner')) self.authentication_button = self.xml.get_object( 'authentication_button') id_ = self.authentication_button.connect('clicked', self._on_authentication_button_clicked) self.handlers[id_] = self.authentication_button # Add lock image to show chat encryption self.lock_image = self.xml.get_object('lock_image') # Remove padding from authentication button or else it will # be higher than the message box style_provider = Gtk.CssProvider() css = 'GtkButton { padding-top: 0px; padding-bottom: 0px}' style_provider.load_from_data(css.encode()) context = self.authentication_button.get_style_context() context.add_provider(style_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER) # Convert to GC icon img = self.xml.get_object('convert_to_gc_button_image') img.set_from_pixbuf(gtkgui_helpers.load_icon( 'muc_active').get_pixbuf()) self._audio_banner_image = self.xml.get_object('audio_banner_image') self._video_banner_image = self.xml.get_object('video_banner_image') self.audio_sid = None self.audio_state = self.JINGLE_STATE_NULL self.audio_available = False self.video_sid = None 
self.video_state = self.JINGLE_STATE_NULL self.video_available = False self.update_toolbar() self._pep_images = {} self._pep_images['mood'] = self.xml.get_object('mood_image') self._pep_images['activity'] = self.xml.get_object('activity_image') self._pep_images['tune'] = self.xml.get_object('tune_image') self._pep_images['location'] = self.xml.get_object('location_image') self.update_all_pep_types() # keep timeout id and window obj for possible big avatar # it is on enter-notify and leave-notify so no need to be # per jid self.show_bigger_avatar_timeout_id = None self.bigger_avatar_window = None self.show_avatar() # chatstate timers and state self.reset_kbd_mouse_timeout_vars() self._schedule_activity_timers() # Hook up signals id_ = self.parent_win.window.connect('motion-notify-event', self._on_window_motion_notify) self.handlers[id_] = self.parent_win.window message_tv_buffer = self.msg_textview.get_buffer() id_ = message_tv_buffer.connect('changed', self._on_message_tv_buffer_changed) self.handlers[id_] = message_tv_buffer widget = self.xml.get_object('avatar_eventbox') widget.set_property('height-request', gajim.config.get( 'chat_avatar_height')) id_ = widget.connect('enter-notify-event', self.on_avatar_eventbox_enter_notify_event) self.handlers[id_] = widget id_ = widget.connect('leave-notify-event', self.on_avatar_eventbox_leave_notify_event) self.handlers[id_] = widget id_ = widget.connect('button-press-event', self.on_avatar_eventbox_button_press_event) self.handlers[id_] = widget widget = self.xml.get_object('location_eventbox') id_ = widget.connect('button-release-event', self.on_location_eventbox_button_release_event) self.handlers[id_] = widget id_ = widget.connect('enter-notify-event', self.on_location_eventbox_enter_notify_event) self.handlers[id_] = widget id_ = widget.connect('leave-notify-event', self.on_location_eventbox_leave_notify_event) self.handlers[id_] = widget for key in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '*', '0', '#'): widget = self.xml.get_object(key + '_button') id_ = widget.connect('pressed', self.on_num_button_pressed, key) self.handlers[id_] = widget id_ = widget.connect('released', self.on_num_button_released) self.handlers[id_] = widget self.dtmf_window = self.xml.get_object('dtmf_window') self.dtmf_window.get_child().set_direction(Gtk.TextDirection.LTR) id_ = self.dtmf_window.connect('focus-out-event', self.on_dtmf_window_focus_out_event) self.handlers[id_] = self.dtmf_window widget = self.xml.get_object('dtmf_button') id_ = widget.connect('clicked', self.on_dtmf_button_clicked) self.handlers[id_] = widget widget = self.xml.get_object('mic_hscale') id_ = widget.connect('value_changed', self.on_mic_hscale_value_changed) self.handlers[id_] = widget widget = self.xml.get_object('sound_hscale') id_ = widget.connect('value_changed', self.on_sound_hscale_value_changed) self.handlers[id_] = widget self.info_bar = Gtk.InfoBar() content_area = self.info_bar.get_content_area() self.info_bar_label = Gtk.Label() self.info_bar_label.set_use_markup(True) self.info_bar_label.set_halign(Gtk.Align.START) self.info_bar_label.set_valign(Gtk.Align.START) content_area.add(self.info_bar_label) self.info_bar.set_no_show_all(True) widget = self.xml.get_object('vbox2') widget.pack_start(self.info_bar, False, True, 5) widget.reorder_child(self.info_bar, 1) # List of waiting infobar messages self.info_bar_queue = [] self.subscribe_events() if not session: # Don't use previous session if we want to a specific resource # and it's not the same if not resource: resource = 
contact.resource session = gajim.connections[self.account].find_controlless_session( self.contact.jid, resource) self.setup_seclabel(self.xml.get_object('label_selector')) if session: session.control = self self.session = session if session.enable_encryption: self.print_esession_details() # Enable encryption if needed self.no_autonegotiation = False e2e_is_active = self.session and self.session.enable_encryption gpg_pref = gajim.config.get_per('contacts', contact.jid, 'gpg_enabled') # try GPG first if not e2e_is_active and gpg_pref and \ gajim.config.get_per('accounts', self.account, 'keyid') and \ gajim.connections[self.account].USE_GPG: self.gpg_is_active = True gajim.encrypted_chats[self.account].append(contact.jid) msg = _('OpenPGP encryption enabled') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) if self.session: self.session.loggable = gajim.config.get_per('accounts', self.account, 'log_encrypted_sessions') # GPG is always authenticated as we use GPG's WoT self._show_lock_image(self.gpg_is_active, 'OpenPGP', self.gpg_is_active, self.session and self.session.is_loggable(), True) self.update_ui() # restore previous conversation self.restore_conversation() self.msg_textview.grab_focus() gajim.ged.register_event_handler('pep-received', ged.GUI1, self._nec_pep_received) gajim.ged.register_event_handler('vcard-received', ged.GUI1, self._nec_vcard_received) gajim.ged.register_event_handler('failed-decrypt', ged.GUI1, self._nec_failed_decrypt) gajim.ged.register_event_handler('chatstate-received', ged.GUI1, self._nec_chatstate_received) gajim.ged.register_event_handler('caps-received', ged.GUI1, self._nec_caps_received) # PluginSystem: adding GUI extension point for this ChatControl # instance object gajim.plugin_manager.gui_extension_point('chat_control', self) def subscribe_events(self): """ Register listeners to the events class """ gajim.events.event_added_subscribe(self.on_event_added) gajim.events.event_removed_subscribe(self.on_event_removed) def unsubscribe_events(self): """ Unregister listeners to the events class """ gajim.events.event_added_unsubscribe(self.on_event_added) gajim.events.event_removed_unsubscribe(self.on_event_removed) def _update_toolbar(self): if (gajim.connections[self.account].connected > 1 and not \ self.TYPE_ID == 'pm') or (self.contact.show != 'offline' and \ self.TYPE_ID == 'pm'): emoticons_button = self.xml.get_object('emoticons_button') emoticons_button.set_sensitive(True) send_button = self.xml.get_object('send_button') send_button.set_sensitive(True) # Formatting if self.contact.supports(NS_XHTML_IM) and not self.gpg_is_active: self._formattings_button.set_sensitive(True) self._formattings_button.set_tooltip_text(_( 'Show a list of formattings')) else: self._formattings_button.set_sensitive(False) if self.contact.supports(NS_XHTML_IM): self._formattings_button.set_tooltip_text(_('Formattings are ' 'not available when GPG is active')) else: self._formattings_button.set_tooltip_text(_('This contact does ' 'not support HTML')) # Add to roster if not isinstance(self.contact, GC_Contact) \ and _('Not in Roster') in self.contact.groups and \ gajim.connections[self.account].roster_supported: self._add_to_roster_button.show() else: self._add_to_roster_button.hide() # Jingle detection if self.contact.supports(NS_JINGLE_ICE_UDP) and \ gajim.HAVE_FARSTREAM and self.contact.resource: self.audio_available = self.contact.supports(NS_JINGLE_RTP_AUDIO) self.video_available = self.contact.supports(NS_JINGLE_RTP_VIDEO) else: if 
self.video_available or self.audio_available: self.stop_jingle() self.video_available = False self.audio_available = False # Audio buttons self._audio_button.set_sensitive(self.audio_available) # Video buttons self._video_button.set_sensitive(self.video_available) # change tooltip text for audio and video buttons if farstream is # not installed audio_tooltip_text = _('Toggle audio session') + '\n' video_tooltip_text = _('Toggle video session') + '\n' if not gajim.HAVE_FARSTREAM: ext_text = _('Feature not available, see Help->Features') self._audio_button.set_tooltip_text(audio_tooltip_text + ext_text) self._video_button.set_tooltip_text(video_tooltip_text + ext_text) elif not self.audio_available : ext_text =_('Feature not supported by remote client') self._audio_button.set_tooltip_text(audio_tooltip_text + ext_text) self._video_button.set_tooltip_text(video_tooltip_text + ext_text) else: self._audio_button.set_tooltip_text(audio_tooltip_text[:-1]) self._video_button.set_tooltip_text(video_tooltip_text[:-1]) # Send file if ((self.contact.supports(NS_FILE) or \ self.contact.supports(NS_JINGLE_FILE_TRANSFER)) and \ (self.type_id == 'chat' or self.gc_contact.resource)) and \ self.contact.show != 'offline': self._send_file_button.set_sensitive(True) self._send_file_button.set_tooltip_text(_('Send files')) else: self._send_file_button.set_sensitive(False) if not (self.contact.supports(NS_FILE) or self.contact.supports( NS_JINGLE_FILE_TRANSFER)): self._send_file_button.set_tooltip_text(_( "This contact does not support file transfer.")) else: self._send_file_button.set_tooltip_text( _("You need to know the real JID of the contact to send " "him or her a file.")) # Convert to GC if gajim.config.get_per('accounts', self.account, 'is_zeroconf'): self._convert_to_gc_button.set_no_show_all(True) self._convert_to_gc_button.hide() else: if self.contact.supports(NS_MUC): self._convert_to_gc_button.set_sensitive(True) else: self._convert_to_gc_button.set_sensitive(False) # Information if gajim.account_is_disconnected(self.account): self._contact_information_button.set_sensitive(False) else: self._contact_information_button.set_sensitive(True) def update_all_pep_types(self): for pep_type in self._pep_images: self.update_pep(pep_type) def update_pep(self, pep_type): if isinstance(self.contact, GC_Contact): return if pep_type not in self._pep_images: return pep = self.contact.pep img = self._pep_images[pep_type] if pep_type in pep: img.set_from_pixbuf(gtkgui_helpers.get_pep_as_pixbuf(pep[pep_type])) img.set_tooltip_markup(pep[pep_type].asMarkupText()) img.show() else: img.hide() def _nec_pep_received(self, obj): if obj.conn.name != self.account: return if obj.jid != self.contact.jid: return if obj.pep_type == 'nickname': self.update_ui() self.parent_win.redraw_tab(self) self.parent_win.show_title() else: self.update_pep(obj.pep_type) def _update_jingle(self, jingle_type): if jingle_type not in ('audio', 'video'): return banner_image = getattr(self, '_' + jingle_type + '_banner_image') state = getattr(self, jingle_type + '_state') if state == self.JINGLE_STATE_NULL: banner_image.hide() else: banner_image.show() if state == self.JINGLE_STATE_CONNECTING: banner_image.set_from_stock( Gtk.STOCK_CONVERT, 1) elif state == self.JINGLE_STATE_CONNECTION_RECEIVED: banner_image.set_from_stock( Gtk.STOCK_NETWORK, 1) elif state == self.JINGLE_STATE_CONNECTED: banner_image.set_from_stock( Gtk.STOCK_CONNECT, 1) elif state == self.JINGLE_STATE_ERROR: banner_image.set_from_stock( Gtk.STOCK_DIALOG_WARNING, 1) 
self.update_toolbar() def update_audio(self): self._update_jingle('audio') hbox = self.xml.get_object('audio_buttons_hbox') if self.audio_state == self.JINGLE_STATE_CONNECTED: # Set volume from config input_vol = gajim.config.get('audio_input_volume') output_vol = gajim.config.get('audio_output_volume') input_vol = max(min(input_vol, 100), 0) output_vol = max(min(output_vol, 100), 0) self.xml.get_object('mic_hscale').set_value(input_vol) self.xml.get_object('sound_hscale').set_value(output_vol) # Show vbox hbox.set_no_show_all(False) hbox.show_all() elif not self.audio_sid: hbox.set_no_show_all(True) hbox.hide() def update_video(self): self._update_jingle('video') def change_resource(self, resource): old_full_jid = self.get_full_jid() self.resource = resource new_full_jid = self.get_full_jid() # update gajim.last_message_time if old_full_jid in gajim.last_message_time[self.account]: gajim.last_message_time[self.account][new_full_jid] = \ gajim.last_message_time[self.account][old_full_jid] # update events gajim.events.change_jid(self.account, old_full_jid, new_full_jid) # update MessageWindow._controls self.parent_win.change_jid(self.account, old_full_jid, new_full_jid) def stop_jingle(self, sid=None, reason=None): if self.audio_sid and sid in (self.audio_sid, None): self.close_jingle_content('audio') if self.video_sid and sid in (self.video_sid, None): self.close_jingle_content('video') def _set_jingle_state(self, jingle_type, state, sid=None, reason=None): if jingle_type not in ('audio', 'video'): return if state in ('connecting', 'connected', 'stop', 'error') and reason: str = _('%(type)s state : %(state)s, reason: %(reason)s') % { 'type': jingle_type.capitalize(), 'state': state, 'reason': reason} self.print_conversation(str, 'info') states = {'connecting': self.JINGLE_STATE_CONNECTING, 'connection_received': self.JINGLE_STATE_CONNECTION_RECEIVED, 'connected': self.JINGLE_STATE_CONNECTED, 'stop': self.JINGLE_STATE_NULL, 'error': self.JINGLE_STATE_ERROR} jingle_state = states[state] if getattr(self, jingle_type + '_state') == jingle_state or state == 'error': return if state == 'stop' and getattr(self, jingle_type + '_sid') not in (None, sid): return setattr(self, jingle_type + '_state', jingle_state) if jingle_state == self.JINGLE_STATE_NULL: setattr(self, jingle_type + '_sid', None) if state in ('connection_received', 'connecting'): setattr(self, jingle_type + '_sid', sid) getattr(self, '_' + jingle_type + '_button').set_active(jingle_state != self.JINGLE_STATE_NULL) getattr(self, 'update_' + jingle_type)() def set_audio_state(self, state, sid=None, reason=None): self._set_jingle_state('audio', state, sid=sid, reason=reason) def set_video_state(self, state, sid=None, reason=None): self._set_jingle_state('video', state, sid=sid, reason=reason) def _get_audio_content(self): session = gajim.connections[self.account].get_jingle_session( self.contact.get_full_jid(), self.audio_sid) return session.get_content('audio') def on_num_button_pressed(self, widget, num): self._get_audio_content()._start_dtmf(num) def on_num_button_released(self, released): self._get_audio_content()._stop_dtmf() def on_dtmf_button_clicked(self, widget): self.dtmf_window.show_all() def on_dtmf_window_focus_out_event(self, widget, event): self.dtmf_window.hide() def on_mic_hscale_value_changed(self, widget, value): self._get_audio_content().set_mic_volume(value / 100) # Save volume to config gajim.config.set('audio_input_volume', value) def on_sound_hscale_value_changed(self, widget, value): 
self._get_audio_content().set_out_volume(value / 100) # Save volume to config gajim.config.set('audio_output_volume', value) def on_avatar_eventbox_enter_notify_event(self, widget, event): """ Enter the eventbox area so we under conditions add a timeout to show a bigger avatar after 0.5 sec """ jid = self.contact.jid avatar_pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(jid) if avatar_pixbuf in ('ask', None): return avatar_w = avatar_pixbuf.get_width() avatar_h = avatar_pixbuf.get_height() scaled_buf = self.xml.get_object('avatar_image').get_pixbuf() scaled_buf_w = scaled_buf.get_width() scaled_buf_h = scaled_buf.get_height() # do we have something bigger to show? if avatar_w > scaled_buf_w or avatar_h > scaled_buf_h: # wait for 0.5 sec in case we leave earlier if self.show_bigger_avatar_timeout_id is not None: GLib.source_remove(self.show_bigger_avatar_timeout_id) self.show_bigger_avatar_timeout_id = GLib.timeout_add(500, self.show_bigger_avatar, widget) def on_avatar_eventbox_leave_notify_event(self, widget, event): """ Left the eventbox area that holds the avatar img """ # did we add a timeout? if yes remove it if self.show_bigger_avatar_timeout_id is not None: GLib.source_remove(self.show_bigger_avatar_timeout_id) self.show_bigger_avatar_timeout_id = None def on_avatar_eventbox_button_press_event(self, widget, event): """ If right-clicked, show popup """ if event.button == 3: # right click menu = Gtk.Menu() menuitem = Gtk.MenuItem.new_with_mnemonic(_('Save _As')) id_ = menuitem.connect('activate', gtkgui_helpers.on_avatar_save_as_menuitem_activate, self.contact.jid, self.contact.get_shown_name()) self.handlers[id_] = menuitem menu.append(menuitem) menu.show_all() menu.connect('selection-done', lambda w: w.destroy()) # show the menu menu.show_all() menu.attach_to_widget(widget, None) menu.popup(None, None, None, None, event.button, event.time) return True def on_location_eventbox_button_release_event(self, widget, event): if 'location' in self.contact.pep: location = self.contact.pep['location']._pep_specific_data if ('lat' in location) and ('lon' in location): uri = 'http://www.openstreetmap.org/?' 
+ \ 'mlat=%(lat)s&mlon=%(lon)s&zoom=16' % {'lat': location['lat'], 'lon': location['lon']} helpers.launch_browser_mailer('url', uri) def on_location_eventbox_leave_notify_event(self, widget, event): """ Just moved the mouse so show the cursor """ cursor = Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR) self.parent_win.window.get_window().set_cursor(cursor) def on_location_eventbox_enter_notify_event(self, widget, event): cursor = Gdk.Cursor.new(Gdk.CursorType.HAND2) self.parent_win.window.get_window().set_cursor(cursor) def _on_window_motion_notify(self, widget, event): """ It gets called no matter if it is the active window or not """ if self.parent_win.get_active_jid() == self.contact.jid: # if window is the active one, change vars assisting chatstate self.mouse_over_in_last_5_secs = True self.mouse_over_in_last_30_secs = True def _schedule_activity_timers(self): self.possible_paused_timeout_id = GLib.timeout_add_seconds(5, self.check_for_possible_paused_chatstate, None) self.possible_inactive_timeout_id = GLib.timeout_add_seconds(30, self.check_for_possible_inactive_chatstate, None) def update_ui(self): # The name banner is drawn here ChatControlBase.update_ui(self) self.update_toolbar() def _update_banner_state_image(self): contact = gajim.contacts.get_contact_with_highest_priority(self.account, self.contact.jid) if not contact or self.resource: # For transient contacts contact = self.contact show = contact.show jid = contact.jid # Set banner image img_32 = gajim.interface.roster.get_appropriate_state_images(jid, size='32', icon_name=show) img_16 = gajim.interface.roster.get_appropriate_state_images(jid, icon_name=show) if show in img_32 and img_32[show].get_pixbuf(): # we have 32x32! use it! banner_image = img_32[show] use_size_32 = True else: banner_image = img_16[show] use_size_32 = False banner_status_img = self.xml.get_object('banner_status_image') if banner_image.get_storage_type() == Gtk.ImageType.ANIMATION: banner_status_img.set_from_animation(banner_image.get_animation()) else: pix = banner_image.get_pixbuf() if pix is not None: if use_size_32: banner_status_img.set_from_pixbuf(pix) else: # we need to scale 16x16 to 32x32 scaled_pix = pix.scale_simple(32, 32, GdkPixbuf.InterpType.BILINEAR) banner_status_img.set_from_pixbuf(scaled_pix) def draw_banner_text(self): """ Draw the text in the fat line at the top of the window that houses the name, jid """ contact = self.contact jid = contact.jid banner_name_label = self.xml.get_object('banner_name_label') name = contact.get_shown_name() if self.resource: name += '/' + self.resource if self.TYPE_ID == message_control.TYPE_PM: name = i18n.direction_mark + _( '%(nickname)s from group chat %(room_name)s') % \ {'nickname': name, 'room_name': self.room_name} name = i18n.direction_mark + GLib.markup_escape_text(name) # We know our contacts nick, but if another contact has the same nick # in another account we need to also display the account. 
# except if we are talking to two different resources of the same contact acct_info = '' for account in gajim.contacts.get_accounts(): if account == self.account: continue if acct_info: # We already found a contact with same nick break for jid in gajim.contacts.get_jid_list(account): other_contact_ = \ gajim.contacts.get_first_contact_from_jid(account, jid) if other_contact_.get_shown_name() == \ self.contact.get_shown_name(): acct_info = i18n.direction_mark + ' (%s)' % \ GLib.markup_escape_text(self.account) break status = contact.status if status is not None: banner_name_label.set_ellipsize(Pango.EllipsizeMode.END) self.banner_status_label.set_ellipsize(Pango.EllipsizeMode.END) status_reduced = helpers.reduce_chars_newlines(status, max_lines=1) else: status_reduced = '' status_escaped = GLib.markup_escape_text(status_reduced) font_attrs, font_attrs_small = self.get_font_attrs() st = gajim.config.get('displayed_chat_state_notifications') cs = contact.chatstate if cs and st in ('composing_only', 'all'): if contact.show == 'offline': chatstate = '' elif st == 'all' or cs == 'composing': chatstate = helpers.get_uf_chatstate(cs) else: chatstate = '' label_text = '<span %s>%s</span><span %s>%s %s</span>' \ % (font_attrs, name, font_attrs_small, acct_info, chatstate) if acct_info: acct_info = i18n.direction_mark + ' ' + acct_info label_tooltip = '%s%s %s' % (name, acct_info, chatstate) else: # weight="heavy" size="x-large" label_text = '<span %s>%s</span><span %s>%s</span>' % \ (font_attrs, name, font_attrs_small, acct_info) if acct_info: acct_info = i18n.direction_mark + ' ' + acct_info label_tooltip = '%s%s' % (name, acct_info) if status_escaped: status_text = self.urlfinder.sub(self.make_href, status_escaped) status_text = '<span %s>%s</span>' % (font_attrs_small, status_text) self.banner_status_label.set_tooltip_text(status) self.banner_status_label.set_no_show_all(False) self.banner_status_label.show() else: status_text = '' self.banner_status_label.hide() self.banner_status_label.set_no_show_all(True) self.banner_status_label.set_markup(status_text) # setup the label that holds name and jid banner_name_label.set_markup(label_text) banner_name_label.set_tooltip_text(label_tooltip) def close_jingle_content(self, jingle_type): sid = getattr(self, jingle_type + '_sid') if not sid: return setattr(self, jingle_type + '_sid', None) setattr(self, jingle_type + '_state', self.JINGLE_STATE_NULL) session = gajim.connections[self.account].get_jingle_session( self.contact.get_full_jid(), sid) if session: content = session.get_content(jingle_type) if content: session.remove_content(content.creator, content.name) getattr(self, '_' + jingle_type + '_button').set_active(False) getattr(self, 'update_' + jingle_type)() def on_jingle_button_toggled(self, widget, jingle_type): img_name = 'gajim-%s_%s' % ({'audio': 'mic', 'video': 'cam'}[jingle_type], {True: 'active', False: 'inactive'}[widget.get_active()]) path_to_img = gtkgui_helpers.get_icon_path(img_name) if widget.get_active(): if getattr(self, jingle_type + '_state') == \ self.JINGLE_STATE_NULL: if jingle_type == 'video': video_hbox = self.xml.get_object('video_hbox') video_hbox.set_no_show_all(False) if gajim.config.get('video_see_self'): fixed = self.xml.get_object('outgoing_fixed') fixed.set_no_show_all(False) video_hbox.show_all() out_da = self.xml.get_object('outgoing_drawingarea') out_da.realize() if os.name == 'nt': out_xid = out_da.get_window().handle else: out_xid = out_da.get_window().get_xid() else: out_xid = None video_hbox.show_all() in_da 
= self.xml.get_object('incoming_drawingarea') in_da.realize() in_xid = in_da.get_window().get_xid() sid = gajim.connections[self.account].start_video( self.contact.get_full_jid(), in_xid, out_xid) else: sid = getattr(gajim.connections[self.account], 'start_' + jingle_type)(self.contact.get_full_jid()) getattr(self, 'set_' + jingle_type + '_state')('connecting', sid) else: video_hbox = self.xml.get_object('video_hbox') video_hbox.set_no_show_all(True) video_hbox.hide() fixed = self.xml.get_object('outgoing_fixed') fixed.set_no_show_all(True) self.close_jingle_content(jingle_type) img = getattr(self, '_' + jingle_type + '_button').get_property('image') img.set_from_file(path_to_img) def on_audio_button_toggled(self, widget): self.on_jingle_button_toggled(widget, 'audio') def on_video_button_toggled(self, widget): self.on_jingle_button_toggled(widget, 'video') def _toggle_gpg(self): if not self.gpg_is_active and not self.contact.keyID: dialogs.ErrorDialog(_('No OpenPGP key assigned'), _('No OpenPGP key is assigned to this contact. So you cannot ' 'encrypt messages with OpenPGP.')) return ec = gajim.encrypted_chats[self.account] if self.gpg_is_active: # Disable encryption ec.remove(self.contact.jid) self.gpg_is_active = False loggable = False msg = _('OpenPGP encryption disabled') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) if self.session: self.session.loggable = True else: # Enable encryption ec.append(self.contact.jid) self.gpg_is_active = True msg = _('OpenPGP encryption enabled') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) loggable = gajim.config.get_per('accounts', self.account, 'log_encrypted_sessions') if self.session: self.session.loggable = loggable loggable = self.session.is_loggable() else: loggable = loggable and gajim.config.should_log(self.account, self.contact.jid) if loggable: msg = _('Session WILL be logged') else: msg = _('Session WILL NOT be logged') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) gajim.config.set_per('contacts', self.contact.jid, 'gpg_enabled', self.gpg_is_active) self._show_lock_image(self.gpg_is_active, 'OpenPGP', self.gpg_is_active, loggable, True) def _show_lock_image(self, visible, enc_type='', enc_enabled=False, chat_logged=False, authenticated=False): """ Set lock icon visibility and create tooltip """ #encryption %s active status_string = enc_enabled and _('is') or _('is NOT') #chat session %s be logged logged_string = chat_logged and _('will') or _('will NOT') if authenticated: #About encrypted chat session authenticated_string = _('and authenticated') img_path = gtkgui_helpers.get_icon_path('security-high') else: #About encrypted chat session authenticated_string = _('and NOT authenticated') img_path = gtkgui_helpers.get_icon_path('security-low') self.lock_image.set_from_file(img_path) #status will become 'is' or 'is not', authentificaed will become #'and authentificated' or 'and not authentificated', logged will become #'will' or 'will not' tooltip = _('%(type)s encryption %(status)s active %(authenticated)s.\n' 'Your chat session %(logged)s be logged.') % {'type': enc_type, 'status': status_string, 'authenticated': authenticated_string, 'logged': logged_string} self.authentication_button.set_tooltip_text(tooltip) self.widget_set_visible(self.authentication_button, not visible) self.lock_image.set_sensitive(enc_enabled) def _on_authentication_button_clicked(self, widget): if self.gpg_is_active: dialogs.GPGInfoWindow(self, self.parent_win.window) elif self.session and 
self.session.enable_encryption: dialogs.ESessionInfoWindow(self.session, self.parent_win.window) def send_message(self, message, keyID='', chatstate=None, xhtml=None, process_commands=True, attention=False): """ Send a message to contact """ message = helpers.remove_invalid_xml_chars(message) if message in ('', None, '\n'): return None # refresh timers self.reset_kbd_mouse_timeout_vars() contact = self.contact encrypted = bool(self.session) and self.session.enable_encryption keyID = '' if self.gpg_is_active: keyID = contact.keyID encrypted = True if not keyID: keyID = 'UNKNOWN' chatstates_on = gajim.config.get('outgoing_chat_state_notifications') != \ 'disabled' chatstate_to_send = None if chatstates_on and contact is not None: if contact.supports(NS_CHATSTATES): # send active chatstate on every message (as XEP says) chatstate_to_send = 'active' contact.our_chatstate = 'active' GLib.source_remove(self.possible_paused_timeout_id) GLib.source_remove(self.possible_inactive_timeout_id) self._schedule_activity_timers() def _on_sent(obj, msg_stanza, message, encrypted, xhtml, label): id_ = msg_stanza.getID() if self.contact.supports(NS_RECEIPTS) and gajim.config.get_per( 'accounts', self.account, 'request_receipt'): xep0184_id = id_ else: xep0184_id = None if label: displaymarking = label.getTag('displaymarking') else: displaymarking = None if self.correcting: self.correcting = False self.msg_textview.override_background_color( Gtk.StateType.NORMAL, self.old_message_tv_color) self.print_conversation(message, self.contact.jid, encrypted=encrypted, xep0184_id=xep0184_id, xhtml=xhtml, displaymarking=displaymarking, msg_stanza_id=id_, correct_id=msg_stanza.getTagAttr('replace', 'id'), additional_data=obj.additional_data) ChatControlBase.send_message(self, message, keyID, type_='chat', chatstate=chatstate_to_send, xhtml=xhtml, callback=_on_sent, callback_args=[message, encrypted, xhtml, self.get_seclabel()], process_commands=process_commands, attention=attention) def check_for_possible_paused_chatstate(self, arg): """ Did we move mouse of that window or write something in message textview in the last 5 seconds? If yes - we go active for mouse, composing for kbd. If not - we go paused if we were previously composing """ contact = self.contact jid = contact.jid current_state = contact.our_chatstate if current_state is False: # jid doesn't support chatstates return False # stop looping message_buffer = self.msg_textview.get_buffer() if (self.kbd_activity_in_last_5_secs and message_buffer.get_char_count()): # Only composing if the keyboard activity was in text entry self.send_chatstate('composing', self.contact) elif (self.mouse_over_in_last_5_secs and current_state == 'inactive' and jid == self.parent_win.get_active_jid()): self.send_chatstate('active', self.contact) else: if current_state == 'composing': self.send_chatstate('paused', self.contact) # pause composing # assume no activity and let the motion-notify or 'insert-text' make them # True refresh 30 seconds vars too or else it's 30 - 5 = 25 seconds! self.reset_kbd_mouse_timeout_vars() return True # loop forever def check_for_possible_inactive_chatstate(self, arg): """ Did we move mouse over that window or wrote something in message textview in the last 30 seconds? if yes - we go active. 
If no - we go inactive """ contact = self.contact current_state = contact.our_chatstate if current_state is False: # jid doesn't support chatstates return False # stop looping if self.mouse_over_in_last_5_secs or self.kbd_activity_in_last_5_secs: return True # loop forever if not self.mouse_over_in_last_30_secs or \ self.kbd_activity_in_last_30_secs: self.send_chatstate('inactive', contact) # assume no activity and let the motion-notify or 'insert-text' make them # True refresh 30 seconds too or else it's 30 - 5 = 25 seconds! self.reset_kbd_mouse_timeout_vars() return True # loop forever def reset_kbd_mouse_timeout_vars(self): self.kbd_activity_in_last_5_secs = False self.mouse_over_in_last_5_secs = False self.mouse_over_in_last_30_secs = False self.kbd_activity_in_last_30_secs = False def on_cancel_session_negotiation(self): msg = _('Session negotiation cancelled') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) def print_archiving_session_details(self): """ Print esession settings to textview """ archiving = bool(self.session) and isinstance(self.session, ArchivingStanzaSession) and self.session.archiving if archiving: msg = _('This session WILL be archived on server') else: msg = _('This session WILL NOT be archived on server') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) def print_esession_details(self): """ Print esession settings to textview """ e2e_is_active = bool(self.session) and self.session.enable_encryption if e2e_is_active: msg = _('This session is encrypted') if self.session.is_loggable(): msg += _(' and WILL be logged') else: msg += _(' and WILL NOT be logged') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) if not self.session.verified_identity: ChatControlBase.print_conversation_line(self, _("Remote contact's identity not verified. Click the shield button for more details."), 'status', '', None) else: msg = _('E2E encryption disabled') ChatControlBase.print_conversation_line(self, msg, 'status', '', None) self._show_lock_image(e2e_is_active, 'E2E', e2e_is_active, self.session and \ self.session.is_loggable(), self.session and self.session.verified_identity) def print_session_details(self, old_session=None): if isinstance(self.session, EncryptedStanzaSession) or \ (old_session and isinstance(old_session, EncryptedStanzaSession)): self.print_esession_details() elif isinstance(self.session, ArchivingStanzaSession): self.print_archiving_session_details() def get_our_nick(self): return gajim.nicks[self.account] def print_conversation(self, text, frm='', tim=None, encrypted=False, subject=None, xhtml=None, simple=False, xep0184_id=None, displaymarking=None, msg_log_id=None, correct_id=None, msg_stanza_id=None, additional_data={}): """ Print a line in the conversation If frm is set to status: it's a status message. if frm is set to error: it's an error message. The difference between status and error is mainly that with error, msg count as a new message (in systray and in control). If frm is set to info: it's a information message. If frm is set to print_queue: it is incomming from queue. If frm is set to another value: it's an outgoing message. If frm is not set: it's an incomming message. 
""" contact = self.contact if frm == 'status': if not gajim.config.get('print_status_in_chats'): return kind = 'status' name = '' elif frm == 'error': kind = 'error' name = '' elif frm == 'info': kind = 'info' name = '' else: if self.session and self.session.enable_encryption: # ESessions if not encrypted: msg = _('The following message was NOT encrypted') ChatControlBase.print_conversation_line(self, msg, 'status', '', tim) else: # GPG encryption if encrypted and not self.gpg_is_active: msg = _('The following message was encrypted') ChatControlBase.print_conversation_line(self, msg, 'status', '', tim) # turn on OpenPGP if this was in fact a XEP-0027 encrypted # message if encrypted == 'xep27': self._toggle_gpg() elif not encrypted and self.gpg_is_active: msg = _('The following message was NOT encrypted') ChatControlBase.print_conversation_line(self, msg, 'status', '', tim) if not frm: kind = 'incoming' name = contact.get_shown_name() elif frm == 'print_queue': # incoming message, but do not update time kind = 'incoming_queue' name = contact.get_shown_name() else: kind = 'outgoing' name = self.get_our_nick() if not xhtml and not (encrypted and self.gpg_is_active) and \ gajim.config.get('rst_formatting_outgoing_messages'): from common.rst_xhtml_generator import create_xhtml xhtml = create_xhtml(text) if xhtml: xhtml = '<body xmlns="%s">%s</body>' % (NS_XHTML, xhtml) ChatControlBase.print_conversation_line(self, text, kind, name, tim, subject=subject, old_kind=self.old_msg_kind, xhtml=xhtml, simple=simple, xep0184_id=xep0184_id, displaymarking=displaymarking, msg_log_id=msg_log_id, msg_stanza_id=msg_stanza_id, correct_id=correct_id, additional_data=additional_data) if text.startswith('/me ') or text.startswith('/me\n'): self.old_msg_kind = None else: self.old_msg_kind = kind def get_tab_label(self, chatstate): unread = '' if self.resource: jid = self.contact.get_full_jid() else: jid = self.contact.jid num_unread = len(gajim.events.get_events(self.account, jid, ['printed_' + self.type_id, self.type_id])) if num_unread == 1 and not gajim.config.get('show_unread_tab_icon'): unread = '*' elif num_unread > 1: unread = '[' + str(num_unread) + ']' # Draw tab label using chatstate theme = gajim.config.get('roster_theme') color_s = None if not chatstate: chatstate = self.contact.chatstate if chatstate is not None: if chatstate == 'composing': color_s = gajim.config.get_per('themes', theme, 'state_composing_color') elif chatstate == 'inactive': color_s = gajim.config.get_per('themes', theme, 'state_inactive_color') elif chatstate == 'gone': color_s = gajim.config.get_per('themes', theme, 'state_gone_color') elif chatstate == 'paused': color_s = gajim.config.get_per('themes', theme, 'state_paused_color') context = self.parent_win.notebook.get_style_context() if color_s: # We set the color for when it's the current tab or not color = Gdk.RGBA() ok = Gdk.RGBA.parse(color, color_s) if not ok: del color color = context.get_color(Gtk.StateFlags.ACTIVE) # In inactive tab color to be lighter against the darker inactive # background if chatstate in ('inactive', 'gone') and\ self.parent_win.get_active_control() != self: color = self.lighten_color(color) else: # active or not chatstate, get color from gtk color = context.get_color(Gtk.StateFlags.ACTIVE) name = self.contact.get_shown_name() if self.resource: name += '/' + self.resource label_str = GLib.markup_escape_text(name) if num_unread: # if unread, text in the label becomes bold label_str = '<b>' + unread + label_str + '</b>' return (label_str, color) def 
get_tab_image(self, count_unread=True): if self.resource: jid = self.contact.get_full_jid() else: jid = self.contact.jid if gajim.config.get('show_avatar_in_tabs'): avatar_pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(jid) if avatar_pixbuf not in ('ask', None): avatar_pixbuf = gtkgui_helpers.get_scaled_pixbuf_by_size( avatar_pixbuf, 16, 16) return avatar_pixbuf if count_unread: num_unread = len(gajim.events.get_events(self.account, jid, ['printed_' + self.type_id, self.type_id])) else: num_unread = 0 # Set tab image (always 16x16); unread messages show the 'event' image tab_img = None if num_unread and gajim.config.get('show_unread_tab_icon'): img_16 = gajim.interface.roster.get_appropriate_state_images( self.contact.jid, icon_name='event') tab_img = img_16['event'] else: contact = gajim.contacts.get_contact_with_highest_priority( self.account, self.contact.jid) if not contact or self.resource: # For transient contacts contact = self.contact img_16 = gajim.interface.roster.get_appropriate_state_images( self.contact.jid, icon_name=contact.show) tab_img = img_16[contact.show] return tab_img def prepare_context_menu(self, hide_buttonbar_items=False): """ Set compact view menuitem active state sets active and sensitivity state for toggle_gpg_menuitem sets sensitivity for history_menuitem (False for tranasports) and file_transfer_menuitem and hide()/show() for add_to_roster_menuitem """ if gajim.jid_is_transport(self.contact.jid): menu = gui_menu_builder.get_transport_menu(self.contact, self.account) else: menu = gui_menu_builder.get_contact_menu(self.contact, self.account, use_multiple_contacts=False, show_start_chat=False, show_encryption=True, control=self, show_buttonbar_items=not hide_buttonbar_items) return menu def send_chatstate(self, state, contact=None): """ Send OUR chatstate as STANDLONE chat state message (eg. no body) to contact only if new chatstate is different from the previous one if jid is not specified, send to active tab """ # JEP 85 does not allow resending the same chatstate # this function checks for that and just returns so it's safe to call it # with same state. 
# This functions also checks for violation in state transitions # and raises RuntimeException with appropriate message # more on that http://xmpp.org/extensions/xep-0085.html#statechart # do not send if we have chat state notifications disabled # that means we won't reply to the <active/> from other peer # so we do not broadcast jep85 capabalities chatstate_setting = gajim.config.get('outgoing_chat_state_notifications') if chatstate_setting == 'disabled': return # Dont leak presence to contacts # which are not allowed to see our status if contact and contact.sub in ('to', 'none'): return elif chatstate_setting == 'composing_only' and state != 'active' and\ state != 'composing': return if contact is None: contact = self.parent_win.get_active_contact() if contact is None: # contact was from pm in MUC, and left the room so contact is None # so we cannot send chatstate anymore return # Don't send chatstates to offline contacts if contact.show == 'offline': return if not contact.supports(NS_CHATSTATES): return if contact.our_chatstate == False: return # if the new state we wanna send (state) equals # the current state (contact.our_chatstate) then return if contact.our_chatstate == state: return # if wel're inactive prevent composing (XEP violation) if contact.our_chatstate == 'inactive' and state == 'composing': # go active before gajim.nec.push_outgoing_event(MessageOutgoingEvent(None, account=self.account, jid=self.contact.jid, chatstate='active', control=self)) contact.our_chatstate = 'active' self.reset_kbd_mouse_timeout_vars() gajim.nec.push_outgoing_event(MessageOutgoingEvent(None, account=self.account, jid=self.contact.jid, chatstate=state, msg_id=contact.msg_log_id, control=self)) contact.our_chatstate = state if state == 'active': self.reset_kbd_mouse_timeout_vars() def shutdown(self): # PluginSystem: removing GUI extension points connected with ChatControl # instance object gajim.plugin_manager.remove_gui_extension_point('chat_control', self) gajim.ged.remove_event_handler('pep-received', ged.GUI1, self._nec_pep_received) gajim.ged.remove_event_handler('vcard-received', ged.GUI1, self._nec_vcard_received) gajim.ged.remove_event_handler('failed-decrypt', ged.GUI1, self._nec_failed_decrypt) gajim.ged.remove_event_handler('chatstate-received', ged.GUI1, self._nec_chatstate_received) gajim.ged.remove_event_handler('caps-received', ged.GUI1, self._nec_caps_received) self.unsubscribe_events() # Send 'gone' chatstate self.send_chatstate('gone', self.contact) self.contact.chatstate = None self.contact.our_chatstate = None for jingle_type in ('audio', 'video'): self.close_jingle_content(jingle_type) # disconnect self from session if self.session: self.session.control = None # Disconnect timer callbacks GLib.source_remove(self.possible_paused_timeout_id) GLib.source_remove(self.possible_inactive_timeout_id) # Remove bigger avatar window if self.bigger_avatar_window: self.bigger_avatar_window.destroy() # Clean events gajim.events.remove_events(self.account, self.get_full_jid(), types=['printed_' + self.type_id, self.type_id]) # Remove contact instance if contact has been removed key = (self.contact.jid, self.account) roster = gajim.interface.roster if key in roster.contacts_to_be_removed.keys() and \ not roster.contact_has_pending_roster_events(self.contact, self.account): backend = roster.contacts_to_be_removed[key]['backend'] del roster.contacts_to_be_removed[key] roster.remove_contact(self.contact.jid, self.account, force=True, backend=backend) # remove all register handlers on widgets, 
created by self.xml # to prevent circular references among objects for i in list(self.handlers.keys()): if self.handlers[i].handler_is_connected(i): self.handlers[i].disconnect(i) del self.handlers[i] self.conv_textview.del_handlers() if gajim.config.get('use_speller') and HAS_GTK_SPELL: spell_obj = gtkspell.get_from_text_view(self.msg_textview) if spell_obj: spell_obj.detach() self.msg_textview.destroy() # PluginSystem: calling shutdown of super class (ChatControlBase) to let # it remove it's GUI extension points super(ChatControl, self).shutdown() def minimizable(self): return False def safe_shutdown(self): return False def allow_shutdown(self, method, on_yes, on_no, on_minimize): if time.time() - gajim.last_message_time[self.account]\ [self.get_full_jid()] < 2: # 2 seconds def on_ok(): on_yes(self) def on_cancel(): on_no(self) dialogs.ConfirmationDialog( #%s is being replaced in the code with JID _('You just received a new message from "%s"') % \ self.contact.jid, _('If you close this tab and you have history disabled, '\ 'this message will be lost.'), on_response_ok=on_ok, on_response_cancel=on_cancel, transient_for=self.parent_win.window) return on_yes(self) def _nec_chatstate_received(self, obj): """ Handle incoming chatstate that jid SENT TO us """ self.draw_banner_text() # update chatstate in tab for this chat self.parent_win.redraw_tab(self, self.contact.chatstate) def _nec_caps_received(self, obj): if obj.conn.name != self.account: return if self.TYPE_ID == 'chat' and obj.jid != self.contact.jid: return if self.TYPE_ID == 'pm' and obj.fjid != self.contact.jid: return self.update_ui() def _nec_ping_reply(self, obj): if obj.control: if obj.control != self: return else: if self.contact != obj.contact: return self.print_conversation(_('Pong! (%s s.)') % obj.seconds, 'status') def set_control_active(self, state): ChatControlBase.set_control_active(self, state) # send chatstate inactive to the one we're leaving # and active to the one we visit if state: message_buffer = self.msg_textview.get_buffer() if message_buffer.get_char_count(): self.send_chatstate('paused', self.contact) else: self.send_chatstate('active', self.contact) self.reset_kbd_mouse_timeout_vars() GLib.source_remove(self.possible_paused_timeout_id) GLib.source_remove(self.possible_inactive_timeout_id) self._schedule_activity_timers() else: self.send_chatstate('inactive', self.contact) # Hide bigger avatar window if self.bigger_avatar_window: self.bigger_avatar_window.destroy() self.bigger_avatar_window = None # Re-show the small avatar self.show_avatar() def show_avatar(self): if not gajim.config.get('show_avatar_in_chat'): return jid_with_resource = self.contact.get_full_jid() pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache(jid_with_resource) if pixbuf == 'ask': # we don't have the vcard if self.TYPE_ID == message_control.TYPE_PM: if self.gc_contact.jid: # We know the real jid of this contact real_jid = self.gc_contact.jid if self.gc_contact.resource: real_jid += '/' + self.gc_contact.resource else: real_jid = jid_with_resource gajim.connections[self.account].request_vcard(real_jid, jid_with_resource) else: gajim.connections[self.account].request_vcard(jid_with_resource) return elif pixbuf: scaled_pixbuf = gtkgui_helpers.get_scaled_pixbuf(pixbuf, 'chat') else: scaled_pixbuf = None image = self.xml.get_object('avatar_image') image.set_from_pixbuf(scaled_pixbuf) image.show_all() def _nec_vcard_received(self, obj): if obj.conn.name != self.account: return j = gajim.get_jid_without_resource(self.contact.jid) if 
obj.jid != j: return self.show_avatar() def _on_drag_data_received(self, widget, context, x, y, selection, target_type, timestamp): if not selection.get_data(): return if self.TYPE_ID == message_control.TYPE_PM: c = self.gc_contact else: c = self.contact if target_type == self.TARGET_TYPE_URI_LIST: if not c.resource: # If no resource is known, we can't send a file return uri = selection.get_data().strip() uri_splitted = uri.split() # we may have more than one file dropped for uri in uri_splitted: path = helpers.get_file_path_from_dnd_dropped_uri(uri) if os.path.isfile(path): # is it file? ft = gajim.interface.instances['file_transfers'] ft.send_file(self.account, c, path) return # chat2muc treeview = gajim.interface.roster.tree model = treeview.get_model() data = selection.get_data() path = treeview.get_selection().get_selected_rows()[1][0] iter_ = model.get_iter(path) type_ = model[iter_][2] if type_ != 'contact': # source is not a contact return dropped_jid = data dropped_transport = gajim.get_transport_name_from_jid(dropped_jid) c_transport = gajim.get_transport_name_from_jid(c.jid) if dropped_transport or c_transport: return # transport contacts cannot be invited dialogs.TransformChatToMUC(self.account, [c.jid], [dropped_jid]) def _on_message_tv_buffer_changed(self, textbuffer): self.kbd_activity_in_last_5_secs = True self.kbd_activity_in_last_30_secs = True if textbuffer.get_char_count(): self.send_chatstate('composing', self.contact) e2e_is_active = self.session and \ self.session.enable_encryption e2e_pref = gajim.config.get_per('accounts', self.account, 'enable_esessions') and gajim.config.get_per('accounts', self.account, 'autonegotiate_esessions') and gajim.config.get_per( 'contacts', self.contact.jid, 'autonegotiate_esessions') want_e2e = not e2e_is_active and not self.gpg_is_active \ and e2e_pref if want_e2e and not self.no_autonegotiation \ and gajim.HAVE_PYCRYPTO and self.contact.supports(NS_ESESSION): self.begin_e2e_negotiation() elif (not self.session or not self.session.status) and \ gajim.connections[self.account].archiving_136_supported: self.begin_archiving_negotiation() else: self.send_chatstate('active', self.contact) def restore_conversation(self): jid = self.contact.jid # don't restore lines if it's a transport if gajim.jid_is_transport(jid): return # How many lines to restore and when to time them out restore_how_many = gajim.config.get('restore_lines') if restore_how_many <= 0: return timeout = gajim.config.get('restore_timeout') # in minutes # number of messages that are in queue and are already logged, we want # to avoid duplication pending_how_many = len(gajim.events.get_events(self.account, jid, ['chat', 'pm'])) if self.resource: pending_how_many += len(gajim.events.get_events(self.account, self.contact.get_full_jid(), ['chat', 'pm'])) try: rows = gajim.logger.get_last_conversation_lines(jid, restore_how_many, pending_how_many, timeout, self.account) except exceptions.DatabaseMalformed: import common.logger dialogs.ErrorDialog(_('Database Error'), _('The database file (%s) cannot be read. 
Try to repair it or ' 'remove it (all history will be lost).') % common.logger.LOG_DB_PATH) rows = [] local_old_kind = None self.conv_textview.just_cleared = True for row in rows: # row[0] time, row[1] has kind, row[2] the message, row[3] subject, row[4] additional_data msg = row[2] additional_data = row[4] if not msg: # message is empty, we don't print it continue if row[1] in (constants.KIND_CHAT_MSG_SENT, constants.KIND_SINGLE_MSG_SENT): kind = 'outgoing' name = self.get_our_nick() elif row[1] in (constants.KIND_SINGLE_MSG_RECV, constants.KIND_CHAT_MSG_RECV): kind = 'incoming' name = self.contact.get_shown_name() elif row[1] == constants.KIND_ERROR: kind = 'status' name = self.contact.get_shown_name() tim = float(row[0]) if gajim.config.get('restored_messages_small'): small_attr = ['small'] else: small_attr = [] xhtml = None if msg.startswith('<body '): xhtml = msg if row[3]: msg = _('Subject: %(subject)s\n%(message)s') % \ {'subject': row[3], 'message': msg} ChatControlBase.print_conversation_line(self, msg, kind, name, tim, small_attr, small_attr + ['restored_message'], small_attr + ['restored_message'], False, old_kind=local_old_kind, xhtml=xhtml, additional_data=additional_data) if row[2].startswith('/me ') or row[2].startswith('/me\n'): local_old_kind = None else: local_old_kind = kind if len(rows): self.conv_textview.print_empty_line() def read_queue(self): """ Read queue and print messages containted in it """ jid = self.contact.jid jid_with_resource = jid if self.resource: jid_with_resource += '/' + self.resource events = gajim.events.get_events(self.account, jid_with_resource) # list of message ids which should be marked as read message_ids = [] for event in events: if event.type_ != self.type_id: continue if event.kind == 'error': kind = 'info' else: kind = 'print_queue' if event.sent_forwarded: kind = 'out' self.print_conversation(event.message, kind, tim=event.time, encrypted=event.encrypted, subject=event.subject, xhtml=event.xhtml, displaymarking=event.displaymarking, correct_id=event.correct_id) if isinstance(event.msg_log_id, int): message_ids.append(event.msg_log_id) if event.session and not self.session: self.set_session(event.session) if message_ids: gajim.logger.set_read_messages(message_ids) gajim.events.remove_events(self.account, jid_with_resource, types=[self.type_id]) typ = 'chat' # Is it a normal chat or a pm ? # reset to status image in gc if it is a pm # Is it a pm ? 
room_jid, nick = gajim.get_room_and_nick_from_fjid(jid) control = gajim.interface.msg_win_mgr.get_gc_control(room_jid, self.account) if control and control.type_id == message_control.TYPE_GC: control.update_ui() control.parent_win.show_title() typ = 'pm' self.redraw_after_event_removed(jid) if (self.contact.show in ('offline', 'error')): show_offline = gajim.config.get('showoffline') show_transports = gajim.config.get('show_transports_group') if (not show_transports and gajim.jid_is_transport(jid)) or \ (not show_offline and typ == 'chat' and \ len(gajim.contacts.get_contacts(self.account, jid)) < 2): gajim.interface.roster.remove_to_be_removed(self.contact.jid, self.account) elif typ == 'pm': control.remove_contact(nick) def show_bigger_avatar(self, small_avatar): """ Resize the avatar, if needed, so it has at max half the screen size and shows it """ #if not small_avatar.window: ### Tab has been closed since we hovered the avatar #return avatar_pixbuf = gtkgui_helpers.get_avatar_pixbuf_from_cache( self.contact.jid) if avatar_pixbuf in ('ask', None): return # Hide the small avatar # this code hides the small avatar when we show a bigger one in case # the avatar has a transparency hole in the middle # so when we show the big one we avoid seeing the small one behind. # It's why I set it transparent. image = self.xml.get_object('avatar_image') pixbuf = image.get_pixbuf() pixbuf.fill(0xffffff00) # RGBA image.set_from_pixbuf(pixbuf) #image.queue_draw() screen_w = Gdk.Screen.width() screen_h = Gdk.Screen.height() avatar_w = avatar_pixbuf.get_width() avatar_h = avatar_pixbuf.get_height() half_scr_w = screen_w / 2 half_scr_h = screen_h / 2 if avatar_w > half_scr_w: avatar_w = half_scr_w if avatar_h > half_scr_h: avatar_h = half_scr_h # we should make the cursor visible # gtk+ doesn't make use of the motion notify on gtkwindow by default # so this line adds that alloc = small_avatar.get_allocation() # make the bigger avatar window show up centered small_avatar_x, small_avatar_y = alloc.x, alloc.y translated_coordinates = small_avatar.translate_coordinates( gajim.interface.roster.window, 0, 0) if translated_coordinates: small_avatar_x, small_avatar_y = translated_coordinates roster_x, roster_y = self.parent_win.window.get_window().get_origin()[1:] center_x = roster_x + small_avatar_x + (alloc.width / 2) center_y = roster_y + small_avatar_y + (alloc.height / 2) pos_x, pos_y = center_x - (avatar_w / 2), center_y - (avatar_h / 2) dialogs.BigAvatarWindow(avatar_pixbuf, pos_x, pos_y, avatar_w, avatar_h, self.show_avatar) def _on_send_file_menuitem_activate(self, widget): self._on_send_file() def _on_add_to_roster_menuitem_activate(self, widget): dialogs.AddNewContactWindow(self.account, self.contact.jid) def _on_contact_information_menuitem_activate(self, widget): gajim.interface.roster.on_info(widget, self.contact, self.account) def _on_toggle_gpg_menuitem_activate(self, widget): self._toggle_gpg() def _on_convert_to_gc_menuitem_activate(self, widget): """ User wants to invite some friends to chat """ dialogs.TransformChatToMUC(self.account, [self.contact.jid]) def _on_toggle_e2e_menuitem_activate(self, widget): if self.session and self.session.enable_encryption: # e2e was enabled, disable it jid = str(self.session.jid) thread_id = self.session.thread_id self.session.terminate_e2e() gajim.connections[self.account].delete_session(jid, thread_id) # presumably the user had a good reason to shut it off, so # disable autonegotiation too self.no_autonegotiation = True else: self.begin_e2e_negotiation() def 
begin_negotiation(self): self.no_autonegotiation = True if not self.session: fjid = self.contact.get_full_jid() new_sess = gajim.connections[self.account].make_new_session(fjid, type_=self.type_id) self.set_session(new_sess) def begin_e2e_negotiation(self): self.begin_negotiation() self.session.resource = self.contact.resource self.session.negotiate_e2e(False) def begin_archiving_negotiation(self): self.begin_negotiation() self.session.negotiate_archiving() def _nec_failed_decrypt(self, obj): if obj.session != self.session: return details = _('Unable to decrypt message from %s\nIt may have been ' 'tampered with.') % obj.fjid self.print_conversation_line(details, 'status', '', obj.timestamp) # terminate the session thread_id = self.session.thread_id self.session.terminate_e2e() obj.conn.delete_session(obj.fjid, thread_id) # restart the session self.begin_e2e_negotiation() # Stop emission so it doesn't go to gui_interface return True def got_connected(self): ChatControlBase.got_connected(self) # Refreshing contact contact = gajim.contacts.get_contact_with_highest_priority( self.account, self.contact.jid) if isinstance(contact, GC_Contact): contact = contact.as_contact() if contact: self.contact = contact self.draw_banner() emoticons_button = self.xml.get_object('emoticons_button') emoticons_button.set_sensitive(True) send_button = self.xml.get_object('send_button') send_button.set_sensitive(True) def got_disconnected(self): # Emoticons button emoticons_button = self.xml.get_object('emoticons_button') emoticons_button.set_sensitive(False) send_button = self.xml.get_object('send_button') send_button.set_sensitive(False) # Add to roster self._add_to_roster_button.hide() # Audio button self._audio_button.set_sensitive(False) # Video button self._video_button.set_sensitive(False) # Send file button self._send_file_button.set_tooltip_text('') self._send_file_button.set_sensitive(False) # Convert to GC button self._convert_to_gc_button.set_sensitive(False) ChatControlBase.got_disconnected(self) def update_status_display(self, name, uf_show, status): """ Print the contact's status and update the status/GPG image """ self.update_ui() self.parent_win.redraw_tab(self) self.print_conversation(_('%(name)s is now %(status)s') % {'name': name, 'status': uf_show}, 'status') if status: self.print_conversation(' (', 'status', simple=True) self.print_conversation('%s' % (status), 'status', simple=True) self.print_conversation(')', 'status', simple=True) def _info_bar_show_message(self): if self.info_bar.get_visible(): # A message is already shown return if not self.info_bar_queue: return markup, buttons, args, type_ = self.info_bar_queue[0] self.info_bar_label.set_markup(markup) # Remove old buttons area = self.info_bar.get_action_area() for b in area.get_children(): area.remove(b) # Add new buttons for button in buttons: self.info_bar.add_action_widget(button, 0) self.info_bar.set_message_type(type_) self.info_bar.set_no_show_all(False) self.info_bar.show_all() def _add_info_bar_message(self, markup, buttons, args, type_=Gtk.MessageType.INFO): self.info_bar_queue.append((markup, buttons, args, type_)) self._info_bar_show_message() def _get_file_props_event(self, file_props, type_): evs = gajim.events.get_events(self.account, self.contact.jid, [type_]) for ev in evs: if ev.file_props == file_props: return ev return None def _on_accept_file_request(self, widget, file_props): gajim.interface.instances['file_transfers'].on_file_request_accepted( self.account, self.contact, file_props) ev = 
self._get_file_props_event(file_props, 'file-request') if ev: gajim.events.remove_events(self.account, self.contact.jid, event=ev) def _on_cancel_file_request(self, widget, file_props): gajim.connections[self.account].send_file_rejection(file_props) ev = self._get_file_props_event(file_props, 'file-request') if ev: gajim.events.remove_events(self.account, self.contact.jid, event=ev) def _got_file_request(self, file_props): """ Show an InfoBar on top of control """ markup = '<b>%s:</b> %s' % (_('File transfer'), file_props.name) if file_props.desc: markup += ' (%s)' % file_props.desc markup += '\n%s: %s' % (_('Size'), helpers.convert_bytes( file_props.size)) b1 = Gtk.Button(_('_Accept')) b1.connect('clicked', self._on_accept_file_request, file_props) b2 = Gtk.Button(stock=Gtk.STOCK_CANCEL) b2.connect('clicked', self._on_cancel_file_request, file_props) self._add_info_bar_message(markup, [b1, b2], file_props, Gtk.MessageType.QUESTION) def _on_open_ft_folder(self, widget, file_props): path = os.path.split(file_props.file_name)[0] if os.path.exists(path) and os.path.isdir(path): helpers.launch_file_manager(path) ev = self._get_file_props_event(file_props, 'file-completed') if ev: gajim.events.remove_events(self.account, self.contact.jid, event=ev) def _on_ok(self, widget, file_props, type_): ev = self._get_file_props_event(file_props, type_) if ev: gajim.events.remove_events(self.account, self.contact.jid, event=ev) def _got_file_completed(self, file_props): markup = '<b>%s:</b> %s' % (_('File transfer completed'), file_props.name) if file_props.desc: markup += ' (%s)' % file_props.desc b1 = Gtk.Button(_('_Open Containing Folder')) b1.connect('clicked', self._on_open_ft_folder, file_props) b2 = Gtk.Button(stock=Gtk.STOCK_OK) b2.connect('clicked', self._on_ok, file_props, 'file-completed') self._add_info_bar_message(markup, [b1, b2], file_props) def _got_file_error(self, file_props, type_, pri_txt, sec_txt): markup = '<b>%s:</b> %s' % (pri_txt, sec_txt) b = Gtk.Button(stock=Gtk.STOCK_OK) b.connect('clicked', self._on_ok, file_props, type_) self._add_info_bar_message(markup, [b], file_props, Gtk.MessageType.ERROR) def _on_accept_gc_invitation(self, widget, event): try: if event.is_continued: gajim.interface.join_gc_room(self.account, event.room_jid, gajim.nicks[self.account], event.password, is_continued=True) else: dialogs.JoinGroupchatWindow(self.account, event.room_jid) except GajimGeneralException: pass gajim.events.remove_events(self.account, self.contact.jid, event=event) def _on_cancel_gc_invitation(self, widget, event): gajim.events.remove_events(self.account, self.contact.jid, event=event) def _get_gc_invitation(self, event): markup = '<b>%s:</b> %s' % (_('Groupchat Invitation'), event.room_jid) if event.comment: markup += ' (%s)' % event.comment b1 = Gtk.Button(_('_Join')) b1.connect('clicked', self._on_accept_gc_invitation, event) b2 = Gtk.Button(stock=Gtk.STOCK_CANCEL) b2.connect('clicked', self._on_cancel_gc_invitation, event) self._add_info_bar_message(markup, [b1, b2], (event.room_jid, event.comment), Gtk.MessageType.QUESTION) def on_event_added(self, event): if event.account != self.account: return if event.jid != self.contact.jid: return if event.type_ == 'file-request': self._got_file_request(event.file_props) elif event.type_ == 'file-completed': self._got_file_completed(event.file_props) elif event.type_ in ('file-error', 'file-stopped'): msg_err = '' if event.file_props.error == -1: msg_err = _('Remote contact stopped transfer') elif event.file_props.error == -6: msg_err = 
_('Error opening file') self._got_file_error(event.file_props, event.type_, _('File transfer stopped'), msg_err) elif event.type_ in ('file-request-error', 'file-send-error'): self._got_file_error(event.file_props, event.type_, _('File transfer cancelled'), _('Connection with peer cannot be established.')) elif event.type_ == 'gc-invitation': self._get_gc_invitation(event) def on_event_removed(self, event_list): """ Called when one or more events are removed from the event list """ for ev in event_list: if ev.account != self.account: continue if ev.jid != self.contact.jid: continue if ev.type_ not in ('file-request', 'file-completed', 'file-error', 'file-stopped', 'file-request-error', 'file-send-error', 'gc-invitation'): continue i = 0 removed = False for ib_msg in self.info_bar_queue: if ev.type_ == 'gc-invitation': if ev.room_jid == ib_msg[2][0]: self.info_bar_queue.remove(ib_msg) removed = True else: # file-* if ib_msg[2] == ev.file_props: self.info_bar_queue.remove(ib_msg) removed = True if removed: if i == 0: # We are removing the one currently displayed self.info_bar.set_no_show_all(True) self.info_bar.hide() # show next one? GLib.idle_add(self._info_bar_show_message) break i += 1
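
The chat-state handling above (`send_chatstate` plus the 5-second/30-second paused and inactive timers) follows the XEP-0085 rules spelled out in its docstring and comments: standalone chatstate messages are only sent when the notification setting allows it, the same state is never re-sent, and `composing` is never sent directly from `inactive` without first going `active`. Below is a minimal, self-contained sketch of that guard logic for illustration only; `ChatStateGuard`, its method names, and the `ALLOWED_SETTINGS` constant are hypothetical and are not part of the Gajim source above.

# Illustrative sketch of the XEP-0085 resend/transition guard described above.
# ChatStateGuard and its members are hypothetical, not Gajim API.
ALLOWED_SETTINGS = ('all', 'composing_only')


class ChatStateGuard(object):
    def __init__(self, setting='all'):
        self.setting = setting   # mirrors the 'outgoing_chat_state_notifications' option
        self.current = None      # last state actually sent to the contact

    def states_to_send(self, new_state):
        """Return the list of chatstates that should actually go on the wire."""
        if self.setting not in ALLOWED_SETTINGS:
            return []            # notifications disabled: send nothing
        if self.setting == 'composing_only' and new_state not in ('active', 'composing'):
            return []
        if new_state == self.current:
            return []            # XEP-0085 forbids resending the same state
        out = []
        if self.current == 'inactive' and new_state == 'composing':
            out.append('active')  # must go active before composing again
        out.append(new_state)
        self.current = new_state
        return out


if __name__ == '__main__':
    guard = ChatStateGuard()
    print(guard.states_to_send('composing'))   # ['composing']
    print(guard.states_to_send('composing'))   # [] -> no resend of the same state
    guard.current = 'inactive'
    print(guard.states_to_send('composing'))   # ['active', 'composing']
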
gpl-3.0
-346,342,180,975,263,940
40.885068
169
0.58314
false
gstarnberger/paasta
tests/cli/test_cmds_logs.py
1
42309
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import datetime
import json
from multiprocessing import Queue
from Queue import Empty

import isodate
import mock
import pytest
from pytest import raises

try:
    from paasta_tools.cli.cmds import logs
except ImportError:
    pass
from paasta_tools.cli.cli import parse_args
from paasta_tools.utils import ANY_CLUSTER
from paasta_tools.utils import format_log_line

try:
    from scribereader.scribereader import StreamTailerSetupError
    scribereader_available = True
except ImportError:
    scribereader_available = False
    pass


def test_cluster_to_scribe_env_good():
    with mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True):
        scribe_log_reader = logs.ScribeLogReader(cluster_map={'mesosstage': 'env1'})
        actual = scribe_log_reader.cluster_to_scribe_env('mesosstage')
        assert actual == 'env1'


def test_cluster_to_scribe_env_bad():
    with mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True):
        scribe_log_reader = logs.ScribeLogReader(cluster_map={})
        with raises(SystemExit) as sys_exit:
            scribe_log_reader.cluster_to_scribe_env('dne')
        assert sys_exit.value.code == 1


def test_check_timestamp_in_range_with_none_arguments():
    assert logs.check_timestamp_in_range(timestamp=None, start_time=None, end_time=None) is True
    assert logs.check_timestamp_in_range(datetime.datetime.utcnow(), None, None) is True


def test_check_timestamp_in_range_false():
    timestamp = datetime.datetime.utcnow()
    start_time, end_time = logs.generate_start_end_time("10m", "5m")
    assert logs.check_timestamp_in_range(timestamp, start_time, end_time) is False


def test_check_timestamp_in_range_true():
    timestamp = isodate.parse_datetime("2016-06-07T23:46:03+00:00")
    start_time = isodate.parse_datetime("2016-06-07T23:40:03+00:00")
    end_time = isodate.parse_datetime("2016-06-07T23:50:03+00:00")
    assert logs.check_timestamp_in_range(timestamp, start_time, end_time) is True


def test_paasta_log_line_passes_filter_true():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    formatted_line = format_log_line(levels[0], clusters[0], service, instance, components[0], line)
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters) is True


def test_paasta_log_line_passes_filter_true_when_default_cluster():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    formatted_line = format_log_line(levels[0], ANY_CLUSTER, service, instance, components[0], line)
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters) is True


def test_paasta_log_line_passes_filter_false_when_wrong_level():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    formatted_line = format_log_line('BOGUS_LEVEL', clusters[0], service, instance, components[0], line)
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters) is False


def test_paasta_log_line_passes_filter_false_when_wrong_component():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    # component must be legit as well as not in the list of requested
    # components
    formatted_line = format_log_line(levels[0], clusters[0], service, instance, 'monitoring', line)
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters) is False


def test_paasta_log_line_passes_filter_false_when_wrong_cluster():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    # component must be legit as well as not in the list of requested
    # components
    formatted_line = format_log_line(levels[0], 'BOGUS_CLUSTER', service, instance, components[0], line)
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters) is False


def test_paasta_log_line_passes_filter_false_when_line_not_valid_json():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    components = ['build', 'deploy']
    line = 'i am definitely not json'
    # component must be legit as well as not in the list of requested
    # components
    assert logs.paasta_log_line_passes_filter(line, levels, service, components, clusters) is False


def test_paasta_log_line_passes_filter_true_when_valid_time():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    formatted_line = format_log_line(levels[0], clusters[0], service, instance, components[0], line,
                                     timestamp="2016-06-07T23:46:03+00:00")
    start_time = isodate.parse_datetime("2016-06-07T23:40:03+00:00")
    end_time = isodate.parse_datetime("2016-06-07T23:50:03+00:00")
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters,
                                              start_time=start_time, end_time=end_time) is True


def test_paasta_log_line_passes_filter_false_when_invalid_time():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    components = ['build', 'deploy']
    line = 'fake_line'
    formatted_line = format_log_line(levels[0], clusters[0], service, instance, components[0], line,
                                     timestamp=isodate.datetime_isoformat(datetime.datetime.utcnow()))
    start_time, end_time = logs.generate_start_end_time(from_string="5m", to_string="3m")
    assert logs.paasta_log_line_passes_filter(formatted_line, levels, service, components, clusters,
                                              start_time=start_time, end_time=end_time) is False


def test_marathon_log_line_passes_filter_true_when_service_name_in_string():
    service = 'fake_service'
    levels = []
    components = []
    clusters = []
    line = format_log_line(
        'fake_level',
        clusters,
        service,
        'fake_instance',
        'marathon',
        'fake message with service name %s' % service,
    )
    with mock.patch('paasta_tools.cli.cmds.logs.format_job_id', autospec=True) as format_job_id_patch:
        format_job_id_patch.return_value = service
        assert logs.marathon_log_line_passes_filter(line, levels, service, components, clusters)


def test_marathon_log_line_passes_filter_false_when_service_name_missing():
    service = 'fake_service'
    levels = []
    components = []
    clusters = []
    line = format_log_line(
        'fake_level',
        clusters,
        service,
        'fake_instance',
        'marathon',
        'fake message without service name',
    )
    with mock.patch('paasta_tools.cli.cmds.logs.format_job_id', autospec=True) as format_job_id_patch:
        format_job_id_patch.return_value = service
        assert not logs.marathon_log_line_passes_filter(line, levels, service, components, clusters)


def test_marathon_log_line_passes_filter_fails_invalid_json():
    assert not logs.marathon_log_line_passes_filter("{ abcd }", None, None, None, None)


def test_chronos_log_line_passes_filter_fails_invalid_json():
    assert not logs.chronos_log_line_passes_filter("{ abcd }", None, None, None, None)


def test_chronos_log_line_passes_filter_true_when_service_name_in_string():
    service = 'fake_service'
    levels = []
    components = []
    clusters = []
    line = format_log_line(
        'fake_level',
        clusters,
        service,
        'fake_instance',
        'chronos',
        'fake message with service name %s' % service,
    )
    with mock.patch('paasta_tools.chronos_tools.compose_job_id', autospec=True) as format_job_id_patch:
        format_job_id_patch.return_value = service
        assert logs.chronos_log_line_passes_filter(line, levels, service, components, clusters)


def test_chronos_log_line_passes_filter_false_when_service_name_missing():
    service = 'fake_service'
    levels = []
    components = []
    clusters = []
    line = format_log_line(
        'fake_level',
        clusters,
        service,
        'fake_instance',
        'chronos',
        'fake message without service name',
    )
    with mock.patch('paasta_tools.chronos_tools.compose_job_id', autospec=True) as format_job_id_patch:
        format_job_id_patch.return_value = service
        assert not logs.chronos_log_line_passes_filter(line, levels, service, components, clusters)


def test_extract_utc_timestamp_from_log_line_ok():
    fake_timestamp = '2015-07-22T10:38:46-07:00'
    fake_utc_timestamp = isodate.parse_datetime('2015-07-22T17:38:46.000000')
    line = '%s this is a fake syslog test message' % fake_timestamp
    assert logs.extract_utc_timestamp_from_log_line(line) == fake_utc_timestamp


def test_extract_utc_timestamp_from_log_line_when_missing_date():
    line = 'this is a fake invalid syslog message'
    assert not logs.extract_utc_timestamp_from_log_line(line)


def test_extract_utc_timestamp_from_log_line_when_invalid_date_format():
    line = 'Jul 22 10:39:08 this is a fake invalid syslog message'
    assert not logs.extract_utc_timestamp_from_log_line(line)


def test_parse_marathon_log_line_fail():
    assert '' == logs.parse_marathon_log_line("fake timestamp", None, None)


def test_parse_marathon_log_line_ok():
    fake_timestamp = '2015-07-22T10:38:46-07:00'
    fake_utc_timestamp = '2015-07-22T17:38:46.000000'
    fake_service = 'fake_service'
    line = '%s this is a fake syslog test message' % fake_timestamp
    clusters = ['fake_cluster']
    expected = json.dumps({
        'timestamp': fake_utc_timestamp,
        'component': 'marathon',
        'cluster': clusters[0],
        'service': fake_service,
        'instance': 'ALL',
        'level': 'event',
        'message': line
    })
    assert sorted(logs.parse_marathon_log_line(line, clusters, fake_service)) == sorted(expected)


def test_parse_chronos_log_line_fail():
    assert '' == logs.parse_chronos_log_line("fake timestamp", None, None)


def test_parse_chronos_log_line_ok():
    fake_timestamp = '2015-07-22T10:38:46-07:00'
    fake_utc_timestamp = '2015-07-22T17:38:46.000000'
    fake_service = 'fake_service'
    line = '%s this is a fake syslog test message' % fake_timestamp
    clusters = ['fake_cluster']
    expected = json.dumps({
        'timestamp': fake_utc_timestamp,
        'component': 'chronos',
        'cluster': clusters[0],
        'service': fake_service,
        'instance': 'ALL',
        'level': 'event',
        'message': line
    })
    assert sorted(logs.parse_chronos_log_line(line, clusters, fake_service)) == sorted(expected)


@pytest.mark.skipif(not scribereader_available, reason='scribereader not available')
def test_scribe_tail_log_everything():
    env = 'fake_env'
    stream_name = 'fake_stream'
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['build', 'deploy']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    queue = Queue()
    filter_fn = mock.Mock(return_value=True)
    tailer = iter([
        format_log_line(
            levels[0],
            clusters,
            instance,
            'build',
            'level: first. component: build.',
        ),
        format_log_line(
            levels[1],
            clusters,
            instance,
            'deploy',
            'level: second. component: deploy.',
        ),
    ])
    with contextlib.nested(
        mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True),
    ) as (
        mock_scribereader,
    ):
        mock_scribereader.get_env_scribe_host.return_value = {
            'host': 'fake_host',
            'port': 'fake_port',
        }
        mock_scribereader.get_stream_tailer.return_value = tailer
        logs.scribe_tail(
            env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            queue,
            filter_fn
        )
        assert mock_scribereader.get_env_scribe_host.call_count == 1
        mock_scribereader.get_stream_tailer.assert_called_once_with(
            stream_name,
            'fake_host',
            'fake_port',
        )
        assert queue.qsize() == 2
        # Sadly, fetching with a timeout seems to be needed with
        # multiprocessing.Queue (this was not the case with Queue.Queue). It
        # failed 8/10 times with a get_nowait() vs 0/10 times with a 0.1s
        # timeout.
        first_line = queue.get(True, 0.1)
        assert 'level: first. component: build.' in first_line
        second_line = queue.get(True, 0.1)
        assert 'level: second. component: deploy.' in second_line


@pytest.mark.skipif(not scribereader_available, reason='scribereader not available')
def test_scribe_tail_log_nothing():
    env = 'fake_env'
    stream_name = 'fake_stream'
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['build', 'deploy']
    clusters = ['fake_cluster1', 'fake_cluster2']
    instance = 'fake_instance'
    queue = Queue()
    filter_fn = mock.Mock(return_value=False)
    tailer = iter([
        format_log_line(
            levels[0],
            clusters,
            instance,
            'build',
            'level: first. component: build.',
        ),
        format_log_line(
            levels[1],
            clusters,
            instance,
            'deploy',
            'level: second. component: deploy.',
        ),
    ])
    with contextlib.nested(
        mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True),
    ) as (
        mock_scribereader,
    ):
        mock_scribereader.get_env_scribe_host.return_value = {
            'host': 'fake_host',
            'port': 'fake_port',
        }
        mock_scribereader.get_stream_tailer.return_value = tailer
        logs.scribe_tail(
            env,
            stream_name,
            service,
            levels,
            components,
            clusters,
            queue,
            filter_fn,
        )
        assert queue.qsize() == 0


class FakeKeyboardInterrupt(KeyboardInterrupt):
    """Raising a real KeyboardInterrupt causes pytest to, y'know, stop."""
    pass


@pytest.mark.skipif(not scribereader_available, reason='scribereader not available')
def test_scribe_tail_ctrl_c():
    env = 'fake_env'
    stream_name = 'fake_stream'
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['build', 'deploy']
    clusters = ['fake_cluster1', 'fake_cluster2']
    queue = Queue()
    filter_fn = mock.Mock(return_value=True)
    with contextlib.nested(
        mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True),
    ) as (
        mock_scribereader,
    ):
        # There's no reason this method is the one that raises the
        # KeyboardInterrupt.
This just happens to be the first convenient place # to simulate the user pressing Ctrl-C. mock_scribereader.get_env_scribe_host.side_effect = FakeKeyboardInterrupt try: logs.scribe_tail( env, stream_name, service, levels, components, clusters, queue, filter_fn, ) # We have to catch this ourselves otherwise it will fool pytest too! except FakeKeyboardInterrupt: raise Exception('The code under test failed to catch a (fake) KeyboardInterrupt!') # If we made it here, KeyboardInterrupt was not raised and this test # was successful. @pytest.mark.skipif(not scribereader_available, reason='scribereader not available') def test_scribe_tail_handles_StreamTailerSetupError(): env = 'fake_env' stream_name = 'fake_stream' service = 'fake_service' levels = ['fake_level1'] components = ['build'] clusters = ['fake_cluster1'] queue = Queue() filter_fn = mock.Mock(return_value=True) with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True), ) as ( mock_scribereader, mock_log, ): mock_scribereader.get_stream_tailer.side_effect = StreamTailerSetupError('bla', 'unused1', 'unused2') with raises(StreamTailerSetupError): logs.scribe_tail( env, stream_name, service, levels, components, clusters, queue, filter_fn, ) mock_log.error.assert_any_call('Failed to setup stream tailing for %s in fake_env' % stream_name) def test_prettify_timestamp(): timestamp = "2015-03-12T21:20:04.602002" actual = logs.prettify_timestamp(timestamp) # kwa and I tried to get python to recognize a hardcoded timezone # in TZ, even using tzset(), but it ignored us. So we're punting. assert "2015-03-12 " in actual assert ":20:04" in actual def test_prettify_component_valid(): component = "build" actual = logs.prettify_component(component) assert component in actual assert "UNPRETTIFIABLE COMPONENT" not in actual def test_prettify_component_invalid(): component = "non-existent component" actual = logs.prettify_component(component) assert component in actual assert "UNPRETTIFIABLE COMPONENT" in actual def test_prettify_level_more_than_one_requested_levels(): level = 'fake_level' requested_levels = ['fake_requested_level', 'fake_requested_level2'] assert level in logs.prettify_level(level, requested_levels) def test_prettify_level_less_than_or_equal_to_one_requested_levels(): level = 'fake_level' requested_levels = [] assert level not in logs.prettify_level(level, requested_levels) def test_prettify_log_line_invalid_json(): line = "i am not json" levels = [] assert logs.prettify_log_line(line, levels) == "Invalid JSON: %s" % line def test_prettify_log_line_valid_json_missing_key(): line = json.dumps({ "component": "fake_component", "oops_i_spelled_timestamp_rong": "1999-09-09", }) levels = [] actual = logs.prettify_log_line(line, levels) assert "JSON missing keys: %s" % line in actual def test_prettify_log_line_valid_json(): parsed_line = { "message": "fake_message", "component": "fake_component", "level": "fake_level", "cluster": "fake_cluster", "instance": "fake_instance", "timestamp": "2015-03-12T21:20:04.602002", } requested_levels = ['fake_requested_level1', 'fake_requested_level2'] line = json.dumps(parsed_line) actual = logs.prettify_log_line(line, requested_levels) expected_timestamp = logs.prettify_timestamp(parsed_line['timestamp']) assert expected_timestamp in actual assert parsed_line['component'] in actual assert parsed_line['cluster'] in actual assert parsed_line['instance'] in actual assert parsed_line['level'] in actual assert 
parsed_line['message'] in actual def test_prettify_log_line_valid_json_requested_level_is_only_event(): requested_levels = ['fake_requested_level1'] parsed_line = { "message": "fake_message", "component": "fake_component", "level": "event", "cluster": "fake_cluster", "instance": "fake_instance", "timestamp": "2015-03-12T21:20:04.602002", } line = json.dumps(parsed_line) actual = logs.prettify_log_line(line, requested_levels) assert parsed_line['level'] not in actual def test_scribereader_run_code_over_scribe_envs(): clusters = ['fake_cluster1', 'fake_cluster2'] components = ['build', 'deploy', 'monitoring', 'marathon', 'chronos', 'stdout', 'stderr'] callback = mock.MagicMock() with mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True) \ as determine_scribereader_envs_patch, \ mock.patch('paasta_tools.cli.cmds.logs.scribereader'): envs = ['env1', 'env2'] determine_scribereader_envs_patch.return_value = envs logs.ScribeLogReader(cluster_map={}).run_code_over_scribe_envs(clusters, components, callback) # See comment in test_scribereader_print_last_n_logs for where this figure comes from assert callback.call_count == 14 def test_scribereader_print_last_n_logs(): service = 'fake_service' levels = ['fake_level1', 'fake_level2'] clusters = ['fake_cluster1', 'fake_cluster2'] components = ['build', 'deploy', 'monitoring', 'marathon', 'chronos', 'stdout', 'stderr'] with mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True) as mock_scribereader, \ mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True) \ as determine_scribereader_envs_patch: determine_scribereader_envs_patch.return_value = ['env1', 'env2'] fake_iter = mock.MagicMock() fake_iter.__iter__.return_value = ["""{"cluster":"fake_cluster1","component":"stderr","instance":"main", "level":"debug","message":"testing", "timestamp":"2016-06-08T06:31:52.706609135Z"}"""] * 100 mock_scribereader.get_stream_tailer.return_value = fake_iter logs.ScribeLogReader(cluster_map={}).print_last_n_logs(service, 100, levels, components, clusters, raw_mode=False) # one call per component per environment except marathon and chronos which run 1/env/cluster # Defaults: # env1, env2 = 2 # marathon: # env1: cluster1 cluster2 = 2 # env2: cluster1 cluster2 = 2 # chronos: # env1: cluster1 cluster2 = 2 # env2: cluster1 cluster2 = 2 # stdout: # env1, env2 = 2 # stderr: # env1, env2 = 2 assert mock_scribereader.get_stream_tailer.call_count == 14 def test_scribereader_print_logs_by_time(): service = 'fake_service' levels = ['fake_level1', 'fake_level2'] clusters = ['fake_cluster1', 'fake_cluster2'] components = ['build', 'deploy', 'monitoring', 'marathon', 'chronos', 'stdout', 'stderr'] with mock.patch('paasta_tools.cli.cmds.logs.scribereader', autospec=True) as mock_scribereader, \ mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True) \ as determine_scribereader_envs_patch: determine_scribereader_envs_patch.return_value = ['env1', 'env2'] fake_iter = mock.MagicMock() fake_iter.__iter__.return_value = ["""{"cluster":"fake_cluster1","component":"stderr","instance":"main", "level":"debug","message":"testing", "timestamp":"2016-06-08T06:31:52.706609135Z"}"""] * 100 mock_scribereader.get_stream_tailer.return_value = fake_iter mock_scribereader.get_stream_reader.return_value = fake_iter start_time, end_time = logs.generate_start_end_time() logs.ScribeLogReader(cluster_map={}).print_logs_by_time(service, start_time, end_time, 
levels, components, clusters, raw_mode=False) # Please see comment in test_scribereader_print_last_n_logs for where this number comes from assert mock_scribereader.get_stream_tailer.call_count == 14 start_time, end_time = logs.generate_start_end_time("3d", "2d") logs.ScribeLogReader(cluster_map={}).print_logs_by_time(service, start_time, end_time, levels, components, clusters, raw_mode=False) # Please see comment in test_scribereader_print_last_n_logs for where this number comes from assert mock_scribereader.get_stream_reader.call_count == 14 def test_tail_paasta_logs_ctrl_c_in_queue_get(): service = 'fake_service' levels = ['fake_level1', 'fake_level2'] components = ['deploy', 'monitoring', 'chronos', 'stdout', 'stderr'] clusters = ['fake_cluster1', 'fake_cluster2'] with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.print_log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Queue', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Process', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( determine_scribereader_envs_patch, scribe_tail_patch, log_patch, print_log_patch, queue_patch, process_patch, mock_scribereader, ): fake_queue = mock.MagicMock(spec_set=Queue()) fake_queue.get.side_effect = FakeKeyboardInterrupt queue_patch.return_value = fake_queue try: logs.ScribeLogReader(cluster_map={}).tail_logs(service, levels, components, clusters) # We have to catch this ourselves otherwise it will fool pytest too! except FakeKeyboardInterrupt: raise Exception('The code under test failed to catch a (fake) KeyboardInterrupt!') # If we made it here, KeyboardInterrupt was not raised and this test # was successful. def test_tail_paasta_logs_ctrl_c_in_is_alive(): service = 'fake_service' levels = ['fake_level1', 'fake_level2'] components = ['deploy', 'monitoring'] clusters = ['fake_cluster1', 'fake_cluster2'] with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.print_log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Queue', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Process', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( determine_scribereader_envs_patch, scribe_tail_patch, log_patch, print_log_patch, queue_patch, process_patch, mock_scribereader, ): determine_scribereader_envs_patch.return_value = ['env1', 'env2'] fake_queue = mock.MagicMock(spec_set=Queue()) fake_queue.get.side_effect = Empty queue_patch.return_value = fake_queue fake_process = mock.MagicMock() fake_process.is_alive.side_effect = FakeKeyboardInterrupt process_patch.return_value = fake_process scribe_log_reader = logs.ScribeLogReader(cluster_map={'env1': 'env1', 'env2': 'env2'}) try: scribe_log_reader.tail_logs(service, levels, components, clusters) # We have to catch this ourselves otherwise it will fool pytest too! except FakeKeyboardInterrupt: raise Exception('The code under test failed to catch a (fake) KeyboardInterrupt!') # If we made it here, KeyboardInterrupt was not raised and this test # was successful. 
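# --- Illustrative aside (not part of the original test suite) ---
# The two Ctrl-C tests above share one pattern: point a mock's side_effect at a
# KeyboardInterrupt subclass, invoke the code under test, and fail loudly if the
# exception leaks out. A minimal, self-contained sketch of that pattern follows;
# the _tail_forever helper is hypothetical and exists only for illustration.
def _tail_forever(reader):
    # Toy stand-in for a tailing loop that is expected to swallow Ctrl-C.
    try:
        while True:
            reader.read()
    except KeyboardInterrupt:
        return


def test_illustrative_ctrl_c_is_swallowed():
    fake_reader = mock.Mock()
    fake_reader.read.side_effect = FakeKeyboardInterrupt
    try:
        _tail_forever(fake_reader)
    except FakeKeyboardInterrupt:
        raise Exception('The code under test failed to catch a (fake) KeyboardInterrupt!')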
def test_tail_paasta_logs_aliveness_check():
    service = 'fake_service'
    levels = ['fake_level1', 'fake_level2']
    components = ['deploy', 'monitoring']
    clusters = ['fake_cluster1', 'fake_cluster2']
    with contextlib.nested(
        mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.print_log', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.Queue', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.Process', autospec=True),
        mock.patch('paasta_tools.cli.cmds.logs.scribereader'),
    ) as (
        determine_scribereader_envs_patch,
        scribe_tail_patch,
        log_patch,
        print_log_patch,
        queue_patch,
        process_patch,
        mock_scribereader,
    ):
        determine_scribereader_envs_patch.return_value = ['env1', 'env2']
        fake_queue = mock.MagicMock(spec_set=Queue())
        fake_queue.get.side_effect = Empty
        queue_patch.return_value = fake_queue
        fake_process = mock.MagicMock()
        is_alive_responses = [
            # First time: simulate both threads being alive.
            True, True,
            # Second time: simulate first thread is alive but second thread is now dead.
            True, False,
            # This gets us into the kill stanza, which calls is_alive() on each
            # thread again. We'll recycle our answers from the previous calls
            # to is_alive() where the first thread is alive but the second
            # thread is dead.
            True, False,
        ]
        fake_process.is_alive.side_effect = is_alive_responses
        process_patch.return_value = fake_process
        scribe_log_reader = logs.ScribeLogReader(cluster_map={'env1': 'env1', 'env2': 'env2'})
        scribe_log_reader.tail_logs(service, levels, components, clusters)
        # is_alive() should be called on all the values we painstakingly provided above.
        assert fake_process.is_alive.call_count == len(is_alive_responses)
        # We only terminate the first thread, which is still alive. We don't
        # terminate the second thread, which was already dead.
assert fake_process.terminate.call_count == 1 def test_tail_paasta_logs_empty_clusters(): service = 'fake_service' levels = ['fake_level1', 'fake_level2'] components = ['deploy', 'monitoring'] clusters = [] with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.print_log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Queue', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Process', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( determine_scribereader_envs_patch, scribe_tail_patch, log_patch, print_log_patch, queue_patch, process_patch, mock_scribereader, ): determine_scribereader_envs_patch.return_value = [] fake_queue = mock.MagicMock(spec_set=Queue()) fake_queue.get.side_effect = Empty queue_patch.return_value = fake_queue logs.ScribeLogReader(cluster_map={}).tail_logs(service, levels, components, clusters) assert process_patch.call_count == 0 assert print_log_patch.call_count == 0 def test_tail_paasta_logs_marathon(): service = 'fake_service' clusters = ['fake_cluster'] levels = ['fake_level1', 'fake_level2'] components = ['marathon'] with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.determine_scribereader_envs', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.ScribeLogReader.scribe_tail', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.print_log', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Queue', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.Process', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.parse_marathon_log_line', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.marathon_log_line_passes_filter', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( determine_scribereader_envs_patch, scribe_tail_patch, log_patch, print_log_patch, queue_patch, process_patch, parse_marathon_log_line_patch, marathon_log_line_passes_filter_patch, mock_scribereader, ): determine_scribereader_envs_patch.return_value = ['env1'] fake_queue = mock.MagicMock(spec_set=Queue()) # Prevent tail_paasta_logs from reading from queue forever by simulating a Ctrl-C fake_queue.get.side_effect = KeyboardInterrupt queue_patch.return_value = fake_queue logs.ScribeLogReader(cluster_map={'env1': 'env1'}).tail_logs(service, levels, components, clusters) assert process_patch.call_count == 1 def test_determine_scribereader_envs(): cluster = 'fake_cluster' components = ['build', 'monitoring'] with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( mock_scribereader, ): cluster_map = { cluster: 'fake_scribe_env', } actual = logs.ScribeLogReader(cluster_map=cluster_map).determine_scribereader_envs(components, cluster) assert actual == set(['devc', 'fake_scribe_env']) def test_determine_scribereader_additional_envs(): cluster = 'fake_cluster' components = ['fake_component'] with mock.patch('paasta_tools.cli.cmds.logs.scribereader'), \ mock.patch('paasta_tools.cli.cmds.logs.LOG_COMPONENTS', spec_set=dict) as mock_LOG_COMPONENTS: cluster_map = { cluster: 'fake_scribe_env', } LOG_COMPONENTS = { 'fake_component': { 'additional_source_envs': ['fake_scribe_env2'] } } mock_LOG_COMPONENTS.__getitem__.side_effect = LOG_COMPONENTS.__getitem__ 
actual = logs.ScribeLogReader(cluster_map=cluster_map).determine_scribereader_envs(components, cluster) assert 'fake_scribe_env' in actual and 'fake_scribe_env2' in actual def test_prefix(): actual = logs.prefix('TEST STRING', 'deploy') assert 'TEST STRING' in actual def test_get_log_reader(): mock_system_paasta_config = mock.Mock(autospec='paasta_tools.utils.SystemPaastaConfig') mock_system_paasta_config.get_log_reader.return_value = {'driver': 'scribereader', 'options': {'cluster_map': {}}} with contextlib.nested( mock.patch('paasta_tools.cli.cmds.logs.load_system_paasta_config', autospec=True), mock.patch('paasta_tools.cli.cmds.logs.scribereader'), ) as ( mock_load_system_paasta_config, mock_scribereader, ): mock_load_system_paasta_config.return_value = mock_system_paasta_config actual = logs.get_log_reader() assert isinstance(actual, logs.ScribeLogReader) def test_generate_start_end_time(): start_time, end_time = logs.generate_start_end_time() # Default for no args, test that there's a 30 minute difference time_delta = end_time - start_time # Do a loose comparison to make this test less time sensitive. # On slower systems, doing a datetime.datetime.utcnow() might # take a few milliseconds itself, doing a straight up == comparison # would not work actual_time = time_delta.total_seconds() ideal_time = 30 * 60 assert abs(actual_time - ideal_time) < 0.1 def test_generate_start_end_time_human_durations(): start_time, end_time = logs.generate_start_end_time("35m", "25m") time_delta = end_time - start_time actual_time = time_delta.total_seconds() ideal_time = 10 * 60 # See note on the test above why this is not a simple == comparison assert abs(actual_time - ideal_time) < 0.1 def test_generate_start_end_time_invalid(): # Try giving a start time that's later than the end time try: logs.generate_start_end_time("2016-06-06T20:26:49+00:00", "2016-06-06T20:25:49+00:00") assert False except ValueError: assert True def test_generate_start_end_time_invalid_from(): try: logs.generate_start_end_time("invalid", "2016-06-06T20:25:49+00:00") assert False except ValueError: assert True def test_generate_start_end_time_invalid_to(): try: logs.generate_start_end_time("2016-06-06T20:25:49+00:00", "invalid") assert False except ValueError: assert True def test_validate_filtering_args_with_valid_inputs(): fake_reader = logs.LogReader() fake_reader.SUPPORTS_TAILING = True fake_reader.SUPPORTS_LINE_COUNT = True fake_reader.SUPPORTS_TIME = True fake_reader.SUPPORTS_LINE_OFFSET = True # No arguments, completely valid args, _ = parse_args(["logs"]) assert logs.validate_filtering_args(args, fake_reader) # Tailing args, _ = parse_args(["logs", "--tail"]) assert logs.validate_filtering_args(args, fake_reader) # Specify number of lines args, _ = parse_args(["logs", "-l", "200"]) assert logs.validate_filtering_args(args, fake_reader) # Specify number of lines and lines to offset by args, _ = parse_args(["logs", "-l", "200", "-o", "23"]) assert logs.validate_filtering_args(args, fake_reader) # Specify a time args, _ = parse_args(["logs", "--from", "1w"]) assert logs.validate_filtering_args(args, fake_reader) def test_validate_filtering_args_with_invalid_inputs(): fake_reader = logs.LogReader() fake_reader.SUPPORTS_TAILING = False args, _ = parse_args(["logs", "--tail"]) assert not logs.validate_filtering_args(args, fake_reader) fake_reader.SUPPORTS_TIME = False args, _ = parse_args(["logs", "--from", "1w"]) assert not logs.validate_filtering_args(args, fake_reader) fake_reader.SUPPORTS_LINE_COUNT = False args, _ = 
parse_args(["logs", "-l", "200"]) assert not logs.validate_filtering_args(args, fake_reader) fake_reader.SUPPORTS_LINE_OFFSET = False args, _ = parse_args(["logs", "-o", "23"]) assert not logs.validate_filtering_args(args, fake_reader) fake_reader.SUPPORTS_TAILING = True fake_reader.SUPPORTS_LINE_COUNT = True fake_reader.SUPPORTS_LINE_OFFSET = True fake_reader.SUPPORTS_TIME = True # Can't tail and specify lines at the same time args, _ = parse_args(["logs", "-l", "200", "--tail"]) assert not logs.validate_filtering_args(args, fake_reader) # Can't tail and specify time at the same time args, _ = parse_args(["logs", "--tail", "--from", "1w"]) assert not logs.validate_filtering_args(args, fake_reader) # Can't use both time and lines at the same time args, _ = parse_args(["logs", "--from", "1w", "-l", "100"]) assert not logs.validate_filtering_args(args, fake_reader) def test_pick_default_log_mode(): with mock.patch('paasta_tools.cli.cmds.logs.LogReader.tail_logs') as tail_logs: args, _ = parse_args(["logs"]) fake_reader = logs.LogReader() fake_reader.SUPPORTS_TAILING = True logs.pick_default_log_mode(args, fake_reader, service=None, levels=None, components=None, clusters=None) # Only supports tailing so that's the one that should be used assert tail_logs.call_count == 1 with mock.patch('paasta_tools.cli.cmds.logs.LogReader.print_logs_by_time') as logs_by_time: args, _ = parse_args(["logs"]) fake_reader = logs.LogReader() fake_reader.SUPPORTS_TAILING = True fake_reader.SUPPORTS_TIME = True logs.pick_default_log_mode(args, fake_reader, service=None, levels=None, components=None, clusters=None) # Supports tailing and time, but time should be prioritized assert logs_by_time.call_count == 1 with mock.patch('paasta_tools.cli.cmds.logs.LogReader.print_last_n_logs') as logs_by_lines: args, _ = parse_args(["logs"]) fake_reader = logs.LogReader() fake_reader.SUPPORTS_TAILING = True fake_reader.SUPPORTS_TIME = True fake_reader.SUPPORTS_LINE_COUNT = True logs.pick_default_log_mode(args, fake_reader, service=None, levels=None, components=None, clusters=None) # Supports tailing , time and line counts. Line counts should be prioritized assert logs_by_lines.call_count == 1
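The tests above revolve around one idea: each log line is a JSON object carrying
component, cluster, level and timestamp fields, and a line is printed only when every
requested filter matches. The sketch below illustrates a simplified predicate over the
level, component and cluster fields; the function name and field handling are
assumptions written from the tests, not the paasta_tools implementation.

import json

def line_passes_filter(line, levels, components, clusters):
    """Return True if a JSON log line matches all requested filters."""
    try:
        parsed = json.loads(line)
    except ValueError:
        # Lines that are not valid JSON never match, mirroring the tests above.
        return False
    return (
        parsed.get('level') in levels
        and parsed.get('component') in components
        and parsed.get('cluster') in clusters
    )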
apache-2.0
-8,305,459,735,853,499,000
37.85124
118
0.639864
false
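The timestamp-window helpers exercised earlier in the same test file
(check_timestamp_in_range and generate_start_end_time) reduce to two behaviours: missing
bounds match everything, and human-readable offsets such as "10m" become datetimes
relative to now. A small sketch of that logic, written from the tests rather than from
the paasta_tools source, so the names and details below are assumptions:

import datetime

def timestamp_in_range(timestamp, start_time, end_time):
    # A missing timestamp or missing bounds is treated as a match,
    # as in test_check_timestamp_in_range_with_none_arguments.
    if timestamp is None or start_time is None or end_time is None:
        return True
    return start_time <= timestamp <= end_time

def minutes_ago(spec):
    # Turn a string like "10m" into an absolute UTC datetime.
    minutes = int(spec.rstrip('m'))
    return datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes)

# For example, timestamp_in_range(datetime.datetime.utcnow(), minutes_ago("10m"), minutes_ago("5m"))
# is False, matching test_check_timestamp_in_range_false.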
jrossi/paver
bootstrap.py
1
53941
#!/usr/bin/env python ## WARNING: This file is generated #!/usr/bin/env python """Create a "virtual" Python installation """ import sys import os import optparse import shutil import logging import distutils.sysconfig try: import subprocess except ImportError, e: if sys.version_info <= (2, 3): print 'ERROR: %s' % e print 'ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.' print 'If you copy subprocess.py from a newer version of Python this script will probably work' sys.exit(101) else: raise try: set except NameError: from sets import Set as set join = os.path.join py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1]) is_jython = sys.platform.startswith('java') expected_exe = is_jython and 'jython' or 'python' REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'ntpath', 'genericpath', 'fnmatch', 'locale', 'encodings', 'codecs', 'stat', 'UserDict', 'readline', 'copy_reg', 'types', 're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile', 'lib-dynload', 'config', 'zlib'] if sys.version_info[:2] == (2, 6): REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc']) if sys.version_info[:2] <= (2, 3): REQUIRED_MODULES.extend(['sets', '__future__']) class Logger(object): """ Logging object for use in command-line script. Allows ranges of levels, to avoid some redundancy of displayed information. """ DEBUG = logging.DEBUG INFO = logging.INFO NOTIFY = (logging.INFO+logging.WARN)/2 WARN = WARNING = logging.WARN ERROR = logging.ERROR FATAL = logging.FATAL LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL] def __init__(self, consumers): self.consumers = consumers self.indent = 0 self.in_progress = None self.in_progress_hanging = False def debug(self, msg, *args, **kw): self.log(self.DEBUG, msg, *args, **kw) def info(self, msg, *args, **kw): self.log(self.INFO, msg, *args, **kw) def notify(self, msg, *args, **kw): self.log(self.NOTIFY, msg, *args, **kw) def warn(self, msg, *args, **kw): self.log(self.WARN, msg, *args, **kw) def error(self, msg, *args, **kw): self.log(self.WARN, msg, *args, **kw) def fatal(self, msg, *args, **kw): self.log(self.FATAL, msg, *args, **kw) def log(self, level, msg, *args, **kw): if args: if kw: raise TypeError( "You may give positional or keyword arguments, not both") args = args or kw rendered = None for consumer_level, consumer in self.consumers: if self.level_matches(level, consumer_level): if (self.in_progress_hanging and consumer in (sys.stdout, sys.stderr)): self.in_progress_hanging = False sys.stdout.write('\n') sys.stdout.flush() if rendered is None: if args: rendered = msg % args else: rendered = msg rendered = ' '*self.indent + rendered if hasattr(consumer, 'write'): consumer.write(rendered+'\n') else: consumer(rendered) def start_progress(self, msg): assert not self.in_progress, ( "Tried to start_progress(%r) while in_progress %r" % (msg, self.in_progress)) if self.level_matches(self.NOTIFY, self._stdout_level()): sys.stdout.write(msg) sys.stdout.flush() self.in_progress_hanging = True else: self.in_progress_hanging = False self.in_progress = msg def end_progress(self, msg='done.'): assert self.in_progress, ( "Tried to end_progress without start_progress") if self.stdout_level_matches(self.NOTIFY): if not self.in_progress_hanging: # Some message has been printed out since start_progress sys.stdout.write('...' 
+ self.in_progress + msg + '\n') sys.stdout.flush() else: sys.stdout.write(msg + '\n') sys.stdout.flush() self.in_progress = None self.in_progress_hanging = False def show_progress(self): """If we are in a progress scope, and no log messages have been shown, write out another '.'""" if self.in_progress_hanging: sys.stdout.write('.') sys.stdout.flush() def stdout_level_matches(self, level): """Returns true if a message at this level will go to stdout""" return self.level_matches(level, self._stdout_level()) def _stdout_level(self): """Returns the level that stdout runs at""" for level, consumer in self.consumers: if consumer is sys.stdout: return level return self.FATAL def level_matches(self, level, consumer_level): """ >>> l = Logger() >>> l.level_matches(3, 4) False >>> l.level_matches(3, 2) True >>> l.level_matches(slice(None, 3), 3) False >>> l.level_matches(slice(None, 3), 2) True >>> l.level_matches(slice(1, 3), 1) True >>> l.level_matches(slice(2, 3), 1) False """ if isinstance(level, slice): start, stop = level.start, level.stop if start is not None and start > consumer_level: return False if stop is not None or stop <= consumer_level: return False return True else: return level >= consumer_level #@classmethod def level_for_integer(cls, level): levels = cls.LEVELS if level < 0: return levels[0] if level >= len(levels): return levels[-1] return levels[level] level_for_integer = classmethod(level_for_integer) def mkdir(path): if not os.path.exists(path): logger.info('Creating %s', path) os.makedirs(path) else: logger.info('Directory %s already exists', path) def copyfile(src, dest, symlink=True): if not os.path.exists(src): # Some bad symlink in the src logger.warn('Cannot find file %s (bad symlink)', src) return if os.path.exists(dest): logger.debug('File %s already exists', dest) return if not os.path.exists(os.path.dirname(dest)): logger.info('Creating parent directories for %s' % os.path.dirname(dest)) os.makedirs(os.path.dirname(dest)) if symlink and hasattr(os, 'symlink'): logger.info('Symlinking %s', dest) os.symlink(os.path.abspath(src), dest) else: logger.info('Copying to %s', dest) if os.path.isdir(src): shutil.copytree(src, dest, True) else: shutil.copy2(src, dest) def writefile(dest, content, overwrite=True): if not os.path.exists(dest): logger.info('Writing %s', dest) f = open(dest, 'wb') f.write(content) f.close() return else: f = open(dest, 'rb') c = f.read() f.close() if c != content: if not overwrite: logger.notify('File %s exists with different content; not overwriting', dest) return logger.notify('Overwriting %s with new content', dest) f = open(dest, 'wb') f.write(content) f.close() else: logger.info('Content %s already in place', dest) def rmtree(dir): if os.path.exists(dir): logger.notify('Deleting tree %s', dir) shutil.rmtree(dir) else: logger.info('Do not need to delete %s; already gone', dir) def make_exe(fn): if hasattr(os, 'chmod'): oldmode = os.stat(fn).st_mode & 07777 newmode = (oldmode | 0555) & 07777 os.chmod(fn, newmode) logger.info('Changed mode of %s to %s', fn, oct(newmode)) def install_setuptools(py_executable, unzip=False): setup_fn = 'setuptools-0.6c9-py%s.egg' % sys.version[:3] search_dirs = ['.', os.path.dirname(__file__), join(os.path.dirname(__file__), 'support-files')] if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv': # Probably some boot script; just in case virtualenv is installed... 
try: import virtualenv except ImportError: pass else: search_dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'support-files')) for dir in search_dirs: if os.path.exists(join(dir, setup_fn)): setup_fn = join(dir, setup_fn) break if is_jython and os._name == 'nt': # Jython's .bat sys.executable can't handle a command line # argument with newlines import tempfile fd, ez_setup = tempfile.mkstemp('.py') os.write(fd, EZ_SETUP_PY) os.close(fd) cmd = [py_executable, ez_setup] else: cmd = [py_executable, '-c', EZ_SETUP_PY] if unzip: cmd.append('--always-unzip') env = {} if logger.stdout_level_matches(logger.DEBUG): cmd.append('-v') if os.path.exists(setup_fn): logger.info('Using existing Setuptools egg: %s', setup_fn) cmd.append(setup_fn) if os.environ.get('PYTHONPATH'): env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH'] else: env['PYTHONPATH'] = setup_fn else: logger.info('No Setuptools egg found; downloading') cmd.extend(['--always-copy', '-U', 'setuptools']) logger.start_progress('Installing setuptools...') logger.indent += 2 cwd = None if not os.access(os.getcwd(), os.W_OK): cwd = '/tmp' try: call_subprocess(cmd, show_stdout=False, filter_stdout=filter_ez_setup, extra_env=env, cwd=cwd) finally: logger.indent -= 2 logger.end_progress() if is_jython and os._name == 'nt': os.remove(ez_setup) def filter_ez_setup(line): if not line.strip(): return Logger.DEBUG for prefix in ['Reading ', 'Best match', 'Processing setuptools', 'Copying setuptools', 'Adding setuptools', 'Installing ', 'Installed ']: if line.startswith(prefix): return Logger.DEBUG return Logger.INFO def main(): parser = optparse.OptionParser( version="1.3.2", usage="%prog [OPTIONS] DEST_DIR") parser.add_option( '-v', '--verbose', action='count', dest='verbose', default=0, help="Increase verbosity") parser.add_option( '-q', '--quiet', action='count', dest='quiet', default=0, help='Decrease verbosity') parser.add_option( '-p', '--python', dest='python', metavar='PYTHON_EXE', help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 ' 'interpreter to create the new environment. The default is the interpreter that ' 'virtualenv was installed with (%s)' % sys.executable) parser.add_option( '--clear', dest='clear', action='store_true', help="Clear out the non-root install and start from scratch") parser.add_option( '--no-site-packages', dest='no_site_packages', action='store_true', help="Don't give access to the global site-packages dir to the " "virtual environment") parser.add_option( '--unzip-setuptools', dest='unzip_setuptools', action='store_true', help="Unzip Setuptools when installing it") parser.add_option( '--relocatable', dest='relocatable', action='store_true', help='Make an EXISTING virtualenv environment relocatable. 
' 'This fixes up scripts and makes all .pth files relative') if 'extend_parser' in globals(): extend_parser(parser) options, args = parser.parse_args() global logger if 'adjust_options' in globals(): adjust_options(options, args) verbosity = options.verbose - options.quiet logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)]) if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'): env = os.environ.copy() interpreter = resolve_interpreter(options.python) if interpreter == sys.executable: logger.warn('Already using interpreter %s' % interpreter) else: logger.notify('Running virtualenv with interpreter %s' % interpreter) env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true' file = __file__ if file.endswith('.pyc'): file = file[:-1] os.execvpe(interpreter, [interpreter, file] + sys.argv[1:], env) if not args: print 'You must provide a DEST_DIR' parser.print_help() sys.exit(2) if len(args) > 1: print 'There must be only one argument: DEST_DIR (you gave %s)' % ( ' '.join(args)) parser.print_help() sys.exit(2) home_dir = args[0] if os.environ.get('WORKING_ENV'): logger.fatal('ERROR: you cannot run virtualenv while in a workingenv') logger.fatal('Please deactivate your workingenv, then re-run this script') sys.exit(3) if os.environ.get('PYTHONHOME'): if sys.platform == 'win32': name = '%PYTHONHOME%' else: name = '$PYTHONHOME' logger.warn('%s is set; this can cause problems creating environments' % name) if options.relocatable: make_environment_relocatable(home_dir) return create_environment(home_dir, site_packages=not options.no_site_packages, clear=options.clear, unzip_setuptools=options.unzip_setuptools) if 'after_install' in globals(): after_install(options, home_dir) def call_subprocess(cmd, show_stdout=True, filter_stdout=None, cwd=None, raise_on_returncode=True, extra_env=None): cmd_parts = [] for part in cmd: if len(part) > 40: part = part[:30]+"..."+part[-5:] if ' ' in part or '\n' in part or '"' in part or "'" in part: part = '"%s"' % part.replace('"', '\\"') cmd_parts.append(part) cmd_desc = ' '.join(cmd_parts) if show_stdout: stdout = None else: stdout = subprocess.PIPE logger.debug("Running command %s" % cmd_desc) if extra_env: env = os.environ.copy() env.update(extra_env) else: env = None try: proc = subprocess.Popen( cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout, cwd=cwd, env=env) except Exception, e: logger.fatal( "Error %s while executing command %s" % (e, cmd_desc)) raise all_output = [] if stdout is not None: stdout = proc.stdout while 1: line = stdout.readline() if not line: break line = line.rstrip() all_output.append(line) if filter_stdout: level = filter_stdout(line) if isinstance(level, tuple): level, line = level logger.log(level, line) if not logger.stdout_level_matches(level): logger.show_progress() else: logger.info(line) else: proc.communicate() proc.wait() if proc.returncode: if raise_on_returncode: if all_output: logger.notify('Complete output from command %s:' % cmd_desc) logger.notify('\n'.join(all_output) + '\n----------------------------------------') raise OSError( "Command %s failed with error code %s" % (cmd_desc, proc.returncode)) else: logger.warn( "Command %s had error code %s" % (cmd_desc, proc.returncode)) def create_environment(home_dir, site_packages=True, clear=False, unzip_setuptools=False): """ Creates a new environment in ``home_dir``. If ``site_packages`` is true (the default) then the global ``site-packages/`` directory will be on the path. 
If ``clear`` is true (default False) then the environment will first be cleared. """ # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its # prefix arg is broken: http://bugs.python.org/issue3386 if sys.platform == 'win32': lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') elif is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') else: lib_dir = join(home_dir, 'lib', py_version) inc_dir = join(home_dir, 'include', py_version) bin_dir = join(home_dir, 'bin') if sys.executable.startswith(bin_dir): print 'Please use the *system* python to run this script' return if clear: rmtree(lib_dir) ## FIXME: why not delete it? ## Maybe it should delete everything with #!/path/to/venv/python in it logger.notify('Not deleting %s', bin_dir) if hasattr(sys, 'real_prefix'): logger.notify('Using real prefix %r' % sys.real_prefix) prefix = sys.real_prefix else: prefix = sys.prefix mkdir(lib_dir) fix_lib64(lib_dir) stdlib_dirs = [os.path.dirname(os.__file__)] if sys.platform == 'win32': stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs')) elif sys.platform == 'darwin': stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages')) for stdlib_dir in stdlib_dirs: if not os.path.isdir(stdlib_dir): continue if hasattr(os, 'symlink'): logger.info('Symlinking Python bootstrap modules') else: logger.info('Copying Python bootstrap modules') logger.indent += 2 try: for fn in os.listdir(stdlib_dir): if fn != 'site-packages' and os.path.splitext(fn)[0] in REQUIRED_MODULES: copyfile(join(stdlib_dir, fn), join(lib_dir, fn)) finally: logger.indent -= 2 mkdir(join(lib_dir, 'site-packages')) writefile(join(lib_dir, 'site.py'), SITE_PY) writefile(join(lib_dir, 'orig-prefix.txt'), prefix) site_packages_filename = join(lib_dir, 'no-global-site-packages.txt') if not site_packages: writefile(site_packages_filename, '') else: if os.path.exists(site_packages_filename): logger.info('Deleting %s' % site_packages_filename) os.unlink(site_packages_filename) stdinc_dir = join(prefix, 'include', py_version) if os.path.exists(stdinc_dir): copyfile(stdinc_dir, inc_dir) else: logger.debug('No include dir %s' % stdinc_dir) if sys.exec_prefix != prefix: if sys.platform == 'win32': exec_dir = join(sys.exec_prefix, 'lib') elif is_jython: exec_dir = join(sys.exec_prefix, 'Lib') else: exec_dir = join(sys.exec_prefix, 'lib', py_version) for fn in os.listdir(exec_dir): copyfile(join(exec_dir, fn), join(lib_dir, fn)) if is_jython: # Jython has either jython.jar and javalib/ dir, or just # jython-complete.jar for name in 'jython.jar', 'javalib', 'jython-complete.jar': src = join(prefix, name) if os.path.exists(src): copyfile(src, join(home_dir, name)) copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'), symlink=False) mkdir(bin_dir) py_executable = join(bin_dir, os.path.basename(sys.executable)) if 'Python.framework' in prefix: if py_executable.endswith('/Python'): # The name of the python executable is not quite what # we want, rename it. py_executable = os.path.join( os.path.dirname(py_executable), 'python') logger.notify('New %s executable in %s', expected_exe, py_executable) if sys.executable != py_executable: ## FIXME: could I just hard link? 
executable = sys.executable if (sys.platform == 'cygwin' and not os.path.exists(executable) and os.path.exists(executable + '.exe')): # Cygwin misreports sys.executable sometimes executable += '.exe' py_executable += '.exe' logger.info('Executable actually exists in %s' % executable) shutil.copyfile(executable, py_executable) make_exe(py_executable) if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe: secondary_exe = os.path.join(os.path.dirname(py_executable), expected_exe) py_executable_ext = os.path.splitext(py_executable)[1] if py_executable_ext == '.exe': # python2.4 gives an extension of '.4' :P secondary_exe += py_executable_ext if os.path.exists(secondary_exe): logger.warn('Not overwriting existing %s script %s (you must use %s)' % (expected_exe, secondary_exe, py_executable)) else: logger.notify('Also creating executable in %s' % secondary_exe) shutil.copyfile(sys.executable, secondary_exe) make_exe(secondary_exe) if 'Python.framework' in prefix: logger.debug('MacOSX Python framework detected') # Copy the framework's dylib into the virtual # environment virtual_lib = os.path.join(home_dir, '.Python') if os.path.exists(virtual_lib): os.unlink(virtual_lib) copyfile( os.path.join(prefix, 'Python'), virtual_lib) # And then change the install_name of the copied python executable try: call_subprocess( ["install_name_tool", "-change", os.path.join(prefix, 'Python'), '@executable_path/../.Python', py_executable]) except: logger.fatal( "Could not call install_name_tool -- you must have Apple's development tools installed") raise # Some tools depend on pythonX.Y being present pth = py_executable + '%s.%s' % ( sys.version_info[0], sys.version_info[1]) if os.path.exists(pth): os.unlink(pth) os.symlink('python', pth) if sys.platform == 'win32' and ' ' in py_executable: # There's a bug with subprocess on Windows when using a first # argument that has a space in it. Instead we have to quote # the value: py_executable = '"%s"' % py_executable cmd = [py_executable, '-c', 'import sys; print sys.prefix'] logger.info('Testing executable with %s %s "%s"' % tuple(cmd)) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) proc_stdout, proc_stderr = proc.communicate() proc_stdout = os.path.normcase(os.path.abspath(proc_stdout.strip())) if proc_stdout != os.path.normcase(os.path.abspath(home_dir)): logger.fatal( 'ERROR: The executable %s is not functioning' % py_executable) logger.fatal( 'ERROR: It thinks sys.prefix is %r (should be %r)' % (proc_stdout, os.path.normcase(os.path.abspath(home_dir)))) logger.fatal( 'ERROR: virtualenv is not compatible with this system or executable') sys.exit(100) else: logger.info('Got sys.prefix result: %r' % proc_stdout) pydistutils = os.path.expanduser('~/.pydistutils.cfg') if os.path.exists(pydistutils): logger.notify('Please make sure you remove any previous custom paths from ' 'your %s file.' 
% pydistutils) install_distutils(lib_dir, home_dir) install_setuptools(py_executable, unzip=unzip_setuptools) install_activate(home_dir, bin_dir) def install_activate(home_dir, bin_dir): if sys.platform == 'win32' or is_jython and os._name == 'nt': files = {'activate.bat': ACTIVATE_BAT, 'deactivate.bat': DEACTIVATE_BAT} if os.environ.get('OS') == 'Windows_NT' and os.environ.get('OSTYPE') == 'cygwin': files['activate'] = ACTIVATE_SH else: files = {'activate': ACTIVATE_SH} files['activate_this.py'] = ACTIVATE_THIS for name, content in files.items(): content = content.replace('__VIRTUAL_ENV__', os.path.abspath(home_dir)) content = content.replace('__VIRTUAL_NAME__', os.path.basename(os.path.abspath(home_dir))) content = content.replace('__BIN_NAME__', os.path.basename(bin_dir)) writefile(os.path.join(bin_dir, name), content) def install_distutils(lib_dir, home_dir): distutils_path = os.path.join(lib_dir, 'distutils') mkdir(distutils_path) ## FIXME: maybe this prefix setting should only be put in place if ## there's a local distutils.cfg with a prefix setting? home_dir = os.path.abspath(home_dir) ## FIXME: this is breaking things, removing for now: #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT) writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False) def fix_lib64(lib_dir): """ Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y instead of lib/pythonX.Y. If this is such a platform we'll just create a symlink so lib64 points to lib """ if [p for p in distutils.sysconfig.get_config_vars().values() if isinstance(p, basestring) and 'lib64' in p]: logger.debug('This system uses lib64; symlinking lib64 to lib') assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], ( "Unexpected python lib dir: %r" % lib_dir) lib_parent = os.path.dirname(lib_dir) assert os.path.basename(lib_parent) == 'lib', ( "Unexpected parent dir: %r" % lib_parent) copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64')) def resolve_interpreter(exe): """ If the executable given isn't an absolute path, search $PATH for the interpreter """ if os.path.abspath(exe) != exe: paths = os.environ.get('PATH', '').split(os.pathsep) for path in paths: if os.path.exists(os.path.join(path, exe)): exe = os.path.join(path, exe) break if not os.path.exists(exe): logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe)) sys.exit(3) return exe ############################################################ ## Relocating the environment: def make_environment_relocatable(home_dir): """ Makes the already-existing environment use relative paths, and takes out the #!-based environment selection in scripts. 
""" activate_this = os.path.join(home_dir, 'bin', 'activate_this.py') if not os.path.exists(activate_this): logger.fatal( 'The environment doesn\'t have a file %s -- please re-run virtualenv ' 'on this environment to update it' % activate_this) fixup_scripts(home_dir) fixup_pth_and_egg_link(home_dir) ## FIXME: need to fix up distutils.cfg OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3], 'activate', 'activate.bat', 'activate_this.py'] def fixup_scripts(home_dir): # This is what we expect at the top of scripts: shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir)) # This is what we'll put: new_shebang = '#!/usr/bin/env python%s' % sys.version[:3] activate = "import os; activate_this=os.path.join(os.path.dirname(__file__), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this" bin_dir = os.path.join(home_dir, 'bin') for filename in os.listdir(bin_dir): filename = os.path.join(bin_dir, filename) f = open(filename, 'rb') lines = f.readlines() f.close() if not lines: logger.warn('Script %s is an empty file' % filename) continue if lines[0].strip() != shebang: if os.path.basename(filename) in OK_ABS_SCRIPTS: logger.debug('Cannot make script %s relative' % filename) elif lines[0].strip() == new_shebang: logger.info('Script %s has already been made relative' % filename) else: logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)' % (filename, shebang)) continue logger.notify('Making script %s relative' % filename) lines = [new_shebang+'\n', activate+'\n'] + lines[1:] f = open(filename, 'wb') f.writelines(lines) f.close() def fixup_pth_and_egg_link(home_dir): """Makes .pth and .egg-link files use relative paths""" home_dir = os.path.normcase(os.path.abspath(home_dir)) for path in sys.path: if not path: path = '.' 
if not os.path.isdir(path): continue path = os.path.normcase(os.path.abspath(path)) if not path.startswith(home_dir): logger.debug('Skipping system (non-environment) directory %s' % path) continue for filename in os.listdir(path): filename = os.path.join(path, filename) if filename.endswith('.pth'): if not os.access(filename, os.W_OK): logger.warn('Cannot write .pth file %s, skipping' % filename) else: fixup_pth_file(filename) if filename.endswith('.egg-link'): if not os.access(filename, os.W_OK): logger.warn('Cannot write .egg-link file %s, skipping' % filename) else: fixup_egg_link(filename) def fixup_pth_file(filename): lines = [] prev_lines = [] f = open(filename) prev_lines = f.readlines() f.close() for line in prev_lines: line = line.strip() if (not line or line.startswith('#') or line.startswith('import ') or os.path.abspath(line) != line): lines.append(line) else: new_value = make_relative_path(filename, line) if line != new_value: logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename)) lines.append(new_value) if lines == prev_lines: logger.info('No changes to .pth file %s' % filename) return logger.notify('Making paths in .pth file %s relative' % filename) f = open(filename, 'w') f.write('\n'.join(lines) + '\n') f.close() def fixup_egg_link(filename): f = open(filename) link = f.read().strip() f.close() if os.path.abspath(link) != link: logger.debug('Link in %s already relative' % filename) return new_link = make_relative_path(filename, link) logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link)) f = open(filename, 'w') f.write(new_link) f.close() def make_relative_path(source, dest, dest_is_directory=True): """ Make a filename relative, where the filename is dest, and it is being referred to from the filename source. >>> make_relative_path('/usr/share/something/a-file.pth', ... '/usr/share/another-place/src/Directory') '../another-place/src/Directory' >>> make_relative_path('/usr/share/something/a-file.pth', ... '/home/user/src/Directory') '../../../home/user/src/Directory' >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/') './' """ source = os.path.dirname(source) if not dest_is_directory: dest_filename = os.path.basename(dest) dest = os.path.dirname(dest) dest = os.path.normpath(os.path.abspath(dest)) source = os.path.normpath(os.path.abspath(source)) dest_parts = dest.strip(os.path.sep).split(os.path.sep) source_parts = source.strip(os.path.sep).split(os.path.sep) while dest_parts and source_parts and dest_parts[0] == source_parts[0]: dest_parts.pop(0) source_parts.pop(0) full_parts = ['..']*len(source_parts) + dest_parts if not dest_is_directory: full_parts.append(dest_filename) if not full_parts: # Special case for the current directory (otherwise it'd be '') return './' return os.path.sep.join(full_parts) ############################################################ ## Bootstrap script creation: def create_bootstrap_script(extra_text, python_version=''): """ Creates a bootstrap script, which is like this script but with extend_parser, adjust_options, and after_install hooks. This returns a string that (written to disk of course) can be used as a bootstrap script with your own customizations. The script will be the standard virtualenv.py script, with your extra text added (your extra text should be Python code). If you include these functions, they will be called: ``extend_parser(optparse_parser)``: You can add or remove options from the parser here. 
``adjust_options(options, args)``: You can change options here, or change the args (if you accept different kinds of arguments, be sure you modify ``args`` so it is only ``[DEST_DIR]``). ``after_install(options, home_dir)``: After everything is installed, this function is called. This is probably the function you are most likely to use. An example would be:: def after_install(options, home_dir): subprocess.call([join(home_dir, 'bin', 'easy_install'), 'MyPackage']) subprocess.call([join(home_dir, 'bin', 'my-package-script'), 'setup', home_dir]) This example immediately installs a package, and runs a setup script from that package. If you provide something like ``python_version='2.4'`` then the script will start with ``#!/usr/bin/env python2.4`` instead of ``#!/usr/bin/env python``. You can use this when the script must be run with a particular Python version. """ filename = __file__ if filename.endswith('.pyc'): filename = filename[:-1] f = open(filename, 'rb') content = f.read() f.close() py_exe = 'python%s' % python_version content = (('#!/usr/bin/env %s\n' % py_exe) + '## WARNING: This file is generated\n' + content) return content.replace('##EXT' 'END##', extra_text) def adjust_options(options, args): args[:] = ['.'] def after_install(options, home_dir): if sys.platform == 'win32': bin_dir = join(home_dir, 'Scripts') else: bin_dir = join(home_dir, 'bin') subprocess.call([join(bin_dir, 'easy_install'), 'nose']) subprocess.call([join(bin_dir, 'easy_install'), 'Sphinx>=0.6b1']) subprocess.call([join(bin_dir, 'easy_install'), 'docutils']) subprocess.call([join(bin_dir, 'easy_install'), 'virtualenv']) subprocess.call([join(bin_dir, 'python'), '-c', 'import sys; sys.path.append("."); import paver.command; paver.command.main()', 'develop']) ##file site.py SITE_PY = """ eJy1PP1z2zaWv/OvwNKToZTKdJJ2OztO3Zt8uFvvuEm2Tmdz63p0FAlJrCmSJUjL2pu7v/3eBwCC H7Ll3T1NJpYI4OHh4X3jgb7vvylLmSdiUyRNJoWSURWvRRnVayWWRSXqdVolx2VU1Tt4Gt9GK6lE XQi1UyH2Cj3v+b/48Z6Lz+tUGRTgW9TUxSaq0zjKsp1IN2VR1TIRSVOl+UqkeVqnUZb+A3oUeSie /+sYeBe5gJVnqazEnawUwFWiWIpPu3pd5GLSlLjml+Efo6+nM6HiKi1r6FBpnIEi66j2cikTQBN6 NgpImdbyWJUyTpdpbDtuiyZLRJlFsRT/9V+8NOoaBJ4qNnK7lpUUOSADMCXAKhEP+JpWIi4SGQrx VsYRTsDPW2J5DG2Ge6aQjHkhsiJfwZpyGUulomonJoumJkCEskgKwCkFDOo0y7xtUd2qKWwp7ccW HomI2aO7GGYPWCfOP+QcwPFj7v2Sp/czhg3cg+DqNbNNJZfpvYgQLPyU9zKe62eTdCmSdLkEGuT1 FLt4jIASWbo4KWk7vtM79P0JYWW5MoI5JKLMnbmRRoTex1wUgGyFlK+BrzdKTDZRmgN7/RTFhMvf 0jwptmpKOAN9lfitUbWDsTcZQRl6OyjPBJLX0L/Js/RWZrspEOTzWnqVVE1WIwsnaSXjuqhSqQgA oLYT8j5VACGC/edFMy8ZSZsxOTJVgATgVqBIoIhiI2xpvkxXTUUyIZYp8Brs4w8ffxbvz99evPmg ucIAYylbbQBngEJb4+AEE4iTRlUnWQEiGHqX+EdESYJiscL5Aa+2w8mje+NNYO1l2B/jbBGQ/b1c pFFupoE11iD+NJdH4/4bhszUGujzPw/PBgt/s48qtHD+tl0XIEV5tJFiHSniZeQM7zsN5/uwrNev gRsUwqmBVIo3J0lShAckcWk2KXIpSmCxLM3l1AMKLahvdxeBFT4U+THtdY8TAELl5dDoPJvSjLmE hQ5hvUYJN513tDLdxbP7vCkqEnXg/zwm7ZFF+S3hqIjt+dtCrtI8R4SQF7zgKKCJ1W0KnJiE4pJ6 kSSbTiJgfcM9USQa4CVkOuBJeR9tykyCrmzKEsn8iODTZLIWZq8z5jjoWZNCpF1rlzrKe6/CLz2u IzTrdSUBeLPoCN2yKEBYQcsSNmW0mfFs24I4xxuRJxqEPEE9YSx+B4q+UarZSNuIvAKahRjKWxZZ VmyBZKeeJ8QRdjJmtMuc0Apt8D/Axf8zWcdrz3NmsoA1KER+HygEAkpc5pqrNRIdbtOs3Fcyac6a oqgSWdFUhxH7hBE/sDOu1ftQ1NoM8XJxl4tNWqNKWmgjl7KNyoOa9eNrXjcsA2ytIpqZri2dNri8 rFxHC2mciIVcoiToTXpttx3m9EbmJOtZC9SPQFFoA7LIlC3IuGJBpbOsJZltgMHCF+Vp2WTUSSGD iQgm2pQEfxOhES60ewPszYbUQ4XEBjcG+wO4/QPEaLtOgT4xQAANg1oKtm+R1hWa9FYfeV0zbcbz /MCpF0ttm3jKZZRm2i5HuXdBD8+risQ3liWOmmliKFhhXqMztsqBjijmvu97nnFgdsp8Ley3+XzR pGjv5nOvrnanwB0Chdxj6OIDLJSm45ZlVWyw2aJ3BfoA9DKO8I7EJ1IUkr3SDjO9Rgq4Wrk0XdH8 
OhwpUJV4n34+/+Hiy/mVOBPXrVaa9VXSDcx5nkfAmaTUgS9607bqCHqi7kpRfYkfwELTviapotEk gTKqG2A/QP1z1VAzLCPuNHrnH968vTyf/3J1/vP86uLzOSAIpkJ6R7RkAFc34KWpEPgbGCtRoTaR 3mAEPXj75so+8BK5BIa7lciWk+fkv02Z7LAK6FVo6/RbkeamnZrJu4EeZyKYz+MsUgob5/PAqnHj UV2ffnND/X6L7qKAgeOnkrC6HKHM8L+RSaMFAZ3AU57UHWI6Adtt4khJ7kULgnHzOcrefD7RqwGm JLYBx4HFKRCmC8pelYJ/RuRHWVyoIsOfCB+5mbgQnXqUdlyadtrDuyhrpDJzUDdAfyVrBDkB0xGY SYIZEXxqOwL9lihA+LQdjh9U52neSPtwE1pUh7RZ6jVXclPcyQSsKu6Rs2zxM7VAkFRmoPNgWSCw pOhZsIxDEGFIwEIPDIFGAXhwQ1AMQQwtjjguk7kC9uQYhxhWB2Cs48qquEvRiix2uhGUIIgQqkJj sTS0At3tDtVR14FiBPcmR0ptZQBCUjXsiRDeCBLVSNKKW0jgLlF+b+jrbV5s8zkHJWcoipOp3Uvk NL2b2KHdgiPxAygnQLIAj70lGkMBX04gsx0D8rB8WC5QlpxGAAQaHM2LKhxYxoenJXI4gNMijOlr QdxcSTQEd2YK8qENMRxI1BraB0ZsEBIszsqwlRXNZGitTDeY2CFJl+suQ44tugB6VAxBmU40NO5k 6Hd9CjIuLl0pdcahnv7y5QuzjVpTqIuILXDRaBuWpELDcgfqNgV/1JhaDpyJDSAAzgFMozRriuMr UZRsZmE/OSIHW3YFTt26rsvTk5PtdhvqQK+oVidqefLHP3377Z9esJJIEuIfWI4jLTrrEZ5QG7oq 4XdGkX1vdq7Hj2ne5UaCNZFkbsnHQPz+3KRJIU6Pp1ahIBe3yhv/NwYOFMjcTMpUBtr6LUbP1PGz 8Gvli2di4vadTNla6ejHKmqIYkghQRuoJBhRF2AfwJrFRZPXgaO+lPgKFDWEXolcNKvATt4xA+YH LBXldGJ54PjlDWLQ5QzDV0orqjlqCWKLNF8WDul/ZraJyGZqDYHkRZ09CIt241rMEDc5XN6tB+II jVlhqpA7UCK6XQ4WQNt5KDn40Q7P513pOjzm0zEExvR5hnm1uzFBwSFccC9mrtQ5XI1+EEjTllUz KBhXNYKCBtdhwdTW4MiKBwgyEOT3URTHG6t7AJ16SRwT+JldgB7uPANr12cFxzVAHjgTL+mJBMfp dND2gre2yTKK1Hs82qEKA+5sNNrpAvhyYgDMhF/94nNPvS0XH3ubwnswAqzg4B4ZbDlgJmwJmUxI pYl/5I+w08Dq7xvNNB4DgZtE/Q8DThif8QSVAmEqJ13G3cfhlrac0OghvN/gEE+ZXSKJVntkq687 RmXqQbu0THNUvc4ehXFWgJdolSLxUdve9RXIOcbHY7ZMC6AmQ0sOp9MZOXsd+Qt0P0yLrBoMIN34 GjHapIqMG5JpDf+BV0FxKyUZgJYEzYI5VMi6C/s3iJxdr/6yhz0soZEh3K77vJCBwdBwTPuRQDLq KBuEIAcSDmWRPEfWBhlwmLNBHdmGZrlXuAlGqEDMHANCWgZwp8Z2GC4QnoSYiyYBRcj3tZKl+Er4 sH19ST1Mdf9budREphOnA3kKOqQ9c8NdJ9Q964W+XYbuBr2UAy8L4OAFeDxuAtdlc8O0NhIHX72r ty1SoJApOPenLqo3hjBukvAPZ06PllhmEsNQnYk6qXoz09Sz261Bw4YbMJ0978+lH7txL46dBIV6 JTf3EP4FVariQgVTtKeHxMdWLphLhrSy2F+mCx/+dDbEn950IMmsjx5OmUTVNs05ZtcrPusSc4CO XXzHKp1cUfxzAqhg7unkhwpYmo6NTkAAUHbLEoJupf30IdgHV+pbuDzc7/i516df3wyXPxudwv2M E/P8vq4ihfTMmKzMxkjPocVFNQmLi/KdPkbSB4Po11eFgrBPfLz6IpAQnGHbRrunLb1lUcTm0TV1 PgZ10ESPkqu3OmIXQAT1GDLKyT/BmIcj+9TFPbCwJwB5EsM8sCkGkuWT/ljo9e0385GEmgvn228c Zjtkx8Ykc9Jzr+zMdGYqRldohlQyysiAO4PQdoMaux70KaesJSmm0nxwM+LJ4ce0Gz3cwh90HwRj 5nNE9IDQtVj8BgGg0jmjuyjNKJkKaBwfoyoysSuH4+P4dCA9jDLmecANeDEbDS/U9QvYmICD5elw OdrZeGMSjCNBnvmUkVI9CVTyYZkz3HMAa+4xCg+YhOHUnG5eGsXOBFbiOe7Ec7GlQyNKhIBdyAFK wjp/BA5aW3308K6pKj5AoA0tZXWMGXU+Lzdanw64h2BOPsgaMbHdYkosOaerxZiMBDpVZFcStFZ+ fG/WhQkwZX6XVjAW2GcS/Pjxp/NguOl6Ghw0Ds7dRyMTh+sehPsELRdo4gRPGcMUesqQf14NBx0O 7UuROdQx6RtNtiFhB1kb4/WP78Ej8VvnuKE9KYnXMr6dSzr9QTbFoU4G6x02Iyb2UKh7Cq+iJZUS wErirEFasdH9jIc9TR5TMrOWoLh1iRUe4NKZDgfryyxaiQkNTjBQ1NxIseRdVGkLVlYFFvWIJk1O Vmki5O9NlKETLpdLwAUzzbop5OkpXhTv+ViKS0eUjJsqrXdAgkgVOlFPJ1hOx8WOFzrpIMk5WSYg nmmdiitcNrYz4RJDrvH0JC4SvWQcYE5SkLvoObTnxRxnnVNt1IyRmvbDOH7s9WcoAIAPQGH9/lTH 2d0WSU3uaQDtuUtU1JIdUrpxQUGmBaFMphiX8G/62WVEl7f2YLnaj+XqYSxXfSxXo1iuuliuHsbS FQncWBtiGkkYCzP7OcjRI1M3QuRpzqN4zf2wAgcrbQCiKI1zbWSKS8c6cSgn4wkIqW3ndIgetkeg KZf2VAUnrDRI5H7MOGtH3hT9OYPpQFUP5qWYc/V9x8DdsSdhSAf2CxrO8pZEdRR25GKVFQsQW4vu rAUwE/0TYc5s5HfzBediepbK//Sfn3/8+AG7IyjfnEXSMNxENCy4lMnzqFqpoTS1XmUJ7Eg9uwfD NEwDPDowDuZZjvi/9wVWciDjiC0dJxaiBA+ADuZtN/f4Ogh6z/U5t37OTM6Z4TPh57XfLmoPkd58 +vT+zec3PgXo/v/6rsAY2nalw8XH9LAdhv6b291SHMeAUOu4tjV+7po6tG454nEba8D2wokXN70H rw4x2KPxR3eV/6+Ugi0BQoU6VfMUQh0cX/5LceWAPoYRB/Ubbh6cnRXb5sie46I4or/PSenk/lg1 K1m/Pf/zxYfLi7ef3nz+0XFU0OH4eHXySpz/9EXQkSOqWbbcEZ621Xi4DerPLWMWSQH/Goy2kqbm NAaMen95qbN/GyyLxTop1IwhPOeTcQuNQ0bOk9iH+kgbMcq0G+9UDNMJMFUUo1e/4dpXVehaKipE 
XqBL1egAQVeCm4pxOioJgUegs0sKBsFVC9BEFW+1iV0qzirrKuoRpLQlsWeNGYXEgxMoJ6dqMnQA 7DjZ5VkRJZqD4Uk7WKuj68DFNbgJVZmlEG+8DmxaWA/DE9eWYfRDe2jCeI3JqTMcZtYdedV7sUDd +jrgtenx05bRfm8Aw5bB3sO6c0knjlTrhfUSIsBOnGsM5D18tVuv90DBhmHyuMZNNEyXwuojCAHF OgU3F3hyDTYCvVmA0NuJbsrq1IlhZYEHfsG7TXL810ATpNv7119HutdVdvx3UYKvLvh0Ohghptv5 PbjnoQzF+ccfpgEjR7VR4q8NlhKC2aSkgyPldCTOpzLziZLZUh9ZdhUiNmhrRs294ZUsKz183IEL UAKeqQnZtmfK0C/ACgILe4ZLmfZAY5WkxQzvB7hHW+ZzJK7WMst0Yd3F+8tz8HCwcBMliDPD5zAd R/V4LKPrOfj+Qg8UHtpAc4VsXKGjRQd3SdjpNpooQpGj0Z2zPrtPlIwZjhpkXqooVS7aE1w2w3JK F0PkZtgOs7PM3cNuSGe3G9EdJYcZY/6posqoLmcAR9PTiN138OuxONTkvvh8Is1rUwqTpTFoU1C8 oFZnICpIYlBgzH9FztmnolKmcBoelrsqXa1rzPDB4JCKNrH7T2++XF58oCrIV1+3HuIIi87Ia53x 8eQZ1p5gZA5f3HoS5K35fIxzdRPCQB0Ef/pNfO55xhMMxnESDP/0m7hy/cyJWngFoKaasi8k6Kw6 w8akp5UIxtXGbPhxa0tazLpgKG+GhbL6CNFd35Afbc+eQaEchWl8Qup0WWoaTsxgt9ah/9FrXJaY 600m452gdUzCzGcBQ28HLfuqKtzPQBaxzBQwGvbuzmHqDgZd9XIcZtvHLRSCaUv7a+7k9Zx+cY6i DEScuIOnLpONq2LdnTmwU583ACa+0+gaSRxV6P6vua/9jA4mltgDn9oMxGIjNgUYdUtdatyAJ4V2 ARQIlZVNHMmdTZ+/6qzRsQmPr1HrLrCQP4Ii1EVeVE1bVMCJ8OV3dh+5idBCVXoqAsdfyWVe2AoE /GzX6Fu+7K5xVAYo4YZiV0X5Sk4Y1szA/KpL7D3pQtK2HY65Tnvnq5q7wUO938PgQ7EYz/EbzHp8 MOh3K3d9bdSlDnYYLTHuQqiiLWh3CLcnvFd7U9rYXZ8ITQIM5X8P9tDrEfQ0LPS7fh85acAPE8u4 l9Ze9eq5A9ug/cq4goCpVpTSd6yw8RBdw9zawLPWCvv2qT5jtr+t733I2X1nHkbJnaWzYl936AWo /l84A0U3QlKqhmyLqXVbIu9kVoCbBBEYFrv+Zotdp+FogP4IXi0qSOBftXse5bfkMb7728VMvPvw M/z/Vn6EGANvHMzE3wEB8a6oINbiWy+4EREWytYcRBWNwmsJBI2Sy3j1i++LfeqsAxPZuoK3W7pr 9YXAqqVqwxd8AUVeI90Ea62lqUuF36ZwfuiWGZdpbFd83Yhk2F9OjCW4J7pnuK43GSpOJ23Qbue1 f3nx7vzD1XlY3yNfmZ++k1bonqDjivTBXoWHGDNhn8QNPrlxPMgfZVaOOJA6BjOlyRiDiQDc9NLG XXwbNbK+dlRhIC3KXVLEIfYErqJLQKLegkc5dcKtRy1ex9wgrMlUH320bi0+BmqIX/sawIeONEav iUYSQtECy/L5ceiP26SZoHwk/Hl+u03cdKaur6YF9jFtVz3pDrdKaM101vBcZiLUzuxOmGsoWRqp zSJ2b2N8zM19YlAvlJ+Wy6jJaiFziDIo7KWLnaBl3QsULCfMLazb6VYBJS6ybbRTzlF4pISPs/p0 Nw0T6ZQyg6j0p+iWdTHe7BANX0MC6IQoxRKFM1Q18ZrlmMODEfVHqm+b5l+/CgZE5kk5Zoxbpw7W iS4UY7SStV4/P5hMr1+2ZpWyibFbZhTEJVgcl1OOQH2Wz58/98V/PO4JMCphVhS34KIA7LEAUVxS 8x4brhdnd2vo5ZqWEFgyXstreHBDWU/7vMkpWffAUNoQaf8aGAHuTWD50fTv2UROY1V8QMg9+KBB 245f8pTu+WOyRaLK1a9LoKvoCMewJOiGIFJxmgYcuMN+7IoGbz5g4k3zi7wHjk83dMcdWvFkg8PO NXpbVPRkuceicyZ8Auxj5l/PRnepqPYf8Jx/2mk05xd5WrdVuy/cQy99DQ/9WDYpmq9EtEXJMOvo EcO5RNNh1dbbLB5k0Y73XsTXbgqut0pufgx3YG2QtGK5NJjCQ7NJcSGr2BhV3LE0TmsHjOmHcHgw xPvaAIXeCEo+aHgyDImVaNv6B7svLqYf6fzv2Mykyz5q+yYITpdEea/MJQzb+SkxYwlp+dZ8mcIs HyjRq/2BzlziDzrNiCWbneuv7uWRJtfXWvncvb3rCnDoVQpWQVp27OgI540bFj4zrXbs21uwToUp hquE211a1U2UzfXVyzm6bHN7IKrxtJcOHrxOY30WcLALcD2Pdbkl+A6mOgLpiaVYpgAW4nUdrodu IX+35r0s0Nd71dHjmAR+wR6so8Gx51emFPwQlW8Kkge1vC6WM6pFCab9UqpBLzxRCHSl0qBsdZ/H /SQUDGzXNT4UQAYA4A87gsFXvXMip8CKy9z2HQ0MgH77zUNgXaUzWuNH+fiuItJXI9yiu/ZAgGXb +IBrcDNRUhJzmU0LIt/7QalhE2DvKpoT+37JNTQfvuQHFnzAeVuAkx0/U4F2uw2HPEQri99DpGo7 ef8UlfSocVqh24fFcuuIW1B8m/LUBkd8GSSnFO7ELaHAT307WkUa4fsZkKDH9W3w0PJ5/ENr1z08 u3JzNjNYe7eg26WBHrufX8ZowC6pJsRBdYkWo/m/R3Rn+thwPP2hP0v7bifOghAPatRmmiU3Uex+ xyubx/xqqLbKzSm3dne9t6a9FxMNOw85ecjNfFCHz+0NGzp9TTR7dQ3YA1dGpnZzMDzpoXKwScuL Y65tOe6+m4fN2yCAfMoVFme3x5If+27JQHf7AgP3KnBC73kid5qv8grbrb23hddQuWroL/oGMh5l 244EjA7G+WUrBE7TWuZ3aCSUDarxbzvF4M0Khvptl9ELrXaX+ZUAE9t9OuyiecTp0rpaWHTxiKvV LX96oqvVgX+gq6XfVwEspvHRZVKjBVGP+GTUp/OqCM8KD4yZA5EwOdp7r4LZhIlbXgQOU3rvt2++ iPhdQBZ5Iw9Y9jK8Mk0gfuJKC7eqr3ND3cw7KCYZJHhHXhcyXrc6ttyxcj63y75BTxuwT9kMBx7o /XVFeK8N0UVo7plFr0TG00+5nsH8ctLP5pFJBjEXtZke094G55q1B7HLvh11KguGUqhtq36nx564 amrrW2gvMSZHjW1DclNSZwO/iF5Q1H9/ImkyLEc2F+KAeWPpvJeBXsnAoOruixor0BERphbZ7s3s K5SoH6cflH03GKYXYxkagnTqgf3h+vxO+US2hwqex3pCv3WAETFqQyfsbJL7mRLXx3Tz4Bhl9Mb+ 
wj3TtvpvKaaya3t9V5lTIUzjQedlk7npaTtmMIBMAGU7iqVT6gYK4wTo3MqtAsGooloXTC92IgBn V+dgsS6B6Kiv3jvI45Gag72h1QtxvK8u3a3LFuLl/o5Jr/Rbj3jFI9QjI1Rjqn8djwWPxvcVnIvv CTKnywTd1+3YSkzTCrquGMLXu+uXpzbXgvyOze5tUKS9b/UzuIttieeDb4hwRhOvVDM6IsXz+Gkf /I3vsOZS7PfTBvcV9vhyJo/OkPxO+/jxoRnReT2a30fU8t0pLEhMnqkpLcopmtS42yfTwWJblTWE wdWKj8MYaD8AhVDGTTp++FIyqOcX2q1dNPRaHfvaH7zj48gDnQV2eYFHGO+nxa4/nO4YHDScajdt xSd36POe3m8Qa/aSuVcnXttvC/orb3lgj1PAtzXGx788YPzw1NgOf/VQnYHt9fVohTD7flhngKdO PQqZxyFYF1CYE1LTWKJmJByv27VknE6HTPESuSLFEyF0tPCVWeRHk+8315bfGgPv/wBR06QN """.decode("base64").decode("zlib") ##file ez_setup.py EZ_SETUP_PY = """ eJzNWmtv20YW/a5fwagwJCEyzfdDgbLoNikQoOgWaVNg4XjleVpsKJIlKTvaRf/73jvDp2Qp7SIf lkVqmxzeuc9zzx3pmxfFod7m2WQ6nf49z+uqLklhVKLeF3Wep5WRZFVN0pTUCSyavJPGId8bTySr jTo39pUYr8WnpVEQ9ok8iFmlH5rFYWn8tq9qWMDSPRdGvU2qiUxSga/UWxBCdsLgSSlYnZcH4ymp t0ZSLw2ScYNwrl7ADXFtnRdGLvVOrfzVajIx4JJlvjPEvzfqvpHsirysUctNr6VaN741X5xYVorf 96COQYyqECyRCTMeRVmBE3Dv/tUl/g6reP6UpTnhk11Slnm5NPJSeYdkBklrUWakFt2i3tKl2pTB Kp4bVW7Qg1HtiyI9JNnDBI0lRVHmRZng63mBQVB+uL8/tuD+3pxMfkE3Kb8ytTFKFEa5h98rNIWV SaHMa6KqtCweSsKHcTQxGSaN86pDNXnz9vtvP/zwy+bXt+9/fvePH421MbXMgMXT7smH9z+gW/HJ tq6L1c1NcSgSU+eWmZcPN01OVDdX1Q381212MzWucBOzce/tyr2bTHbc33BSExD4HxWwWf/GNexN 7evi4JiuKR4eZitjFkWOw4iMLdvxLR55EY3jgIbS8VkgAkZmywtSvFYKDWMSEc9yhedbjqQ08oVw pR17duj6jJ6R4ox18QM/DP2YRyTgkWSeZ4UWibkVOqHD4/iylE4XDwwgEbeDmDtUBIEtieuQQPiO 8GTknLPIHetCqWszS7LQjWMSuH4Yx6HPCI+lT6zAji5K6XRxIxIxuMsDwbjjOF4o7TCWISdBEEvC zkjxxroEjuX5xPEE94QtKAtDKSw3JsQTgQyFf1FK7xdGHWJHPugRccKkpA63QR/LpS61mfe8FHaU L9SVDvV9N+YBxDWUoUd4GNsOCCKxFZ2xiB3nC9jDBQdPBiF3uCOlsD3Lit3Akw7xzkSaHeWLtKzA ozIgxKEht6RLiUU9UNCK7JA54UUpnS6BHdixIwRzfemFIhLEDhgPiO2AVCc8J+UoX6QdQaJBEXEp IgiWH7MYpEibhzSM5JmsY0f5IizBQy+IHBbHEZU0dKmMLJf4lgAxtrgoxW+lECqkHUjOwTDf920v 8mwWQh7yOIoD/5yUo6yjFo1t1yaMUNexwBmQr6H0POZDwENbXpTSWQQpJ2HPgHuSSpfFIZWxFzAL XAXZK5yLUjqLIqw6KGDXYZzGLHQokx6koRNIJyLyXNb5Y4uEiCWPLFAHMg8STboCatMPAwGYYwfn Iu2PLSJSOIRLQAc7tGwhwLkhgIxPGQAXCc7VkX8Uo4i7MrC92GOMkCi0PUgc7oaUMe5yn5+REowt cv0gArSObDsARIkiL3RABCCf78WCOdZFKT1KMT8g0g8p+Be6AFRDYIEhnudCgfnkXDUGY4uoIyMS +g6Adkx86gLYWhBqLnwJLcF3z0gJxxY5FsRIxoQzlwS2L3zb9qEMoTVEwnbP5ks4tsgnkYx9L7JC 7gXEkjQImbSlA2GAR865CgjHFnmAlYQ7ICrEAvRcz7ZtyUXk2vAvPKdLdNTVLOxpTgweiTmNGKZg SEnkWtggrctSOosYJW4E2AC9w4tcZmHOQraBsxkT4OSLUjqL7NCxQwA5CHTMme1bfmwRP6KugDqP /XORjscWge7Ms6Ap2ehh6sWB8JikworAVmadi3R8hAyQZNCgHeG7UcQDQCcihBUAeLHA9c716UZK Z5EUEFpX+MQOqe0wCBPzPZuGgnguiURwUUrQeZdA2dgSUZM4ggMw2bEbuQC6fuxArwIpf0wGxA5Y ajWpy8NK8+YtqbZpQlvaDBxsIj4zAYzxnbrzFpltsxYeDtdNuJDG5pGkCbA2sYFbc9BpkwGtXxpI 5BYrZUAijfY+Uv+W5umHePEEOGINtA9FqBfNrfis7wJNb5eBnGbli3Un5bYVfdfLwwvoM5D616+R ZVY1FyXQ8/loBV5TNKmxoKH5V0CmCbBp/sIw5j/lVZXQdMDigZnD37u/LaYnwq46M0ePFqO/UB/x Oannjr5fQnDLTLlLO/SI46tFDU1eH3HyZafWhpJKrAfEfAmEfwMTxzqvTLYv4TedTN0LXKTksLb9 SRMkYP/f7ut8B35gMCQcYKLI+E1n9mDgw/FsRz5BLGEGegRXEXQQOA9NK0i91VPZfaP0vVFt833K cSgh2tdDae2Ale13VJQw6xGYGKtesJKFg0yG3jUkDC+dUvuMq1eEcT9yxL2Bo8n8aZuwbbu7AK1x wtTyjNnNbGGCktpL97glyhlMo1tRjubcpwRGJ9pnguBLyEid4ErlLAd/pKUg/NCrD3vAkHk/drva rhkxlZi60VJJo0Kp0jhEDZ4sz3ilfdOqURBIFHQqeATLKqlhXIQBcjCW6og39ueZUGOhHnG51guc mqfow2fHXNSymRlFI0yN5GW+h52EVkXXGTF2oqpg1NNzal909/cqX0qSwFz886Gqxe7tZ/RXpgMB Q2oN9/SASihCCxqPKYjG6OHVbDNU/Xwi1UajENi/NmbFp4dNKap8XzJRzRBhcPtdzvepqHDYHQDo 8WNdE1B1HPKgcdt80SMJpty6L5pBXTYeOyrBtuyWR4XWY0BbJCZ4VpT13FriJgOQa4C62+nVcEin 7WnNpgnMRgHzGmXoAAGwH8saOUg9fAbhu5daQBo6pHl0usNItNkk13zaa/x6PX3ZuGrxqpE9VGEs 4Fe98rs8k2nCanDNaoj+w8j/VbSf/rLts/9Mvs9fr6+qRVfLbQ2rE6mP2Rjwp4xksxpLqisRwAw8 hVE10py6YLXsswxS2TR+SgVkSLv8RB7WEJYyAJAAW1oNZVJW4Ih9heUwAwmHNvTG9YeB8jPzSN7H 
7GM2/25fliAN4FwLuCqP+tYCulafy8Ik5UN1a91d7lkqfmklxjGARB+HczmstNujOr3DV74BaxWS 559Gop7LwfNZ8yaBkkjoHjv4j3n9fQ594XI+6077XFl/7XaLxQ/lOeqzb55pqqqMSd8UjDRnmpIo +NQ2JLU+6FMU4/+0yWqIxqPctsl+qcfiPdz1tMFq3L/ve+aZvpjrbtg2Q2wqrN6TtDeiaTLjRtKe FJfQa6gD2bqFFEp1nrV8dW0MwOz6qgLufVUh9Z4OC+foKFPnKsgd9g70mfFyTBEr8ihA+zVQct0U fsuTbN62kHapFleVDMUpnvwjdPOWWiNUta9DkVZ1NddiFysssG8f8wQTqBAE+2WrTtXVxwjP8VKp yEEQeqNqvZTmD6NVSMYxLuN38YKV5hMpszn6+frrXfqguwHWBsmr57L8SqUEHoDPxaPI8A8wpwBl J1uRFsj73ulsG3CPLlWAnGD+4xH9HF0xgZawNABdJnhrB+WcCXAkvAJ1iMwXEFo8IR4TGGerSr09 7AEKwc1JsyVAd8Nx+h1BZd5mszmZzAHExAo9rMTsCNsi3eK50I1pC+EFJeqnvPzUbLo0Ct1dclqT 5uMVRAqFElfVZIIoAh5girWrBSC5r8SmckrRdKuhAebia0YRkmJ5kjID0D0hVCrLllhNJ68Bo1DJ Wic4WTbEKRWieKV/zI+41zg7WxhWfbGaqi2O+p4quQYfTPiZFyKbnyz7xngPpP/mqUxqAB+IMfhX 0W3A8E9L/ITnCaOHdIGVWIYAjSwvy71KjlQcCVNxH6YHsvBaqPUtJrZX83HJuSEcDDBxIJkvxhpr FFHWaKxYTp/oFNwJD0xlhx7Du5dgGMShcHUMAbDBSu3C0rwS88UJRFT1SgkdPm+6WQtaoGCKv7Sw NfkzF/bvHWT6HAjL4/Jcx+577rtLn32pHvsWqFWzqm0Qz5Hpo88ULzFpPTx0WH0isV9zecBQk7p1 SsnGY8RoilAxw9IYzA4s3+3AUHPEIdvjHNIMZO3VxEi5OIVeoPy8eImnLXcLlaZPYlaqtBYGtvEv pgpain4+6lWo9mkPgUX7DCbAT/POrDHhTIbE3dxsGm9tNsYaRkLLtEx79pdHhH8CwCtwxbmYVnkq oFbPjMYt6Ydmoon9CaEvxS5/VHirIqE/ulYTMHSOGqA3/QLuHjH1s5S8Karfx2RlMHkN2c7pMPgn Bjr4eYF/H01tq/PZ/j+n5KUy6wR/UcpJNj9Xd2253Y1nduVsawGJD1Zh94fAMZUp+OT5DMVdvpID OvWV5hemMJ3m059PaNF02SLKFEDwQTWiEo9/IQmBJPUJPX1G3mz+HujUtP2ShVkcxtPnVH994vQb BuZi1hxrFl1/akeYqofnD+qpgSVC90laX+tzYhD5gMPdARF5mMVlM/8g12rPlTuxvUMU5+7ZNf6J K+Y9q1ZC2l6omuaspLP+WXfMjO/eNUfUsm2qzx5Ty67Z6RFQt+jbKf5xVa7g3xKwAsaHhmlqQtZu ZELz3VXzxV33slmBxV3rLHComE71pKCb9NAxEAEYIet2YlBfC1m3d80HUeuixfvz4XS+UYxhs2my vnNJI2NpKLe8aihR64BXx8buSA3T4Br0NCtBSradTz9mw+91fMzmt//64+7l4o+poieL4Rij3h5g 0TOIDY1cfbEmNQSiwIvpaZG2iKhVhf/frpRgU1Hvub24gzFMOfKleqofwugKj1Z3z5s/e2pyQjb0 qFN94IAJmNH6cb2ebTZYsJvNrPsUJEWJoKaq4deOaoft37f2HbxzfQ3O0qUyaF+D2umWO6u75/qi woheJi7S138BSGV4QQ== """.decode("base64").decode("zlib") ##file activate.sh ACTIVATE_SH = """ eJytU99P2zAQfvdfcaQ8ABqN+srUh6IhUYmViXSdNECum1waS6ld2U6zgva/75ykNP0xpGnkIYl9 n8/fffddB8aZtJDKHGFRWAczhMJiAqV0GQRWFyZGmEkVitjJlXAYwEVq9AJmwmYXrANrXUAslNIO TKFAOkikwdjla8YS3JyCs3N4ZUCPTOERLhUEp/z+7gufDB/G3wd3/NtgfBvAM3wGl6GqkP7x2/1j 0DcE/lpq4yrg216hLDo4OFTFU8mqb6eu3Ga6yBNI0BHnqigQKoEXm32CMpNxBplYIQj6UCjWi4UP u0y4Sq8mFakWizwn3ZyGOd1NMtBfqo1fLAUJ2xy1XYAfpK0uXBN2Us2bNDtALwScet4QZ0LN0UJJ TRKJf63BC07XGrRLYo7JnrjXg4j0vNT16md0yyc3D9HwfnRE5Kq0S7Mjz9/aFPWOdSnqHTSJgAc9 inrvtqgJbyjUkE30ZjTZEjshXkSkD4HSKkHrTOGNhnvcOhBhnsIGcLJ3+9aem3t/M3J0HZTGYE6t Vw5Wwkgxy9G2Db17MWMtnv2A89aS84A1CrSLYQf+JA1rbzeLFjrk/Ho44qPB1xvOrxpY2/psX0qf zPeg0iuYkrNRiQXC007ep2BayUgc96XzvpIiJ2Nb9FaFAe0o8t5cxs2MayNJlAaOCJlzy6swLMuy +4KOnLrqkptDq1NXCoOh8BlC9maZxxatKaU8SvBpOn2GuhbMLW5Pn71T1Hl9gFra8h77oJn/gHn/ z1n/9znfzDgp8gduuMqz """.decode("base64").decode("zlib") ##file activate.bat ACTIVATE_BAT = """ eJx1kEEOgjAQRfc9xSxoAlfQkIiBCBFKg8iKZBbSKhu6kPvHFqQ0Ct3N9P2flzmJx0uBkpK8xQhN VtX3KMeENSGiMyES0ksY1AidkP0gOuBVWfAafAL6mfC8CD3uXUgw4QuKZR7btr0c3aCoKTLMxl9I F8Yp8VdrFhUJYgAW2zeK6tT10eOvjV7RCXiqUcHtmnGz0nb/clN6DpCDJddi56q0bRHPGfu6Hm0s YTH5AJ7udMY= """.decode("base64").decode("zlib") ##file deactivate.bat DEACTIVATE_BAT = """ eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgWIgK0q FlWqXJpcICVYpGzx2OAY4oFsPpCLbjpQCLvZILVcXFaufi5cACHzOrI= """.decode("base64").decode("zlib") ##file distutils-init.py DISTUTILS_INIT = """ eJytlcuq2zAQhvd6iuGEEhuC6ToQzqIXyKKl0C66EzrWOFarSEaSc3n7jizH8SWhXdSLoIx+jWa+ GY/VsbEugPVMpZW/DsuzcEaZg4eb4YM1lTp8E86jg9X0r/JgbAABJ+VCKzSaExytbDVuwFs4I5TC 
QOsRVIBgoVJGQqgRfJBavTEmlTPiiLCjYIpGhLroLXHLhzYo7Xm0jxS/rDLZTJ6Nwyo4r5RGzvMN rAc365ypanBirDvGRTa9JofdIpRscWbwn28Z0HNjVsRF1pni8/KDMh2RGa6CRpS/xQFBBHjnQTQN UuAR0BuRMolQxKJtKYKyBoRPxqsPeLw7en3JGWqPKQzepcB5oQxRCNn7Dcyy62R4wRJsg3eIHdGp lMhxrowK5K65rvO8cChklrNbW0T5qH3Kjj9jq9X9ziKuKNdQ1ui3jFkteewAnsQdRE+FjbLiI/04 9dbGdIuFikmsYGHNPOqqr0FlW+otKt6jS5Kw0yWE/F6LWWMtMNzTKavDOjlZjToJLySgG2Z++7BW Q2yjqiy0ndRhaJ1JYvYPTCjyJadpAYbCzKpwwMCba6itoRqX5GgQFtOtjvuwOTuXNVoE7hssVaXK HSXWOKzUZffVGuzzJ1DJGGdFtG+H16O3d5fH7tI8WcYwlsFOL71dmbNnQdIwkLbkvG+Nx5vsWf5j NLOzC5I00J6RjLPuKUnaXJL0QRgpnIy7T8mORUDL/4l6EdU0pr+AJ8Vz8KPNx+BnJGdnY49/3v/8 8mkL32vbagn71N4z9v3hUxytr/G9SM8+QE3jVNBwpU9ghNhW1YY+W777e64VeZIWvVnTZEMat2ku 01dMFuwP2YOSYA== """.decode("base64").decode("zlib") ##file distutils.cfg DISTUTILS_CFG = """ eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg 9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q= """.decode("base64").decode("zlib") ##file activate_this.py ACTIVATE_THIS = """ eJx1UsGOnDAMvecrIlYriDRlKvU20h5aaY+teuilGo1QALO4CwlKAjP8fe1QGGalRoLEefbzs+Mk Sb7NcvRo3iTcoGqwgyy06As+HWSNVciKaBTFywYoJWc7yit2ndBVwEkHkIzKCV0YdQdmkvShs6YH E3IhfjFaaSNLoHxQy2sLJrL0ow98JQmEG/rAYn7OobVGogngBgf0P0hjgwgt7HOUaI5DdBVJkggR 3HwSktaqWcCtgiHIH7qHV+esW2CnkRJ+9R5cQGsikkWEV/J7leVGs9TV4TvcO5QOOrTHYI+xeCjY JR/m9GPDHv2oSZunUokS2A/WBelnvx6tF6LUJO2FjjlH5zU6Q+Kz/9m69LxvSZVSwiOlGnT1rt/A 77j+WDQZ8x9k2mFJetOle88+lc8sJJ/AeerI+fTlQigTfVqJUiXoKaaC3AqmI+KOnivjMLbvBVFU 1JDruuadNGcPmkgiBTnQXUGUDd6IK9JEQ9yPdM96xZP8bieeMRqTuqbxIbbey2DjVUNzRs1rosFS TsLAdS/0fBGNdTGKhuqD7mUmsFlgGjN2eSj1tM3GnjfXwwCmzjhMbR4rLZXXk+Z/6Hp7Pn2+kJ49 jfgLHgI4Jg== """.decode("base64").decode("zlib") if __name__ == '__main__': main() ## TODO: ## Copy python.exe.manifest ## Monkeypatch distutils.sysconfig
bsd-3-clause
6,592,291,573,075,368,000
42.997553
179
0.696613
false
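# --- Editor's note on the virtualenv.py content above (illustrative sketch,
# not part of the original file). create_bootstrap_script() returns the full
# virtualenv.py source with user-supplied hook code appended, as its docstring
# describes. A minimal way to generate such a script, assuming the file above
# is importable as `virtualenv`; the package name 'MyPackage' and the output
# filename are placeholders, not taken from the original:
import virtualenv

EXTRA_TEXT = """
import os, subprocess

def after_install(options, home_dir):
    # runs inside the generated bootstrap script, after the env is created
    subprocess.call([os.path.join(home_dir, 'bin', 'easy_install'), 'MyPackage'])
"""

script = virtualenv.create_bootstrap_script(EXTRA_TEXT)
open('my-bootstrap.py', 'w').write(script)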
nouiz/pydy
examples/Kane1985/Chapter5/Example5.1.py
1
2090
#!/usr/bin/env python # -*- coding: utf-8 -*- """Example 5.1 from Kane 1985.""" from __future__ import division from sympy import Dummy, Matrix from sympy import expand, solve, symbols, trigsimp from sympy.physics.mechanics import ReferenceFrame, Point, dot, dynamicsymbols from util import msprint, subs, partial_velocities from util import generalized_active_forces, potential_energy g, m1, m2, k, L, omega, t = symbols('g m1 m2 k L ω t') q1, q2, q3 = dynamicsymbols('q1:4') qd1, qd2, qd3 = dynamicsymbols('q1:4', level=1) u1, u2, u3 = dynamicsymbols('u1:4') ## --- Define ReferenceFrames --- A = ReferenceFrame('A') B = A.orientnew('B', 'Axis', [omega * t, A.y]) E = B.orientnew('E', 'Axis', [q3, B.z]) ## --- Define Points and their velocities --- pO = Point('O') pO.set_vel(A, 0) pP1 = pO.locatenew('P1', q1*B.x + q2*B.y) pD_star = pP1.locatenew('D*', L*E.x) pP1.set_vel(B, pP1.pos_from(pO).dt(B)) pD_star.v2pt_theory(pP1, B, E) pP1.v1pt_theory(pO, A, B) pD_star.v2pt_theory(pP1, A, E) ## --- Expressions for generalized speeds u1, u2, u3 --- kde = [u1 - dot(pP1.vel(A), E.x), u2 - dot(pP1.vel(A), E.y), u3 - dot(E.ang_vel_in(B), E.z)] kde_map = solve(kde, [qd1, qd2, qd3]) ## --- Velocity constraints --- vc = [dot(pD_star.vel(B), E.y)] vc_map = solve(subs(vc, kde_map), [u3]) ## --- Define forces on each point in the system --- K = k*E.x - k/L*dot(pP1.pos_from(pO), E.y)*E.y gravity = lambda m: -m*g*A.y forces = [(pP1, K), (pP1, gravity(m1)), (pD_star, gravity(m2))] ## --- Calculate generalized active forces --- partials = partial_velocities(zip(*forces)[0], [u1, u2], A, kde_map, vc_map) Fr_tilde, _ = generalized_active_forces(partials, forces) Fr_tilde = map(expand, map(trigsimp, Fr_tilde)) print('Finding a potential energy function V.') V = potential_energy(Fr_tilde, [q1, q2, q3], [u1, u2], kde_map, vc_map) if V is not None: print('V = {0}'.format(msprint(V))) print('Substituting αi = 0, C = 0...') zero_vars = dict(zip(symbols('C α1:4'), [0] * 4)) print('V = {0}'.format(msprint(V.subs(zero_vars))))
bsd-3-clause
5,703,394,762,670,879,000
32.66129
78
0.62482
false
js850/pele
pele/takestep/adaptive_step_temperature.py
1
6316
import numpy as np from pele.takestep import TakestepInterface __all__ = ["AdaptiveStepsizeTemperature"] class AdaptiveStepsizeTemperature(TakestepInterface): """ adjust both the stepsize and the temperature adaptively Parameters ---------- stepclass : the step taking class target_new_min_prob : the target probability for a step ending up in a new minimum. Used to adjust the stepsize. target_new_min_accept_prob : the target probability that a step that ends in a new minimum is accepted. Use to adjust the temperature Note: the total acceptance probability is accrat = 1 - target_new_min_prob * (1 - target_new_min_accept_prob) so if you want a total acceptance probability of 0.5, you must choose both other probabilities accordingly interval : the interval at which to adjust temperature and stepsize Tfactor : the factor with which to multiply (or divide) the temperature sfactor : the factor with which to multiply (or divide) the stepsize ediff : if two minima have energies that are within ediff from each other then they are considered to be the same minimum verbose : print status messages Notes ----- We will base the stepsize adjustment on the probability of ending up in a different minimum We will base the temperature adjustment on the probability of accepting a move that ended up in a different minimum """ def __init__(self, stepclass, target_new_min_prob=0.8, target_new_min_accept_prob=0.3, interval=100, Tfactor=0.95, sfactor=0.95, ediff=.001, verbose=False): self.stepclass = stepclass self.target_new_min_accept_prob = target_new_min_accept_prob self.target_new_min_prob = target_new_min_prob self.interval = interval self.Tfactor = Tfactor self.sfactor = sfactor self.ediff = ediff self.verbose = verbose self.energy = None self.coords = None self.ncalls_tot = 0 self.reset() def reset(self): self.nattempts = 0 self.naccept = 0 self.nsame = 0 def takeStep(self, *args, **kwargs): """ basinhopping calls this to take a step """ self.stepclass.takeStep(*args, **kwargs) def updateStep(self, accepted, driver=None): """ this is the function basinhopping uses to report results """ self.ncalls_tot += 1 trial_energy = driver.trial_energy trial_coords = driver.trial_coords if self.energy is None: #first time called. Save energy and coords self.energy = driver.markovE self.coords = driver.coords.copy() return self.nattempts += 1 if accepted: self.naccept += 1 #determine if the new minima is the same as the last one same = False if abs(self.energy - trial_energy) <= self.ediff: #if np.std(self.coords - trial_coords) <= self.xdiff: same = True self.nsame += 1 #print abs(self.energy - trial_energy), np.std(self.coords - trial_coords), np.max(np.abs(self.coords - trial_coords)) if not same and accepted: self.energy = driver.markovE self.coords = driver.coords.copy() if self.nattempts % self.interval == 0: # if self.verbose: # print " acceptance probability %.4g" % (float(self.naccept) / self.nattempts) self.adjustStep() self.adjustTemp(driver) self.reset() def adjustStep(self): """adjust the step size increase the step size if we're ending up in the same minima too often, else decrease the step size """ fnew = 1. - float(self.nsame) / self.nattempts if fnew < self.target_new_min_prob: self.stepclass.scale(1. 
/ self.sfactor) else: self.stepclass.scale(self.sfactor) #print some status info if self.verbose: print "adaptive step and temperature: naccept nsame ndiff naccept_diff %d %d %d %d new min probability %.4g" % ( self.naccept, self.nsame, self.nattempts-self.nsame, self.naccept-self.nsame, float(self.naccept - self.nsame) / self.nattempts) print " stepsize is now %.4g ratio %.4g target %.4g" %(self.stepclass.stepsize, fnew, self.target_new_min_prob) def adjustTemp(self, driver): """adjust the temperature increase the temeperature if new minima are rejected too often, else decrease the temperature """ ndiff = self.nattempts - self.nsame ndiff_accept = self.naccept - self.nsame if ndiff == 0: faccept = 1 else: faccept = float(ndiff_accept) / ndiff if faccept > self.target_new_min_accept_prob: driver.acceptTest.temperature *= self.Tfactor else: driver.acceptTest.temperature /= self.Tfactor if self.verbose: print " temperature is now %.4g ratio %.4g target %.4g" % (driver.acceptTest.temperature, faccept, self.target_new_min_accept_prob) if __name__ == "__main__": import numpy as np from pele.takestep import displace from pele.systems import LJCluster #from pele.takestep import adaptive natoms = 38 sys = LJCluster(natoms=38) # random initial coordinates coords = sys.get_random_configuration() takeStep = displace.RandomDisplacement( stepsize=0.4 ) tsAdaptive = AdaptiveStepsizeTemperature(takeStep, interval=300, verbose=True) db = sys.create_database() opt = sys.get_basinhopping(database=db, takestep=tsAdaptive, coords=coords) opt.printfrq = 50 opt.run(5000)
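# --- Editor's note: illustrative check, not part of the original file. ---
# The class docstring above ties the two targets to the overall acceptance
# rate via accrat = 1 - target_new_min_prob * (1 - target_new_min_accept_prob).
# With the defaults used in __init__ (0.8 and 0.3) that is roughly 0.44, and
# hitting an overall rate of 0.5 while keeping target_new_min_prob = 0.8 would
# need target_new_min_accept_prob = 1 - 0.5/0.8 = 0.375.
def overall_acceptance(new_min_prob, new_min_accept_prob):
    return 1.0 - new_min_prob * (1.0 - new_min_accept_prob)

assert abs(overall_acceptance(0.8, 0.3) - 0.44) < 1e-9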
gpl-3.0
5,482,753,251,001,163,000
34.488764
126
0.581539
false
t794104/ansible
lib/ansible/modules/cloud/podman/podman_image_info.py
1
8244
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = """ module: podman_image_info author: - Sam Doran (@samdoran) version_added: '2.8' short_description: Gather info about images using podman notes: - Podman may required elevated privileges in order to run properly. description: - Gather info about images using C(podman) options: executable: description: - Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman) default: 'podman' type: str name: description: - List of tags or UID to gather info about. If no name is given return info about all images. """ EXAMPLES = """ - name: Gather info for all images podman_image_info: - name: Gather info on a specific image podman_image_info: name: nginx - name: Gather info on several images podman_image_info: name: - redis - quay.io/bitnami/wildfly """ RETURN = """ images: description: info from all or specificed images returned: always type: dict sample: [ { "Annotations": {}, "Architecture": "amd64", "Author": "", "Comment": "from Bitnami with love", "ContainerConfig": { "Cmd": [ "nami", "start", "--foreground", "wildfly" ], "Entrypoint": [ "/app-entrypoint.sh" ], "Env": [ "PATH=/opt/bitnami/java/bin:/opt/bitnami/wildfly/bin:/opt/bitnami/nami/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "IMAGE_OS=debian-9", "NAMI_VERSION=0.0.9-0", "GPG_KEY_SERVERS_LIST=ha.pool.sks-keyservers.net \ hkp://p80.pool.sks-keyservers.net:80 keyserver.ubuntu.com hkp://keyserver.ubuntu.com:80 pgp.mit.edu", "TINI_VERSION=v0.13.2", "TINI_GPG_KEY=595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7", "GOSU_VERSION=1.10", "GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4", "BITNAMI_IMAGE_VERSION=14.0.1-debian-9-r12", "BITNAMI_APP_NAME=wildfly", "WILDFLY_JAVA_HOME=", "WILDFLY_JAVA_OPTS=", "WILDFLY_MANAGEMENT_HTTP_PORT_NUMBER=9990", "WILDFLY_PASSWORD=bitnami", "WILDFLY_PUBLIC_CONSOLE=true", "WILDFLY_SERVER_AJP_PORT_NUMBER=8009", "WILDFLY_SERVER_HTTP_PORT_NUMBER=8080", "WILDFLY_SERVER_INTERFACE=0.0.0.0", "WILDFLY_USERNAME=user", "WILDFLY_WILDFLY_HOME=/home/wildfly", "WILDFLY_WILDFLY_OPTS=-Dwildfly.as.deployment.ondemand=false" ], "ExposedPorts": { "8080/tcp": {}, "9990/tcp": {} }, "Labels": { "maintainer": "Bitnami <[email protected]>" } }, "Created": "2018-09-25T04:07:45.934395523Z", "Digest": "sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b", "GraphDriver": { "Data": { "LowerDir": "/var/lib/containers/storage/overlay/a9dbf5616cc16919a8ac0dfc60aff87a72b5be52994c4649fcc91a089a12931\ f/diff:/var/lib/containers/storage/overlay/67129bd46022122a7d8b7acb490092af6c7ce244ce4fbd7d9e2d2b7f5979e090/diff:/var/lib/containers/storage/overlay/7c51242c\ 
4c5db5c74afda76d7fdbeab6965d8b21804bb3fc597dee09c770b0ca/diff:/var/lib/containers/storage/overlay/f97315dc58a9c002ba0cabccb9933d4b0d2113733d204188c88d72f75569b57b/diff:/var/lib/containers/storage/overlay/1dbde2dd497ddde2b467727125b900958a051a72561e58d29abe3d660dcaa9a7/diff:/var/lib/containers/storage/overlay/4aad9d80f30c3f0608f58173558b7554d84dee4dc4479672926eca29f75e6e33/diff:/var/lib/containers/storage/overlay/6751fc9b6868254870c062d75a511543fc8cfda2ce6262f4945f107449219632/diff:/var/lib/containers/storage/overlay/a27034d79081347421dd24d7e9e776c18271cd9a6e51053cb39af4d3d9c400e8/diff:/var/lib/containers/storage/overlay/537cf0045ed9cd7989f7944e7393019c81b16c1799a2198d8348cd182665397f/diff:/var/lib/containers/storage/overlay/27578615c5ae352af4e8449862d61aaf5c11b105a7d5905af55bd01b0c656d6e/diff:/var/lib/containers/storage/overlay/566542742840fe3034b3596f7cb9e62a6274c95a69f368f9e713746f8712c0b6/diff", "MergedDir": "/var/lib/containers/storage/overlay/72bb96d6\ c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/merged", "UpperDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/diff", "WorkDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/work" }, "Name": "overlay" }, "Id": "bcacbdf7a119c0fa934661ca8af839e625ce6540d9ceb6827cdd389f823d49e0", "Labels": { "maintainer": "Bitnami <[email protected]>" }, "ManifestType": "application/vnd.docker.distribution.manifest.v1+prettyjws", "Os": "linux", "Parent": "", "RepoDigests": [ "quay.io/bitnami/wildfly@sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b" ], "RepoTags": [ "quay.io/bitnami/wildfly:latest" ], "RootFS": { "Layers": [ "sha256:75391df2c87e076b0c2f72d20c95c57dc8be7ee684cc07273416cce622b43367", "sha256:7dd303f041039bfe8f0833092673ac35f93137d10e0fbc4302021ea65ad57731", "sha256:720d9edf0cd2a9bb56b88b80be9070dbfaad359514c70094c65066963fed485d", "sha256:6a567ecbf97725501a634fcb486271999aa4591b633b4ae9932a46b40f5aaf47", "sha256:59e9a6db8f178f3da868614564faabb2820cdfb69be32e63a4405d6f7772f68c", "sha256:310a82ccb092cd650215ab375da8943d235a263af9a029b8ac26a281446c04db", "sha256:36cb91cf4513543a8f0953fed785747ea18b675bc2677f3839889cfca0aac79e" ], "Type": "layers" }, "Size": 569919342, "User": "", "Version": "17.06.0-ce", "VirtualSize": 569919342 } ] """ import json from ansible.module_utils.basic import AnsibleModule def get_image_info(module, executable, name): if not isinstance(name, list): name = [name] command = [executable, 'image', 'inspect'] command.extend(name) rc, out, err = module.run_command(command) if rc != 0: module.fail_json(msg="Unable to gather info for '{0}': {1}".format(', '.join(name), err)) return out def get_all_image_info(module, executable): command = [executable, 'image', 'ls', '-q'] rc, out, err = module.run_command(command) name = out.strip().split('\n') out = get_image_info(module, executable, name) return out def main(): module = AnsibleModule( argument_spec=dict( executable=dict(type='str', default='podman'), name=dict(type='list') ), supports_check_mode=True, ) executable = module.params['executable'] name = module.params.get('name') executable = module.get_bin_path(executable, required=True) if name: results = json.loads(get_image_info(module, executable, name)) else: results = json.loads(get_all_image_info(module, executable)) results = dict( changed=False, images=results ) module.exit_json(**results) if __name__ == '__main__': main()
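# --- Editor's note: illustrative sketch, not part of the original module. ---
# Outside of Ansible, the same data the module gathers comes from the podman
# commands it wraps (see get_all_image_info/get_image_info above):
#     podman image ls -q           # list image IDs
#     podman image inspect nginx   # JSON metadata for one or more images
# A minimal standalone equivalent, assuming `podman` is on PATH; 'nginx' is
# just a sample image name:
import json
import subprocess

def inspect_images(names):
    out = subprocess.check_output(['podman', 'image', 'inspect'] + list(names))
    return json.loads(out)

# e.g. inspect_images(['nginx'])[0]['RepoTags']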
gpl-3.0
-807,328,375,958,436,000
38.444976
911
0.617783
false
robertour/miller-knowles
miller_knowles.py
1
14416
import random import time import csv import sys from sortedcontainers import SortedSet import networkx as nx from networkx.classes.function import neighbors from networkx.classes import graph from networkx.classes.graph import Graph import matplotlib.pyplot as plt import numpy as np from sortedcontainers.sortedlist import SortedList np.set_printoptions(threshold=np.nan) from games import PD from variables import * class SocialNetwork(object): ID = 0 strategies = [COOP, DEFE] def __init__(self, fluct, rep, nt_seed, nt_desc, nt_randomseed, coop_prob = JUST_COOPERATORS, randomseed = None, b=1, n_per_gen=10, e_per_gen=2, epsilon = 0.99, max=1000, tourn=0.01, X=0.025, K=sys.maxsize, X2= 0.025): # this is for identification of the network self.id = self.__class__.ID self.__class__.ID += 1 self.fluct = fluct self.rep = rep self.nt_desc = nt_desc self.nt_randomseed = nt_randomseed self.coop_prob = coop_prob # set the PD game self.T = b self.R = 1 self.P = 0 self.S = 0 # seed for the network, this is useful to replicate exactly the same # experiment, particularly useful for debugging if randomseed == None: self.randomseed = time.time() else: print("WARNING: random seed is not null. Are you sure?") self.randomseed = randomseed random.seed(self.randomseed) # main parameters self.b = b self.n_per_gen = n_per_gen self.e_per_gen = e_per_gen if (epsilon >= 1.0): raise ValueError("""Epsilon cannot be bigger or equal to 1.0. You can use epsilon that are similar to 1.0, e.g 0.999999999 """) else: self.epsilon = epsilon self.max = max self.tourn = tourn self.X = X self.K = K self.X2 = X2 # counters self.gen = 0 self.count = 0 self.cooperators = 0 self.removed_nodes = 0 self.total_fit = 0 self.total_efit = 0 self.degrees = 0 self.size = 0 g = self.g = nx.Graph() # crate auxiliary network structures to increase efficiency self._max = max+n_per_gen self.eps_fitness = np.empty(self._max) self.degrees = np.empty(self._max) self.fitness = np.empty(self._max) self.fitness_of = np.empty(self._max, dtype=np.int_) self.free_indexes = [] self.node_set = SortedSet() # initialize the auxiliary structures for i in range(0, self._max): self.degrees[i] = 0 self.fitness_of[i] = -1 self.free_indexes.append(i) # create the network self.__create_from_seed(nt_seed, coop_prob) # define the game the nodes are going to play self.game = PD(b, self.fitness) self.treatment = '_'.join(str(x) for x in (self.nt_desc, self.coop_prob, self.fluct, self.b, self.X, self.K, self.X2)) self.signature = str(self.id) + '_' + \ str(self.rep) + '(' + self.treatment + ')' def __create_from_seed(self, seed, coop_prob): """ This method use the networks structure that comes in the parameter seed as a template for the graph. It adds the necessary attributes to run the algorithm, such as which nodes are cooperators and defectors based on the coop_prob parameter. A value from 0 to 1 indicating a probability of any node of being a cooperators. Assumes that it is called from the constructor. So it assumes a new SocialNetwork. 
""" self.count = -1 g = self.g # add nodes from the seed to the network for node in seed.nodes_iter(data = True): # define the attributes of the node id = node[0] if coop_prob == 1 or random.uniform(0,1) < coop_prob: st = COOP self.cooperators += 1 else: st = DEFE r_index = self.free_indexes.pop() # add the node g.add_node(id, st=st, nst=st, r_index=r_index) self.node_set.add(id) self.fitness_of[r_index] = id self.fitness[r_index] = 0 # update parameters of the graph if id > self.count: self.count = id self.size += 1 self.count += 1 # add edges from the seed to the network for e0, e1 in seed.edges_iter(): g.add_edge(e0, e1) self.__remove_isolated_nodes() def __remove_isolated_nodes(self): g = self.g to_remove = [] for n, adj in g.adj.items(): if (len(adj) == 0): to_remove.append(n) for n in to_remove: r_index = g.node[n]['r_index'] self.fitness_of[r_index] = -1 self.free_indexes.append(r_index) self.node_set.discard(n) g.remove_node(n) self.size -= 1 def add_node(self, st): """ Add a node to the network """ # calculate rest of the node attributes id = self.count r_index = self.free_indexes.pop() # add node self.g.add_node(id, st=st, nst=st, r_index=r_index, gen=self.gen) # update network structures self.node_set.add(id) self.fitness_of[r_index] = id self.fitness[r_index] = 0 self.degrees[r_index] = 0 # update network parameters if st == COOP: self.cooperators += 1 self.size += 1 self.count += 1 return id def play_games_and_remove_isolated_nodes(self): g = self.g node = g.node node_set = self.node_set adjacency = self.g.adj f = self.fitness ef = self.eps_fitness eps = self.epsilon degrees = self.degrees f.fill(0) total_fit = 0 total_efit = 0 total_degrees = 0 to_remove=[] for n1 in node_set: adj = adjacency[n1] len_adj = len(adj) # make sure to remove the nodes that has no more edges if (len_adj == 0): to_remove.append(n1) self.removed_nodes += 1 else: att1 = node[n1] r_index1 = att1['r_index'] #update the strategy n1_e = att1['st'] = att1['nst'] # play against all the neighbors for n2 in adj.keys(): # make sure to play just once, nodes should be in order # make sure all the adjacent nodes are in order if (n2 > n1): att2 = node[n2] if n1_e == att2['nst']: if n1_e == COOP: f[r_index1] += self.R f[att2['r_index']] += self.R total_fit += self.R + self.R else: f[r_index1] += self.P f[att2['r_index']] += self.P total_fit += self.P + self.P else: if n1_e == COOP: f[r_index1] += self.S f[att2['r_index']] += self.T total_fit += self.S + self.T else: f[r_index1] += self.T f[att2['r_index']] += self.S total_fit += self.T + self.S # this epsilon is important to give some of the nodes # some chance to cooperate ef[r_index1] = 1 - eps + eps * f[r_index1] total_efit += ef[r_index1] # keep the degrees updates for PA degrees[r_index1] = len_adj total_degrees += degrees[r_index1] # set the class attribute self.total_fit = total_fit self.total_efit = total_efit self.total_degrees = total_degrees # population will collapse if self.size - len(to_remove) < self.e_per_gen: print ("population collapsed with", count_coop(sn), "cooperators and", self.size - count_coop(sn), "defectors" ) # remove nodes that didn't have any edges for n in to_remove: r_index = g.node[n]['r_index'] self.fitness_of[r_index] = -1 self.free_indexes.append(r_index) self.node_set.discard(n) g.remove_node(n) self.size -= 1 def update_strategies(self): g = self.g self.gen += 1 cooperators = 0 degrees = self.degrees for n1 in g.nodes_iter(data = True): neighbors_n1 = g.neighbors(n1[0]) r_index1 = n1[1]['r_index'] n2_index = 
random.choice(neighbors_n1) n2 = g.node[n2_index] # check that the strategies are actually different if n1[1]['st'] != n2['st']: r_n1 = self.fitness[r_index1] r_n2 = self.fitness[n2['r_index']] # Look to see if difference is less than a millionth of # largest value and then assume equivalence epsilon_fitness = max(r_n2,r_n1) / 1000000 # if the neighbor has a bigger accumulated fitness if r_n2 > r_n1 + epsilon_fitness: # probP = (neighbour_fitness - focal_node_fitness) # ---------------------------------------- # b * max[k_focal_node, k_neighbour] if random.random() < \ (1.0 * (r_n2 - r_n1)) / \ (self.b * max(len(neighbors_n1), \ len(g.neighbors(n2_index)))): # update the strategy to a temporary vector n1[1]['nst'] = n2['st'] """ Poncela´s Formula gives to much weight to the number of nodes, this is an alternate version that would be worth to test: probability P = neighbour_fitness focal_node_fitness ------------------ - ----------------- b * k_neighbour b * k_focal_node if random.random() < (1.0 * r_n2) / \ (self.b*len(g.neighbors(n2_index)))-\ (1.0 * r_n1) / \ (self.b*len(neighbors_n1)): n1[1]['nst'] = n2['st'] """ # update cooperators counter if n1[1]['nst'] == COOP: cooperators += 1 self.cooperators = cooperators def growth_initial(self, growth): """ This method make sure that the first growth completes the nodes necessary to get to a consistent increment of 10 per generation. It just applies for starting networks that are smaller than self.n_per_gen """ if self.size < self.n_per_gen: temp = self.n_per_gen self.n_per_gen = self.n_per_gen - self.count growth(self) self.n_per_gen = temp def attrition(self, selection_method): g = self.g # it should be call losers winners = selection_method(self) # remove the winning nodes for winner in winners: # remove the node from the graph and update fitness arrays r_index = g.node[winner]['r_index'] self.fitness_of[r_index] = -1 self.free_indexes.append(r_index) self.node_set.discard(winner) g.remove_node(winner) self.size -= 1 # I have moved the removal of nodes with no edges to the play_games # phase to save optimize the code. The auxiliary method remove_isolated # has been created in order to produce real results. def remove_isolated(self, select_winners): g = self.g to_remove = [] for n, adj in g.adj.items(): if (len(adj) == 0): to_remove.append(n) self.removed_nodes += 1 if self.size - len(to_remove) < self.e_per_gen: print ("population collapsed with", self.count_coop(), "cooperators and", self.size - self.count_coop(), "defectors" ) for n in to_remove: r_index = g.node[n]['r_index'] self.fitness_of[r_index] = -1 self.free_indexes.append(r_index) self.node_set.discard(n) g.remove_node(n) self.size -= 1
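# --- Editor's note: illustrative sketch, not part of the original file. ---
# update_strategies() above imitates a randomly chosen neighbour with the rule
# used in its body: P = (f_neighbour - f_focal) / (b * max(k_focal, k_neighbour)),
# applied only when the neighbour's accumulated payoff is higher. The sample
# payoffs and degrees below are made up purely to show the scale of P:
def imitation_probability(f_focal, f_neighbour, k_focal, k_neighbour, b):
    if f_neighbour <= f_focal:
        return 0.0
    return (f_neighbour - f_focal) / (b * max(k_focal, k_neighbour))

# a node with payoff 3.0 and degree 4, next to a neighbour with payoff 6.0 and
# degree 6, at b = 1.3: P = 3.0 / (1.3 * 6) ~ 0.38
assert abs(imitation_probability(3.0, 6.0, 4, 6, 1.3) - 3.0 / 7.8) < 1e-12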
gpl-2.0
4,134,597,714,533,187,000
33.737349
83
0.450364
false
randomsync/robotframework-mqttlibrary
setup.py
1
1278
from setuptools import setup from os.path import abspath, dirname, join here = dirname(abspath(__file__)) # Get version exec(compile(open(join(here, 'src', 'MQTTLibrary', 'version.py'), "rb").read(), join(here, 'src', 'MQTTLibrary', 'version.py'), 'exec')) # Get the long description with open(join(here, 'README.rst')) as f: long_description = f.read() setup( name = 'robotframework-mqttlibrary', version = VERSION, description = 'MQTT Keyword Library Robot Framework', long_description = long_description, url = 'https://github.com/randomsync/robotframework-mqttlibrary', author = 'Gaurav Gupta', author_email = '[email protected]', license = 'Apache License 2.0', classifiers = [ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.7', 'Topic :: Software Development :: Testing', ], keywords = 'robotframework testing testautomation mqtt', package_dir = {'': 'src'}, packages = ['MQTTLibrary'], install_requires = ['robotframework', 'paho-mqtt'], )
apache-2.0
742,123,678,447,997,400
35.514286
136
0.597027
false
jmchilton/galaxy-central
modules/docutils/parsers/rst/languages/sv.py
1
3861
# Author: Adam Chodorowski # Contact: [email protected] # Revision: $Revision: 3184 $ # Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $ # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Swedish language mappings for language-dependent features of reStructuredText. """ __docformat__ = 'reStructuredText' directives = { u'observera': 'attention', u'caution (translation required)': 'caution', u'fara': 'danger', u'fel': 'error', u'v\u00e4gledning': 'hint', u'viktigt': 'important', u'notera': 'note', u'tips': 'tip', u'varning': 'warning', u'admonition (translation required)': 'admonition', u'sidebar (translation required)': 'sidebar', u'\u00e4mne': 'topic', u'line-block (translation required)': 'line-block', u'parsed-literal (translation required)': 'parsed-literal', u'mellanrubrik': 'rubric', u'epigraph (translation required)': 'epigraph', u'highlights (translation required)': 'highlights', u'pull-quote (translation required)': 'pull-quote', u'compound (translation required)': 'compound', # u'fr\u00e5gor': 'questions', # NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/: # u'fr\u00e5gor-och-svar': 'questions', # u'vanliga-fr\u00e5gor': 'questions', u'table (translation required)': 'table', u'csv-table (translation required)': 'csv-table', u'list-table (translation required)': 'list-table', u'meta': 'meta', # u'bildkarta': 'imagemap', # FIXME: Translation might be too literal. u'bild': 'image', u'figur': 'figure', u'inkludera': 'include', u'r\u00e5': 'raw', # FIXME: Translation might be too literal. u'ers\u00e4tt': 'replace', u'unicode': 'unicode', u'class (translation required)': 'class', u'role (translation required)': 'role', u'inneh\u00e5ll': 'contents', u'sektionsnumrering': 'sectnum', u'target-notes (translation required)': 'target-notes', u'header (translation required)': 'header', u'footer (translation required)': 'footer', # u'fotnoter': 'footnotes', # u'citeringar': 'citations', } """Swedish name to registered (in directives/__init__.py) directive name mapping.""" roles = { u'abbreviation (translation required)': 'abbreviation', u'acronym (translation required)': 'acronym', u'index (translation required)': 'index', u'subscript (translation required)': 'subscript', u'superscript (translation required)': 'superscript', u'title-reference (translation required)': 'title-reference', u'pep-reference (translation required)': 'pep-reference', u'rfc-reference (translation required)': 'rfc-reference', u'emphasis (translation required)': 'emphasis', u'strong (translation required)': 'strong', u'literal (translation required)': 'literal', u'named-reference (translation required)': 'named-reference', u'anonymous-reference (translation required)': 'anonymous-reference', u'footnote-reference (translation required)': 'footnote-reference', u'citation-reference (translation required)': 'citation-reference', u'substitution-reference (translation required)': 'substitution-reference', u'target (translation required)': 'target', u'uri-reference (translation required)': 'uri-reference', u'r\u00e5': 'raw',} """Mapping of Swedish role names to canonical role names for interpreted text. """
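# --- Editor's note: illustrative usage, not part of the original module. ---
# docutils resolves localized directive and role names through these dicts, so
# a Swedish reStructuredText source can write `.. observera::` and still get
# the canonical `attention` directive. Assuming a standard docutils install
# where this module lives at docutils.parsers.rst.languages.sv:
from docutils.parsers.rst.languages import sv

assert sv.directives[u'observera'] == 'attention'
assert sv.roles[u'r\u00e5'] == 'raw'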
mit
-1,002,549,217,628,744,800
42.875
81
0.652163
false
magne-max/zipline-ja
zipline/data/bundles/quandl_xjpx.py
1
1446
""" """ from io import BytesIO from itertools import count import tarfile from time import time, sleep from click import progressbar from logbook import Logger import pandas as pd import requests from six.moves.urllib.parse import urlencode from boto import connect_s3 import tarfile from . import core as bundles from zipline.utils.cli import maybe_show_progress log = Logger(__name__) seconds_per_call = (pd.Timedelta('10 minutes') / 2000).total_seconds() # Invalid symbols that quandl has had in its metadata: excluded_symbols = frozenset({'TEST123456789'}) ACCESS_KEY = 'AKIAJHRHBH4FEBEGWPWA' SECRET_KEY = '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' BUCKET_NAME = 'zipline-store' @bundles.register('quandl-xjpx', create_writers=False) def quandl_xjpx_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): conn = connect_s3(ACCESS_KEY, SECRET_KEY) bucket = conn.get_bucket(BUCKET_NAME) file_key = bucket.get_key('quandl-xjpx.tar') bs = BytesIO() file_key.get_contents_to_file(bs) bs.seek(0) with tarfile.open('r', fileobj=bs) as tar: tar.extractall(output_dir)
apache-2.0
1,609,607,526,491,879,200
27.352941
70
0.628631
false
czardoz/hornet
hornet/common/helpers.py
1
1802
# !/usr/bin/env python # # Hornet - SSH Honeypot # # Copyright (C) 2015 Aniket Panse <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import random from paramiko import RSAKey logger = logging.getLogger(__name__) def get_rsa_key_file(filename, password=None): try: key = RSAKey(filename=filename, password=password) except IOError: logger.info('RSA Key file not found, generating a new one: %s', filename) key = RSAKey.generate(1024) key.write_private_key_file(filename, password=password) return key def get_random_item(collection): if isinstance(collection, dict): all_keys = list(collection.keys()) r = random.choice(all_keys) return collection[r] elif isinstance(collection, list): return random.choice(collection) # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size # By Fred Cicera def human_readable(num, suffix=''): for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix)
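# --- Editor's note: illustrative usage, not part of the original module. ---
# human_readable() keeps dividing by 1024 until the value fits a single unit,
# and get_random_item() accepts either a dict (random value) or a list (random
# element). The sample values below are arbitrary:
assert human_readable(1802, 'B') == '1.8KB'
assert human_readable(125829120, 'B') == '120.0MB'
assert get_random_item(['a', 'b', 'c']) in ('a', 'b', 'c')
assert get_random_item({'x': 1, 'y': 2}) in (1, 2)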
gpl-3.0
-3,653,899,845,252,679,700
32.37037
104
0.68535
false
google/citest
tests/service_testing/http_predicate_test.py
1
2695
# Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=missing-docstring """Test HttpResponsePredicate""" import unittest from citest.base import ExecutionContext from citest.service_testing import ( HttpResponsePredicate, HttpResponseType) class HttpResponsePredicateTest(unittest.TestCase): def test_response_ok(self): tests = [(200, None, HttpResponseType(http_code=200, output='OK')), (None, 'OK', HttpResponseType(http_code=200, output='OK')), (404, None, HttpResponseType(http_code=404, output='NOT_FOUND')), (200, '^(?i)not found$', HttpResponseType(http_code=200, output='NOT FOUND')), (200, '(?i)not found', HttpResponseType(http_code=200, output='NOT FOUND')), (None, '(?i)not found', HttpResponseType(http_code=404, output='file not found!'))] for want_code, want_regex, have_response in tests: context = ExecutionContext() predicate = HttpResponsePredicate( http_code=want_code, content_regex=want_regex) result = predicate(context, have_response) self.assertTrue(result) self.assertEquals(have_response, result.value) def test_response_bad(self): tests = [(201, None, HttpResponseType(http_code=200, output='OK')), (None, 'OK', HttpResponseType(http_code=200, output='BAD')), (404, None, HttpResponseType(http_code=200, output='NOT_FOUND')), (200, '^not found$', HttpResponseType(http_code=200, output='NOT FOUND')), (200, 'not found', HttpResponseType(http_code=200, output='NOT FOUND')), (200, 'not found', HttpResponseType(http_code=404, output='file not found!'))] for want_code, want_regex, have_response in tests: context = ExecutionContext() predicate = HttpResponsePredicate( http_code=want_code, content_regex=want_regex) result = predicate(context, have_response) self.assertFalse(result) self.assertEquals(have_response, result.value) if __name__ == '__main__': unittest.main()
apache-2.0
4,577,354,240,214,632,000
39.223881
78
0.660111
false
edx/edx-ora2
openassessment/assessment/serializers/base.py
1
10553
""" Serializers common to all assessment types. """ from copy import deepcopy import logging from rest_framework import serializers from rest_framework.fields import DateTimeField, IntegerField from django.core.cache import cache from openassessment.assessment.models import Assessment, AssessmentPart, Criterion, CriterionOption, Rubric logger = logging.getLogger(__name__) # pylint: disable=invalid-name class InvalidRubric(Exception): """This can be raised during the deserialization process.""" def __init__(self, errors): Exception.__init__(self, repr(errors)) self.errors = deepcopy(errors) class CriterionOptionSerializer(serializers.ModelSerializer): """Serializer for :class:`CriterionOption`""" # Django Rest Framework v3 no longer requires `PositiveIntegerField`s # to be positive by default, so we need to explicitly set the `min_value` # on the serializer field. points = IntegerField(min_value=0) class Meta: model = CriterionOption fields = ('order_num', 'points', 'name', 'label', 'explanation') class CriterionSerializer(serializers.ModelSerializer): """Serializer for :class:`Criterion`""" options = CriterionOptionSerializer(required=True, many=True) class Meta: model = Criterion fields = ('order_num', 'name', 'label', 'prompt', 'options', 'points_possible') class RubricSerializer(serializers.ModelSerializer): """Serializer for :class:`Rubric`.""" criteria = CriterionSerializer(required=True, many=True) class Meta: model = Rubric fields = ('id', 'content_hash', 'structure_hash', 'criteria', 'points_possible') def validate_criteria(self, value): """Make sure we have at least one Criterion in the Rubric.""" if not value: raise serializers.ValidationError("Must have at least one criterion") return value @classmethod def serialized_from_cache(cls, rubric, local_cache=None): """For a given `Rubric` model object, return a serialized version. This method will attempt to use the cache if possible, first looking at the `local_cache` dict you can pass in, and then looking at whatever Django cache is configured. Args: rubric (Rubric): The Rubric model to get the serialized form of. local_cach (dict): Mapping of `rubric.content_hash` to serialized rubric dictionary. We include this so that we can call this method in a loop. Returns: dict: `Rubric` fields as a dictionary, with `criteria` and `options` relations followed. """ # Optional local cache you can send in (for when you're calling this # in a loop). local_cache = local_cache or {} # Check our in-memory cache... if rubric.content_hash in local_cache: return local_cache[rubric.content_hash] # Check the external cache (e.g. memcached) rubric_dict_cache_key = ( "RubricSerializer.serialized_from_cache.{}" .format(rubric.content_hash) ) rubric_dict = cache.get(rubric_dict_cache_key) if rubric_dict: local_cache[rubric.content_hash] = rubric_dict return rubric_dict # Grab it from the database rubric_dict = RubricSerializer(rubric).data cache.set(rubric_dict_cache_key, rubric_dict) local_cache[rubric.content_hash] = rubric_dict return rubric_dict def create(self, validated_data): """ Create the rubric model, including its nested models. Args: validated_data (dict): Dictionary of validated data for the rubric, including nested Criterion and CriterionOption data. 
Returns: Rubric """ criteria_data = validated_data.pop("criteria") rubric = Rubric.objects.create(**validated_data) # Create each nested criterion in the rubric, linking it to the rubric for criterion_dict in criteria_data: options_data = criterion_dict.pop("options") criterion = Criterion.objects.create(rubric=rubric, **criterion_dict) # Create each option in the criterion, linking it to the criterion CriterionOption.objects.bulk_create( CriterionOption(criterion=criterion, **option_dict) for option_dict in options_data ) return rubric class AssessmentPartSerializer(serializers.ModelSerializer): """Serializer for :class:`AssessmentPart`.""" class Meta: model = AssessmentPart fields = ('option', 'criterion', 'feedback') class AssessmentSerializer(serializers.ModelSerializer): """Simplified serializer for :class:`Assessment` that's lighter on the DB.""" # Django Rest Framework v3 uses the Django setting `DATETIME_FORMAT` # when serializing datetimes. This differs from v2, which always # returned a datetime. To preserve the old behavior, we explicitly # set `format` to None. # http://www.django-rest-framework.org/api-guide/fields/#datetimefield scored_at = DateTimeField(format=None, required=False) class Meta: model = Assessment fields = ( 'submission_uuid', 'rubric', 'scored_at', 'scorer_id', 'score_type', 'feedback', ) def serialize_assessments(assessments_qset): assessments = list(assessments_qset.select_related("rubric")) rubric_cache = {} return [ full_assessment_dict( assessment, RubricSerializer.serialized_from_cache( assessment.rubric, rubric_cache ) ) for assessment in assessments ] def full_assessment_dict(assessment, rubric_dict=None): """ Return a dict representation of the Assessment model, including nested assessment parts. We do some of the serialization ourselves here instead of relying on the Django REST Framework serializers. This is for performance reasons -- we have a cached rubric easily available, and we don't want to follow all the DB relations from assessment -> assessment part -> option -> criterion. Args: assessment (Assessment): The Assessment model to serialize Returns: dict with keys 'rubric' (serialized Rubric model) and 'parts' (serialized assessment parts) """ assessment_cache_key = "assessment.full_assessment_dict.{}.{}.{}".format( assessment.id, assessment.submission_uuid, assessment.scored_at.isoformat() ) assessment_dict = cache.get(assessment_cache_key) if assessment_dict: return assessment_dict assessment_dict = AssessmentSerializer(assessment).data if not rubric_dict: rubric_dict = RubricSerializer.serialized_from_cache(assessment.rubric) assessment_dict["rubric"] = rubric_dict # This part looks a little goofy, but it's in the name of saving dozens of # SQL lookups. The rubric_dict has the entire serialized output of the # `Rubric`, its child `Criterion` and grandchild `CriterionOption`. This # includes calculated things like `points_possible` which aren't actually in # the DB model. Instead of invoking the serializers for `Criterion` and # `CriterionOption` again, we simply index into the places we expect them to # be from the big, saved `Rubric` serialization. 
parts = [] for part in assessment.parts.order_by('criterion__order_num').all().select_related("criterion", "option"): criterion_dict = dict(rubric_dict["criteria"][part.criterion.order_num]) options_dict = None if part.option is not None: options_dict = criterion_dict["options"][part.option.order_num] options_dict["criterion"] = criterion_dict parts.append({ "option": options_dict, "criterion": criterion_dict, "feedback": part.feedback }) # Now manually built up the dynamically calculated values on the # `Assessment` so we can again avoid DB calls. assessment_dict["parts"] = parts assessment_dict["points_earned"] = sum( part_dict["option"]["points"] if part_dict["option"] is not None else 0 for part_dict in parts ) assessment_dict["points_possible"] = rubric_dict["points_possible"] assessment_dict["id"] = assessment.id cache.set(assessment_cache_key, assessment_dict) return assessment_dict def rubric_from_dict(rubric_dict): """Given a dict of rubric information, return the corresponding Rubric This will create the Rubric and its children if it does not exist already. Sample data (one criterion, two options):: { "prompts": [{"description": "Create a plan to deliver ora2!"}], "criteria": [ { "order_num": 0, "name": "realistic", "prompt": "Is the deadline realistic?", "options": [ { "order_num": 0, "points": 0, "name": "No", "explanation": "We need more time!" }, { "order_num": 1, "points": 2, "name": "Yes", "explanation": "We got this." }, ] } ] } """ rubric_dict = deepcopy(rubric_dict) # Calculate the hash based on the rubric content... content_hash = Rubric.content_hash_from_dict(rubric_dict) try: rubric = Rubric.objects.get(content_hash=content_hash) except Rubric.DoesNotExist as ex: rubric_dict["content_hash"] = content_hash rubric_dict["structure_hash"] = Rubric.structure_hash_from_dict(rubric_dict) for crit_idx, criterion in enumerate(rubric_dict.get("criteria", {})): if "order_num" not in criterion: criterion["order_num"] = crit_idx for opt_idx, option in enumerate(criterion.get("options", {})): if "order_num" not in option: option["order_num"] = opt_idx rubric_serializer = RubricSerializer(data=rubric_dict) if not rubric_serializer.is_valid(): raise InvalidRubric(rubric_serializer.errors) from ex rubric = rubric_serializer.save() return rubric
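# --- Editor's note: illustrative sketch, not part of the original module. ---
# rubric_from_dict() above deduplicates rubrics by content hash: the first call
# creates the Rubric (and its criteria/options) through RubricSerializer, and a
# second call with an identical definition should return the same row. The
# dictionary follows the sample shape from the docstring; this only runs inside
# a Django project with these models installed and migrated.
sample_rubric = {
    "prompts": [{"description": "Create a plan to deliver ora2!"}],
    "criteria": [
        {
            "order_num": 0,
            "name": "realistic",
            "prompt": "Is the deadline realistic?",
            "options": [
                {"order_num": 0, "points": 0, "name": "No",
                 "explanation": "We need more time!"},
                {"order_num": 1, "points": 2, "name": "Yes",
                 "explanation": "We got this."},
            ],
        },
    ],
}

rubric = rubric_from_dict(sample_rubric)
assert rubric_from_dict(sample_rubric).id == rubric.id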
agpl-3.0
548,526,941,250,778,600
34.531987
110
0.630721
false
jonobacon/ubuntu-accomplishments-viewer
accomplishments_viewer/PreferencesAccomplishmentsViewerDialog.py
1
4585
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Jono Bacon <[email protected]>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE

# This is your preferences dialog.
#
# Define your preferences in
# data/glib-2.0/schemas/net.launchpad.accomplishments-viewer.gschema.xml
# See http://developer.gnome.org/gio/stable/GSettings.html for more info.

import gettext
import locale
from locale import gettext as _

from accomplishments.util.paths import locale_dir
locale.bindtextdomain('accomplishments-viewer', locale_dir)
gettext.bindtextdomain('accomplishments-viewer', locale_dir)
locale.textdomain('accomplishments-viewer')

import logging
logger = logging.getLogger('accomplishments_viewer')

from accomplishments_viewer_lib.PreferencesDialog import PreferencesDialog


class PreferencesAccomplishmentsViewerDialog(PreferencesDialog):
    __gtype_name__ = "PreferencesAccomplishmentsViewerDialog"

    def finish_initializing(self, builder):  # pylint: disable=E1002
        """Set up the preferences dialog"""
        super(PreferencesAccomplishmentsViewerDialog, self).finish_initializing(builder)

        self.cb_daemonsessionstart = self.builder.get_object("cb_daemonsessionstart")
        self.cb_hideu1bubbles = self.builder.get_object("cb_hideu1bubbles")
        self.pref_publish = self.builder.get_object("pref_publish")
        self.pref_publish_label = self.builder.get_object("pref_publish_label")
        self.pref_publish_icon = self.builder.get_object("pref_publish_icon")

        self.publishedstatus = None

        #trophydir = self.libaccom.get_config_value("config", "trophypath")

        # Bind each preference widget to gsettings
        #settings = Gio.Settings("net.launchpad.accomplishments-viewer")
        #widget = self.builder.get_object('example_entry')
        #settings.bind("example", widget, "text", Gio.SettingsBindFlags.DEFAULT)

        # Code for other initialization actions should be added here.

    def prepare(self, daemon_handle):
        self.libaccom = daemon_handle
        self.populate_settings()

        self.publishedstatus = self.libaccom.get_published_status()
        if self.publishedstatus == 0:
            self.pref_publish.set_label(_("Publish..."))
        else:
            self.pref_publish.set_label(_("Stop Publishing"))

    def populate_settings(self):
        self.cb_daemonsessionstart.handler_block_by_func(self.cb_daemonsessionstart_toggled)
        self.cb_hideu1bubbles.handler_block_by_func(self.cb_hideu1bubbles_toggled)

        self.cb_daemonsessionstart.set_active(bool(self.libaccom.get_daemon_session_start()))
        print self.libaccom.get_daemon_session_start()
        print type(self.libaccom.get_daemon_session_start())
        self.cb_hideu1bubbles.set_active(self.libaccom.get_block_ubuntuone_notification_bubbles())

        self.cb_daemonsessionstart.handler_unblock_by_func(self.cb_daemonsessionstart_toggled)
        self.cb_hideu1bubbles.handler_unblock_by_func(self.cb_hideu1bubbles_toggled)

    def cb_daemonsessionstart_toggled(self, widget):
        print widget.get_active()
        self.libaccom.set_daemon_session_start(widget.get_active())

    def cb_hideu1bubbles_toggled(self, widget):
        self.libaccom.set_block_ubuntuone_notification_bubbles(widget.get_active())

    def on_pref_publish_clicked(self, widget):
        if self.publishedstatus == 0:
            self.libaccom.publish_trophies_online()
            self.pref_publish_label.set_text(_("Please see your web browser to continue..."))
            self.pref_publish.set_label(_("Stop Publishing"))
            self.pref_publish_icon.set_visible(True)
            self.publishedstatus = 1
        else:
            self.libaccom.unpublish_trophies_online()
            self.pref_publish_label.set_text(_("Trophies are no longer published."))
            self.pref_publish.set_label(_("Publish..."))
            self.pref_publish_icon.set_visible(True)
            self.publishedstatus = 0
gpl-3.0
-5,662,133,192,924,543,000
44.39604
98
0.714722
false
5g-empower/empower-runtime
empower/managers/ranmanager/lvapp/wifiapp.py
1
2751
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Base Wi-Fi App class."""

from empower_core.app import EApp

import empower.managers.ranmanager.lvapp as lvapp

from empower.managers.ranmanager.lvapp.resourcepool import ResourcePool

EVERY = 2000


class EWiFiApp(EApp):
    """Base Wi-Fi App class."""

    MODULES = [lvapp]

    def blocks(self):
        """Return the ResourceBlocks available to this app."""

        # Initialize the Resource Pool
        pool = ResourcePool()

        # Update the pool with all the available ResourceBlocks
        for wtp in self.wtps.values():
            for block in wtp.blocks.values():
                pool.append(block)

        return pool

    @property
    def wtps(self):
        """Return the WTPs."""

        return self.context.wtps

    @property
    def lvaps(self):
        """Return the LVAPs."""

        return self.context.lvaps

    @property
    def vaps(self):
        """Return the VAPs."""

        return self.context.vaps

    def handle_client_leave(self, lvap):
        """Called when a client leaves a network (no check on project)."""

        if not self.context.wifi_props:
            return

        if lvap.ssid == self.context.wifi_props.ssid:
            self.handle_lvap_leave(lvap)

    def handle_lvap_leave(self, lvap):
        """Called when an LVAP leaves a network."""

    def handle_client_join(self, lvap):
        """Called when a client joins a network (no check on project)."""

        if not self.context.wifi_props:
            return

        if lvap.ssid == self.context.wifi_props.ssid:
            self.handle_lvap_join(lvap)

    def handle_lvap_join(self, lvap):
        """Called when an LVAP joins a network."""

    def handle_device_down(self, wtp):
        """Called when a device disconnects from the controller."""

        self.handle_wtp_down(wtp)

    def handle_wtp_down(self, wtp):
        """Called when a WTP disconnects from the controller."""

    def handle_device_up(self, wtp):
        """Called when a device connects to the controller."""

        self.handle_wtp_up(wtp)

    def handle_wtp_up(self, wtp):
        """Called when a WTP connects to the controller."""
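A minimal sketch of how a concrete application might build on the base class above, overriding one of the no-op hooks; how the app gets registered and loaded by the empower-runtime is outside the scope of this file, and the logger setup below is illustrative only.

import logging

LOG = logging.getLogger(__name__)


class HelloWiFiApp(EWiFiApp):
    """Toy app: log every LVAP that joins the managed SSID."""

    def handle_lvap_join(self, lvap):
        # lvap.ssid is the same attribute the dispatch logic in EWiFiApp checks
        LOG.info("LVAP joined SSID %s", lvap.ssid)

        # blocks() aggregates the ResourceBlocks of every connected WTP
        for block in self.blocks():
            LOG.info("available block: %s", block)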
apache-2.0
770,378,161,406,520,600
25.708738
74
0.641948
false
keelerm84/powerline
powerline/lib/inotify.py
1
6156
# vim:fileencoding=utf-8:noet from __future__ import unicode_literals, absolute_import __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' __docformat__ = 'restructuredtext en' import sys import os import errno class INotifyError(Exception): pass _inotify = None def load_inotify(): ''' Initialize the inotify library ''' global _inotify if _inotify is None: if hasattr(sys, 'getwindowsversion'): # On windows abort before loading the C library. Windows has # multiple, incompatible C runtimes, and we have no way of knowing # if the one chosen by ctypes is compatible with the currently # loaded one. raise INotifyError('INotify not available on windows') if sys.platform == 'darwin': raise INotifyError('INotify not available on OS X') import ctypes if not hasattr(ctypes, 'c_ssize_t'): raise INotifyError('You need python >= 2.7 to use inotify') from ctypes.util import find_library name = find_library('c') if not name: raise INotifyError('Cannot find C library') libc = ctypes.CDLL(name, use_errno=True) for function in ("inotify_add_watch", "inotify_init1", "inotify_rm_watch"): if not hasattr(libc, function): raise INotifyError('libc is too old') # inotify_init1() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, use_errno=True) init1 = prototype(('inotify_init1', libc), ((1, "flags", 0),)) # inotify_add_watch() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint32, use_errno=True) add_watch = prototype(('inotify_add_watch', libc), ( (1, "fd"), (1, "pathname"), (1, "mask")), use_errno=True) # inotify_rm_watch() prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int, use_errno=True) rm_watch = prototype(('inotify_rm_watch', libc), ( (1, "fd"), (1, "wd")), use_errno=True) # read() prototype = ctypes.CFUNCTYPE(ctypes.c_ssize_t, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, use_errno=True) read = prototype(('read', libc), ( (1, "fd"), (1, "buf"), (1, "count")), use_errno=True) _inotify = (init1, add_watch, rm_watch, read) return _inotify class INotify(object): # See <sys/inotify.h> for the flags defined below # Supported events suitable for MASK parameter of INOTIFY_ADD_WATCH. ACCESS = 0x00000001 # File was accessed. MODIFY = 0x00000002 # File was modified. ATTRIB = 0x00000004 # Metadata changed. CLOSE_WRITE = 0x00000008 # Writtable file was closed. CLOSE_NOWRITE = 0x00000010 # Unwrittable file closed. OPEN = 0x00000020 # File was opened. MOVED_FROM = 0x00000040 # File was moved from X. MOVED_TO = 0x00000080 # File was moved to Y. CREATE = 0x00000100 # Subfile was created. DELETE = 0x00000200 # Subfile was deleted. DELETE_SELF = 0x00000400 # Self was deleted. MOVE_SELF = 0x00000800 # Self was moved. # Events sent by the kernel. UNMOUNT = 0x00002000 # Backing fs was unmounted. Q_OVERFLOW = 0x00004000 # Event queued overflowed. IGNORED = 0x00008000 # File was ignored. # Helper events. CLOSE = (CLOSE_WRITE | CLOSE_NOWRITE) # Close. MOVE = (MOVED_FROM | MOVED_TO) # Moves. # Special flags. ONLYDIR = 0x01000000 # Only watch the path if it is a directory. DONT_FOLLOW = 0x02000000 # Do not follow a sym link. EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects. MASK_ADD = 0x20000000 # Add to the mask of an already existing watch. ISDIR = 0x40000000 # Event occurred against dir. ONESHOT = 0x80000000 # Only send event once. # All events which a program can wait on. 
ALL_EVENTS = (ACCESS | MODIFY | ATTRIB | CLOSE_WRITE | CLOSE_NOWRITE | OPEN | MOVED_FROM | MOVED_TO | CREATE | DELETE | DELETE_SELF | MOVE_SELF) # See <bits/inotify.h> CLOEXEC = 0x80000 NONBLOCK = 0x800 def __init__(self, cloexec=True, nonblock=True): import ctypes import struct self._init1, self._add_watch, self._rm_watch, self._read = load_inotify() flags = 0 if cloexec: flags |= self.CLOEXEC if nonblock: flags |= self.NONBLOCK self._inotify_fd = self._init1(flags) if self._inotify_fd == -1: raise INotifyError(os.strerror(ctypes.get_errno())) self._buf = ctypes.create_string_buffer(5000) self.fenc = sys.getfilesystemencoding() or 'utf-8' self.hdr = struct.Struct(b'iIII') if self.fenc == 'ascii': self.fenc = 'utf-8' # We keep a reference to os to prevent it from being deleted # during interpreter shutdown, which would lead to errors in the # __del__ method self.os = os def handle_error(self): import ctypes eno = ctypes.get_errno() extra = '' if eno == errno.ENOSPC: extra = 'You may need to increase the inotify limits on your system, via /proc/sys/inotify/max_user_*' raise OSError(eno, self.os.strerror(eno) + str(extra)) def __del__(self): # This method can be called during interpreter shutdown, which means we # must do the absolute minimum here. Note that there could be running # daemon threads that are trying to call other methods on this object. try: self.os.close(self._inotify_fd) except (AttributeError, TypeError): pass def close(self): if hasattr(self, '_inotify_fd'): self.os.close(self._inotify_fd) del self.os del self._add_watch del self._rm_watch del self._inotify_fd def read(self, get_name=True): import ctypes buf = [] while True: num = self._read(self._inotify_fd, self._buf, len(self._buf)) if num == 0: break if num < 0: en = ctypes.get_errno() if en == errno.EAGAIN: break # No more data if en == errno.EINTR: continue # Interrupted, try again raise OSError(en, self.os.strerror(en)) buf.append(self._buf.raw[:num]) raw = b''.join(buf) pos = 0 lraw = len(raw) while lraw - pos >= self.hdr.size: wd, mask, cookie, name_len = self.hdr.unpack_from(raw, pos) pos += self.hdr.size name = None if get_name: name = raw[pos:pos + name_len].rstrip(b'\0').decode(self.fenc) pos += name_len self.process_event(wd, mask, cookie, name) def process_event(self, *args): raise NotImplementedError()
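A short usage sketch for the class above: subclass it, implement process_event(), register a watch through the private _add_watch wrapper loaded in __init__, and call read() whenever the inotify file descriptor is readable. The add_watch helper below is an assumption about intended use rather than part of this module; note the path must be bytes because the underlying call takes a c_char_p.

import select


class PrintingINotify(INotify):
    def add_watch(self, path, mask=INotify.MODIFY | INotify.CREATE | INotify.DELETE):
        # path: bytes, e.g. b'/tmp'; returns the watch descriptor
        wd = self._add_watch(self._inotify_fd, path, mask)
        if wd == -1:
            self.handle_error()
        return wd

    def process_event(self, wd, mask, cookie, name):
        print(wd, hex(mask), name)


watcher = PrintingINotify()
watcher.add_watch(b'/tmp')
while True:
    # block until at least one event is queued, then drain the non-blocking fd
    select.select([watcher._inotify_fd], [], [])
    watcher.read()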
mit
-1,538,661,437,192,925,000
32.456522
112
0.671702
false
pixlra/HARP-fork
Various/ThirdParty/pyqtgraph/graphicsItems/PlotItem/PlotItem.py
1
48518
# -*- coding: utf-8 -*- """ PlotItem.py - Graphics item implementing a scalable ViewBox with plotting powers. Copyright 2010 Luke Campagnola Distributed under MIT/X11 license. See license.txt for more infomation. This class is one of the workhorses of pyqtgraph. It implements a graphics item with plots, labels, and scales which can be viewed inside a QGraphicsScene. If you want a widget that can be added to your GUI, see PlotWidget instead. This class is very heavily featured: - Automatically creates and manages PlotCurveItems - Fast display and update of plots - Manages zoom/pan ViewBox, scale, and label elements - Automatic scaling when data changes - Control panel with a huge feature set including averaging, decimation, display, power spectrum, svg/png export, plot linking, and more. """ from pyqtgraph.Qt import QtGui, QtCore, QtSvg, USE_PYSIDE import pyqtgraph.pixmaps if USE_PYSIDE: from .plotConfigTemplate_pyside import * else: from .plotConfigTemplate_pyqt import * import pyqtgraph.functions as fn from pyqtgraph.widgets.FileDialog import FileDialog import weakref import numpy as np import os from .. PlotDataItem import PlotDataItem from .. ViewBox import ViewBox from .. AxisItem import AxisItem from .. LabelItem import LabelItem from .. LegendItem import LegendItem from .. GraphicsWidget import GraphicsWidget from .. ButtonItem import ButtonItem from .. InfiniteLine import InfiniteLine from pyqtgraph.WidgetGroup import WidgetGroup __all__ = ['PlotItem'] try: from metaarray import * HAVE_METAARRAY = True except: HAVE_METAARRAY = False class PlotItem(GraphicsWidget): """ **Bases:** :class:`GraphicsWidget <pyqtgraph.GraphicsWidget>` Plot graphics item that can be added to any graphics scene. Implements axes, titles, and interactive viewbox. PlotItem also provides some basic analysis functionality that may be accessed from the context menu. Use :func:`plot() <pyqtgraph.PlotItem.plot>` to create a new PlotDataItem and add it to the view. Use :func:`addItem() <pyqtgraph.PlotItem.addItem>` to add any QGraphicsItem to the view. 
This class wraps several methods from its internal ViewBox: :func:`setXRange <pyqtgraph.ViewBox.setXRange>`, :func:`setYRange <pyqtgraph.ViewBox.setYRange>`, :func:`setRange <pyqtgraph.ViewBox.setRange>`, :func:`autoRange <pyqtgraph.ViewBox.autoRange>`, :func:`setXLink <pyqtgraph.ViewBox.setXLink>`, :func:`setYLink <pyqtgraph.ViewBox.setYLink>`, :func:`setAutoPan <pyqtgraph.ViewBox.setAutoPan>`, :func:`setAutoVisible <pyqtgraph.ViewBox.setAutoVisible>`, :func:`viewRect <pyqtgraph.ViewBox.viewRect>`, :func:`viewRange <pyqtgraph.ViewBox.viewRange>`, :func:`setMouseEnabled <pyqtgraph.ViewBox.setMouseEnabled>`, :func:`enableAutoRange <pyqtgraph.ViewBox.enableAutoRange>`, :func:`disableAutoRange <pyqtgraph.ViewBox.disableAutoRange>`, :func:`setAspectLocked <pyqtgraph.ViewBox.setAspectLocked>`, :func:`invertY <pyqtgraph.ViewBox.invertY>`, :func:`register <pyqtgraph.ViewBox.register>`, :func:`unregister <pyqtgraph.ViewBox.unregister>` The ViewBox itself can be accessed by calling :func:`getViewBox() <pyqtgraph.PlotItem.getViewBox>` ==================== ======================================================================= **Signals** sigYRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>` sigXRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>` sigRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>` ==================== ======================================================================= """ sigRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox range has changed sigYRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox Y range has changed sigXRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox X range has changed lastFileDir = None managers = {} def __init__(self, parent=None, name=None, labels=None, title=None, viewBox=None, axisItems=None, enableMenu=True, **kargs): """ Create a new PlotItem. All arguments are optional. Any extra keyword arguments are passed to PlotItem.plot(). ============== ========================================================================================== **Arguments** *title* Title to display at the top of the item. Html is allowed. *labels* A dictionary specifying the axis labels to display:: {'left': (args), 'bottom': (args), ...} The name of each axis and the corresponding arguments are passed to :func:`PlotItem.setLabel() <pyqtgraph.PlotItem.setLabel>` Optionally, PlotItem my also be initialized with the keyword arguments left, right, top, or bottom to achieve the same effect. *name* Registers a name for this view so that others may link to it *viewBox* If specified, the PlotItem will be constructed with this as its ViewBox. *axisItems* Optional dictionary instructing the PlotItem to use pre-constructed items for its axes. The dict keys must be axis names ('left', 'bottom', 'right', 'top') and the values must be instances of AxisItem (or at least compatible with AxisItem). 
============== ========================================================================================== """ GraphicsWidget.__init__(self, parent) self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) ## Set up control buttons path = os.path.dirname(__file__) #self.autoImageFile = os.path.join(path, 'auto.png') #self.lockImageFile = os.path.join(path, 'lock.png') self.autoBtn = ButtonItem(pyqtgraph.pixmaps.getPixmap('auto'), 14, self) self.autoBtn.mode = 'auto' self.autoBtn.clicked.connect(self.autoBtnClicked) #self.autoBtn.hide() self.buttonsHidden = False ## whether the user has requested buttons to be hidden self.mouseHovering = False self.layout = QtGui.QGraphicsGridLayout() self.layout.setContentsMargins(1,1,1,1) self.setLayout(self.layout) self.layout.setHorizontalSpacing(0) self.layout.setVerticalSpacing(0) if viewBox is None: viewBox = ViewBox() self.vb = viewBox self.vb.sigStateChanged.connect(self.viewStateChanged) self.setMenuEnabled(enableMenu, enableMenu) ## en/disable plotitem and viewbox menus if name is not None: self.vb.register(name) self.vb.sigRangeChanged.connect(self.sigRangeChanged) self.vb.sigXRangeChanged.connect(self.sigXRangeChanged) self.vb.sigYRangeChanged.connect(self.sigYRangeChanged) self.layout.addItem(self.vb, 2, 1) self.alpha = 1.0 self.autoAlpha = True self.spectrumMode = False self.legend = None ## Create and place axis items if axisItems is None: axisItems = {} self.axes = {} for k, pos in (('top', (1,1)), ('bottom', (3,1)), ('left', (2,0)), ('right', (2,2))): axis = axisItems.get(k, AxisItem(orientation=k)) axis.linkToView(self.vb) self.axes[k] = {'item': axis, 'pos': pos} self.layout.addItem(axis, *pos) axis.setZValue(-1000) axis.setFlag(axis.ItemNegativeZStacksBehindParent) self.titleLabel = LabelItem('', size='11pt') self.layout.addItem(self.titleLabel, 0, 1) self.setTitle(None) ## hide for i in range(4): self.layout.setRowPreferredHeight(i, 0) self.layout.setRowMinimumHeight(i, 0) self.layout.setRowSpacing(i, 0) self.layout.setRowStretchFactor(i, 1) for i in range(3): self.layout.setColumnPreferredWidth(i, 0) self.layout.setColumnMinimumWidth(i, 0) self.layout.setColumnSpacing(i, 0) self.layout.setColumnStretchFactor(i, 1) self.layout.setRowStretchFactor(2, 100) self.layout.setColumnStretchFactor(1, 100) ## Wrap a few methods from viewBox for m in [ 'setXRange', 'setYRange', 'setXLink', 'setYLink', 'setAutoPan', 'setAutoVisible', 'setRange', 'autoRange', 'viewRect', 'viewRange', 'setMouseEnabled', 'enableAutoRange', 'disableAutoRange', 'setAspectLocked', 'invertY', 'register', 'unregister']: ## NOTE: If you update this list, please update the class docstring as well. 
setattr(self, m, getattr(self.vb, m)) self.items = [] self.curves = [] self.itemMeta = weakref.WeakKeyDictionary() self.dataItems = [] self.paramList = {} self.avgCurves = {} ### Set up context menu w = QtGui.QWidget() self.ctrl = c = Ui_Form() c.setupUi(w) dv = QtGui.QDoubleValidator(self) menuItems = [ ('Transforms', c.transformGroup), ('Downsample', c.decimateGroup), ('Average', c.averageGroup), ('Alpha', c.alphaGroup), ('Grid', c.gridGroup), ('Points', c.pointsGroup), ] self.ctrlMenu = QtGui.QMenu() self.ctrlMenu.setTitle('Plot Options') self.subMenus = [] for name, grp in menuItems: sm = QtGui.QMenu(name) act = QtGui.QWidgetAction(self) act.setDefaultWidget(grp) sm.addAction(act) self.subMenus.append(sm) self.ctrlMenu.addMenu(sm) self.stateGroup = WidgetGroup() for name, w in menuItems: self.stateGroup.autoAdd(w) self.fileDialog = None c.alphaGroup.toggled.connect(self.updateAlpha) c.alphaSlider.valueChanged.connect(self.updateAlpha) c.autoAlphaCheck.toggled.connect(self.updateAlpha) c.xGridCheck.toggled.connect(self.updateGrid) c.yGridCheck.toggled.connect(self.updateGrid) c.gridAlphaSlider.valueChanged.connect(self.updateGrid) c.fftCheck.toggled.connect(self.updateSpectrumMode) c.logXCheck.toggled.connect(self.updateLogMode) c.logYCheck.toggled.connect(self.updateLogMode) c.downsampleSpin.valueChanged.connect(self.updateDownsampling) c.downsampleCheck.toggled.connect(self.updateDownsampling) c.autoDownsampleCheck.toggled.connect(self.updateDownsampling) c.subsampleRadio.toggled.connect(self.updateDownsampling) c.meanRadio.toggled.connect(self.updateDownsampling) c.clipToViewCheck.toggled.connect(self.updateDownsampling) self.ctrl.avgParamList.itemClicked.connect(self.avgParamListClicked) self.ctrl.averageGroup.toggled.connect(self.avgToggled) self.ctrl.maxTracesCheck.toggled.connect(self.updateDecimation) self.ctrl.maxTracesSpin.valueChanged.connect(self.updateDecimation) self.hideAxis('right') self.hideAxis('top') self.showAxis('left') self.showAxis('bottom') if labels is None: labels = {} for label in list(self.axes.keys()): if label in kargs: labels[label] = kargs[label] del kargs[label] for k in labels: if isinstance(labels[k], basestring): labels[k] = (labels[k],) self.setLabel(k, *labels[k]) if title is not None: self.setTitle(title) if len(kargs) > 0: self.plot(**kargs) def implements(self, interface=None): return interface in ['ViewBoxWrapper'] def getViewBox(self): """Return the :class:`ViewBox <pyqtgraph.ViewBox>` contained within.""" return self.vb def setLogMode(self, x=None, y=None): """ Set log scaling for x and/or y axes. This informs PlotDataItems to transform logarithmically and switches the axes to use log ticking. Note that *no other items* in the scene will be affected by this; there is (currently) no generic way to redisplay a GraphicsItem with log coordinates. """ if x is not None: self.ctrl.logXCheck.setChecked(x) if y is not None: self.ctrl.logYCheck.setChecked(y) def showGrid(self, x=None, y=None, alpha=None): """ Show or hide the grid for either axis. 
============== ===================================== **Arguments:** x (bool) Whether to show the X grid y (bool) Whether to show the Y grid alpha (0.0-1.0) Opacity of the grid ============== ===================================== """ if x is None and y is None and alpha is None: raise Exception("Must specify at least one of x, y, or alpha.") ## prevent people getting confused if they just call showGrid() if x is not None: self.ctrl.xGridCheck.setChecked(x) if y is not None: self.ctrl.yGridCheck.setChecked(y) if alpha is not None: v = np.clip(alpha, 0, 1)*self.ctrl.gridAlphaSlider.maximum() self.ctrl.gridAlphaSlider.setValue(v) #def paint(self, *args): #prof = debug.Profiler('PlotItem.paint', disabled=True) #QtGui.QGraphicsWidget.paint(self, *args) #prof.finish() ## bad idea. #def __getattr__(self, attr): ## wrap ms #return getattr(self.vb, attr) def close(self): #print "delete", self ## Most of this crap is needed to avoid PySide trouble. ## The problem seems to be whenever scene.clear() leads to deletion of widgets (either through proxies or qgraphicswidgets) ## the solution is to manually remove all widgets before scene.clear() is called if self.ctrlMenu is None: ## already shut down return self.ctrlMenu.setParent(None) self.ctrlMenu = None #self.ctrlBtn.setParent(None) #self.ctrlBtn = None #self.autoBtn.setParent(None) #self.autoBtn = None for k in self.axes: i = self.axes[k]['item'] i.close() self.axes = None self.scene().removeItem(self.vb) self.vb = None ## causes invalid index errors: #for i in range(self.layout.count()): #self.layout.removeAt(i) #for p in self.proxies: #try: #p.setWidget(None) #except RuntimeError: #break #self.scene().removeItem(p) #self.proxies = [] #self.menuAction.releaseWidget(self.menuAction.defaultWidget()) #self.menuAction.setParent(None) #self.menuAction = None #if self.manager is not None: #self.manager.sigWidgetListChanged.disconnect(self.updatePlotList) #self.manager.removeWidget(self.name) #else: #print "no manager" def registerPlot(self, name): ## for backward compatibility self.vb.register(name) def updateGrid(self, *args): alpha = self.ctrl.gridAlphaSlider.value() x = alpha if self.ctrl.xGridCheck.isChecked() else False y = alpha if self.ctrl.yGridCheck.isChecked() else False self.getAxis('top').setGrid(x) self.getAxis('bottom').setGrid(x) self.getAxis('left').setGrid(y) self.getAxis('right').setGrid(y) def viewGeometry(self): """Return the screen geometry of the viewbox""" v = self.scene().views()[0] b = self.vb.mapRectToScene(self.vb.boundingRect()) wr = v.mapFromScene(b).boundingRect() pos = v.mapToGlobal(v.pos()) wr.adjust(pos.x(), pos.y(), pos.x(), pos.y()) return wr def avgToggled(self, b): if b: self.recomputeAverages() for k in self.avgCurves: self.avgCurves[k][1].setVisible(b) def avgParamListClicked(self, item): name = str(item.text()) self.paramList[name] = (item.checkState() == QtCore.Qt.Checked) self.recomputeAverages() def recomputeAverages(self): if not self.ctrl.averageGroup.isChecked(): return for k in self.avgCurves: self.removeItem(self.avgCurves[k][1]) self.avgCurves = {} for c in self.curves: self.addAvgCurve(c) self.replot() def addAvgCurve(self, curve): ## Add a single curve into the pool of curves averaged together ## If there are plot parameters, then we need to determine which to average together. 
remKeys = [] addKeys = [] if self.ctrl.avgParamList.count() > 0: ### First determine the key of the curve to which this new data should be averaged for i in range(self.ctrl.avgParamList.count()): item = self.ctrl.avgParamList.item(i) if item.checkState() == QtCore.Qt.Checked: remKeys.append(str(item.text())) else: addKeys.append(str(item.text())) if len(remKeys) < 1: ## In this case, there would be 1 average plot for each data plot; not useful. return p = self.itemMeta.get(curve,{}).copy() for k in p: if type(k) is tuple: p['.'.join(k)] = p[k] del p[k] for rk in remKeys: if rk in p: del p[rk] for ak in addKeys: if ak not in p: p[ak] = None key = tuple(p.items()) ### Create a new curve if needed if key not in self.avgCurves: plot = PlotDataItem() plot.setPen(fn.mkPen([0, 200, 0])) plot.setShadowPen(fn.mkPen([0, 0, 0, 100], width=3)) plot.setAlpha(1.0, False) plot.setZValue(100) self.addItem(plot, skipAverage=True) self.avgCurves[key] = [0, plot] self.avgCurves[key][0] += 1 (n, plot) = self.avgCurves[key] ### Average data together (x, y) = curve.getData() if plot.yData is not None: newData = plot.yData * (n-1) / float(n) + y * 1.0 / float(n) plot.setData(plot.xData, newData) else: plot.setData(x, y) def autoBtnClicked(self): if self.autoBtn.mode == 'auto': self.enableAutoRange() self.autoBtn.hide() else: self.disableAutoRange() def viewStateChanged(self): self.updateButtons() def enableAutoScale(self): """ Enable auto-scaling. The plot will continuously scale to fit the boundaries of its data. """ print("Warning: enableAutoScale is deprecated. Use enableAutoRange(axis, enable) instead.") self.vb.enableAutoRange(self.vb.XYAxes) def addItem(self, item, *args, **kargs): """ Add a graphics item to the view box. If the item has plot data (PlotDataItem, PlotCurveItem, ScatterPlotItem), it may be included in analysis performed by the PlotItem. """ self.items.append(item) vbargs = {} if 'ignoreBounds' in kargs: vbargs['ignoreBounds'] = kargs['ignoreBounds'] self.vb.addItem(item, *args, **vbargs) if hasattr(item, 'implements') and item.implements('plotData'): self.dataItems.append(item) #self.plotChanged() params = kargs.get('params', {}) self.itemMeta[item] = params #item.setMeta(params) self.curves.append(item) #self.addItem(c) if hasattr(item, 'setLogMode'): item.setLogMode(self.ctrl.logXCheck.isChecked(), self.ctrl.logYCheck.isChecked()) if isinstance(item, PlotDataItem): ## configure curve for this plot (alpha, auto) = self.alphaState() item.setAlpha(alpha, auto) item.setFftMode(self.ctrl.fftCheck.isChecked()) item.setDownsampling(*self.downsampleMode()) item.setClipToView(self.clipToViewMode()) item.setPointMode(self.pointMode()) ## Hide older plots if needed self.updateDecimation() ## Add to average if needed self.updateParamList() if self.ctrl.averageGroup.isChecked() and 'skipAverage' not in kargs: self.addAvgCurve(item) #c.connect(c, QtCore.SIGNAL('plotChanged'), self.plotChanged) #item.sigPlotChanged.connect(self.plotChanged) #self.plotChanged() name = kargs.get('name', getattr(item, 'opts', {}).get('name', None)) if name is not None and hasattr(self, 'legend') and self.legend is not None: self.legend.addItem(item, name=name) def addDataItem(self, item, *args): print("PlotItem.addDataItem is deprecated. 
Use addItem instead.") self.addItem(item, *args) def listDataItems(self): """Return a list of all data items (PlotDataItem, PlotCurveItem, ScatterPlotItem, etc) contained in this PlotItem.""" return self.dataItems[:] def addCurve(self, c, params=None): print("PlotItem.addCurve is deprecated. Use addItem instead.") self.addItem(c, params) def addLine(self, x=None, y=None, z=None, **kwds): """ Create an InfiniteLine and add to the plot. If *x* is specified, the line will be vertical. If *y* is specified, the line will be horizontal. All extra keyword arguments are passed to :func:`InfiniteLine.__init__() <pyqtgraph.InfiniteLine.__init__>`. Returns the item created. """ pos = kwds.get('pos', x if x is not None else y) angle = kwds.get('angle', 0 if x is None else 90) line = InfiniteLine(pos, angle, **kwds) self.addItem(line) if z is not None: line.setZValue(z) return line def removeItem(self, item): """ Remove an item from the internal ViewBox. """ if not item in self.items: return self.items.remove(item) if item in self.dataItems: self.dataItems.remove(item) if item.scene() is not None: self.vb.removeItem(item) if item in self.curves: self.curves.remove(item) self.updateDecimation() self.updateParamList() #item.connect(item, QtCore.SIGNAL('plotChanged'), self.plotChanged) #item.sigPlotChanged.connect(self.plotChanged) def clear(self): """ Remove all items from the ViewBox. """ for i in self.items[:]: self.removeItem(i) self.avgCurves = {} def clearPlots(self): for i in self.curves[:]: self.removeItem(i) self.avgCurves = {} def plot(self, *args, **kargs): """ Add and return a new plot. See :func:`PlotDataItem.__init__ <pyqtgraph.PlotDataItem.__init__>` for data arguments Extra allowed arguments are: clear - clear all plots before displaying new data params - meta-parameters to associate with this data """ clear = kargs.get('clear', False) params = kargs.get('params', None) if clear: self.clear() item = PlotDataItem(*args, **kargs) if params is None: params = {} self.addItem(item, params=params) return item def addLegend(self, size=None, offset=(30, 30)): """ Create a new LegendItem and anchor it over the internal ViewBox. Plots will be automatically displayed in the legend if they are created with the 'name' argument. """ self.legend = LegendItem(size, offset) self.legend.setParentItem(self.vb) return self.legend def scatterPlot(self, *args, **kargs): if 'pen' in kargs: kargs['symbolPen'] = kargs['pen'] kargs['pen'] = None if 'brush' in kargs: kargs['symbolBrush'] = kargs['brush'] del kargs['brush'] if 'size' in kargs: kargs['symbolSize'] = kargs['size'] del kargs['size'] return self.plot(*args, **kargs) def replot(self): self.update() def updateParamList(self): self.ctrl.avgParamList.clear() ## Check to see that each parameter for each curve is present in the list for c in self.curves: for p in list(self.itemMeta.get(c, {}).keys()): if type(p) is tuple: p = '.'.join(p) ## If the parameter is not in the list, add it. matches = self.ctrl.avgParamList.findItems(p, QtCore.Qt.MatchExactly) if len(matches) == 0: i = QtGui.QListWidgetItem(p) if p in self.paramList and self.paramList[p] is True: i.setCheckState(QtCore.Qt.Checked) else: i.setCheckState(QtCore.Qt.Unchecked) self.ctrl.avgParamList.addItem(i) else: i = matches[0] self.paramList[p] = (i.checkState() == QtCore.Qt.Checked) ## Qt's SVG-writing capabilities are pretty terrible. 
def writeSvgCurves(self, fileName=None): if fileName is None: self.fileDialog = FileDialog() if PlotItem.lastFileDir is not None: self.fileDialog.setDirectory(PlotItem.lastFileDir) self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile) self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave) self.fileDialog.show() self.fileDialog.fileSelected.connect(self.writeSvg) return #if fileName is None: #fileName = QtGui.QFileDialog.getSaveFileName() if isinstance(fileName, tuple): raise Exception("Not implemented yet..") fileName = str(fileName) PlotItem.lastFileDir = os.path.dirname(fileName) rect = self.vb.viewRect() xRange = rect.left(), rect.right() svg = "" fh = open(fileName, 'w') dx = max(rect.right(),0) - min(rect.left(),0) ymn = min(rect.top(), rect.bottom()) ymx = max(rect.top(), rect.bottom()) dy = max(ymx,0) - min(ymn,0) sx = 1. sy = 1. while dx*sx < 10: sx *= 1000 while dy*sy < 10: sy *= 1000 sy *= -1 #fh.write('<svg viewBox="%f %f %f %f">\n' % (rect.left()*sx, rect.top()*sx, rect.width()*sy, rect.height()*sy)) fh.write('<svg>\n') fh.write('<path fill="none" stroke="#000000" stroke-opacity="0.5" stroke-width="1" d="M%f,0 L%f,0"/>\n' % (rect.left()*sx, rect.right()*sx)) fh.write('<path fill="none" stroke="#000000" stroke-opacity="0.5" stroke-width="1" d="M0,%f L0,%f"/>\n' % (rect.top()*sy, rect.bottom()*sy)) for item in self.curves: if isinstance(item, PlotCurveItem): color = fn.colorStr(item.pen.color()) opacity = item.pen.color().alpha() / 255. color = color[:6] x, y = item.getData() mask = (x > xRange[0]) * (x < xRange[1]) mask[:-1] += mask[1:] m2 = mask.copy() mask[1:] += m2[:-1] x = x[mask] y = y[mask] x *= sx y *= sy #fh.write('<g fill="none" stroke="#%s" stroke-opacity="1" stroke-width="1">\n' % color) fh.write('<path fill="none" stroke="#%s" stroke-opacity="%f" stroke-width="1" d="M%f,%f ' % (color, opacity, x[0], y[0])) for i in range(1, len(x)): fh.write('L%f,%f ' % (x[i], y[i])) fh.write('"/>') #fh.write("</g>") for item in self.dataItems: if isinstance(item, ScatterPlotItem): pRect = item.boundingRect() vRect = pRect.intersected(rect) for point in item.points(): pos = point.pos() if not rect.contains(pos): continue color = fn.colorStr(point.brush.color()) opacity = point.brush.color().alpha() / 255. color = color[:6] x = pos.x() * sx y = pos.y() * sy fh.write('<circle cx="%f" cy="%f" r="1" fill="#%s" stroke="none" fill-opacity="%f"/>\n' % (x, y, color, opacity)) #fh.write('<path fill="none" stroke="#%s" stroke-opacity="%f" stroke-width="1" d="M%f,%f ' % (color, opacity, x[0], y[0])) #for i in xrange(1, len(x)): #fh.write('L%f,%f ' % (x[i], y[i])) #fh.write('"/>') ## get list of curves, scatter plots fh.write("</svg>\n") def writeSvg(self, fileName=None): if fileName is None: fileName = QtGui.QFileDialog.getSaveFileName() fileName = str(fileName) PlotItem.lastFileDir = os.path.dirname(fileName) self.svg = QtSvg.QSvgGenerator() self.svg.setFileName(fileName) res = 120. 
view = self.scene().views()[0] bounds = view.viewport().rect() bounds = QtCore.QRectF(0, 0, bounds.width(), bounds.height()) self.svg.setResolution(res) self.svg.setViewBox(bounds) self.svg.setSize(QtCore.QSize(bounds.width(), bounds.height())) painter = QtGui.QPainter(self.svg) view.render(painter, bounds) painter.end() ## Workaround to set pen widths correctly import re data = open(fileName).readlines() for i in range(len(data)): line = data[i] m = re.match(r'(<g .*)stroke-width="1"(.*transform="matrix\(([^\)]+)\)".*)', line) if m is not None: #print "Matched group:", line g = m.groups() matrix = list(map(float, g[2].split(','))) #print "matrix:", matrix scale = max(abs(matrix[0]), abs(matrix[3])) if scale == 0 or scale == 1.0: continue data[i] = g[0] + ' stroke-width="%0.2g" ' % (1.0/scale) + g[1] + '\n' #print "old line:", line #print "new line:", data[i] open(fileName, 'w').write(''.join(data)) def writeImage(self, fileName=None): if fileName is None: self.fileDialog = FileDialog() if PlotItem.lastFileDir is not None: self.fileDialog.setDirectory(PlotItem.lastFileDir) self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile) self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave) self.fileDialog.show() self.fileDialog.fileSelected.connect(self.writeImage) return #if fileName is None: #fileName = QtGui.QFileDialog.getSaveFileName() if isinstance(fileName, tuple): raise Exception("Not implemented yet..") fileName = str(fileName) PlotItem.lastFileDir = os.path.dirname(fileName) self.png = QtGui.QImage(int(self.size().width()), int(self.size().height()), QtGui.QImage.Format_ARGB32) painter = QtGui.QPainter(self.png) painter.setRenderHints(painter.Antialiasing | painter.TextAntialiasing) self.scene().render(painter, QtCore.QRectF(), self.mapRectToScene(self.boundingRect())) painter.end() self.png.save(fileName) def writeCsv(self, fileName=None): if fileName is None: self.fileDialog = FileDialog() if PlotItem.lastFileDir is not None: self.fileDialog.setDirectory(PlotItem.lastFileDir) self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile) self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave) self.fileDialog.show() self.fileDialog.fileSelected.connect(self.writeCsv) return #if fileName is None: #fileName = QtGui.QFileDialog.getSaveFileName() fileName = str(fileName) PlotItem.lastFileDir = os.path.dirname(fileName) fd = open(fileName, 'w') data = [c.getData() for c in self.curves] i = 0 while True: done = True for d in data: if i < len(d[0]): fd.write('%g,%g,'%(d[0][i], d[1][i])) done = False else: fd.write(' , ,') fd.write('\n') if done: break i += 1 fd.close() def saveState(self): state = self.stateGroup.state() state['paramList'] = self.paramList.copy() state['view'] = self.vb.getState() return state def restoreState(self, state): if 'paramList' in state: self.paramList = state['paramList'].copy() self.stateGroup.setState(state) self.updateSpectrumMode() self.updateDownsampling() self.updateAlpha() self.updateDecimation() if 'powerSpectrumGroup' in state: state['fftCheck'] = state['powerSpectrumGroup'] if 'gridGroup' in state: state['xGridCheck'] = state['gridGroup'] state['yGridCheck'] = state['gridGroup'] self.stateGroup.setState(state) self.updateParamList() if 'view' not in state: r = [[float(state['xMinText']), float(state['xMaxText'])], [float(state['yMinText']), float(state['yMaxText'])]] state['view'] = { 'autoRange': [state['xAutoRadio'], state['yAutoRadio']], 'linkedViews': [state['xLinkCombo'], state['yLinkCombo']], 'targetRange': r, 'viewRange': r, } 
self.vb.setState(state['view']) def widgetGroupInterface(self): return (None, PlotItem.saveState, PlotItem.restoreState) def updateSpectrumMode(self, b=None): if b is None: b = self.ctrl.fftCheck.isChecked() for c in self.curves: c.setFftMode(b) self.enableAutoRange() self.recomputeAverages() def updateLogMode(self): x = self.ctrl.logXCheck.isChecked() y = self.ctrl.logYCheck.isChecked() for i in self.items: if hasattr(i, 'setLogMode'): i.setLogMode(x,y) self.getAxis('bottom').setLogMode(x) self.getAxis('top').setLogMode(x) self.getAxis('left').setLogMode(y) self.getAxis('right').setLogMode(y) self.enableAutoRange() self.recomputeAverages() def setDownsampling(self, ds=None, auto=None, mode=None): """Change the default downsampling mode for all PlotDataItems managed by this plot. =========== ================================================================= Arguments ds (int) Reduce visible plot samples by this factor, or (bool) To enable/disable downsampling without changing the value. auto (bool) If True, automatically pick *ds* based on visible range mode 'subsample': Downsample by taking the first of N samples. This method is fastest and least accurate. 'mean': Downsample by taking the mean of N samples. 'peak': Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower. =========== ================================================================= """ if ds is not None: if ds is False: self.ctrl.downsampleCheck.setChecked(False) elif ds is True: self.ctrl.downsampleCheck.setChecked(True) else: self.ctrl.downsampleCheck.setChecked(True) self.ctrl.downsampleSpin.setValue(ds) if auto is not None: if auto and ds is not False: self.ctrl.downsampleCheck.setChecked(True) self.ctrl.autoDownsampleCheck.setChecked(auto) if mode is not None: if mode == 'subsample': self.ctrl.subsampleRadio.setChecked(True) elif mode == 'mean': self.ctrl.meanRadio.setChecked(True) elif mode == 'peak': self.ctrl.peakRadio.setChecked(True) else: raise ValueError("mode argument must be 'subsample', 'mean', or 'peak'.") def updateDownsampling(self): ds, auto, method = self.downsampleMode() clip = self.ctrl.clipToViewCheck.isChecked() for c in self.curves: c.setDownsampling(ds, auto, method) c.setClipToView(clip) self.recomputeAverages() def downsampleMode(self): if self.ctrl.downsampleCheck.isChecked(): ds = self.ctrl.downsampleSpin.value() else: ds = 1 auto = self.ctrl.downsampleCheck.isChecked() and self.ctrl.autoDownsampleCheck.isChecked() if self.ctrl.subsampleRadio.isChecked(): method = 'subsample' elif self.ctrl.meanRadio.isChecked(): method = 'mean' elif self.ctrl.peakRadio.isChecked(): method = 'peak' return ds, auto, method def setClipToView(self, clip): """Set the default clip-to-view mode for all PlotDataItems managed by this plot. 
If *clip* is True, then PlotDataItems will attempt to draw only points within the visible range of the ViewBox.""" self.ctrl.clipToViewCheck.setChecked(clip) def clipToViewMode(self): return self.ctrl.clipToViewCheck.isChecked() def updateDecimation(self): if self.ctrl.maxTracesCheck.isChecked(): numCurves = self.ctrl.maxTracesSpin.value() else: numCurves = -1 curves = self.curves[:] split = len(curves) - numCurves for i in range(len(curves)): if numCurves == -1 or i >= split: curves[i].show() else: if self.ctrl.forgetTracesCheck.isChecked(): curves[i].clear() self.removeItem(curves[i]) else: curves[i].hide() def updateAlpha(self, *args): (alpha, auto) = self.alphaState() for c in self.curves: c.setAlpha(alpha**2, auto) def alphaState(self): enabled = self.ctrl.alphaGroup.isChecked() auto = self.ctrl.autoAlphaCheck.isChecked() alpha = float(self.ctrl.alphaSlider.value()) / self.ctrl.alphaSlider.maximum() if auto: alpha = 1.0 ## should be 1/number of overlapping plots if not enabled: auto = False alpha = 1.0 return (alpha, auto) def pointMode(self): if self.ctrl.pointsGroup.isChecked(): if self.ctrl.autoPointsCheck.isChecked(): mode = None else: mode = True else: mode = False return mode def resizeEvent(self, ev): if self.autoBtn is None: ## already closed down return btnRect = self.mapRectFromItem(self.autoBtn, self.autoBtn.boundingRect()) y = self.size().height() - btnRect.height() self.autoBtn.setPos(0, y) def getMenu(self): return self.ctrlMenu def getContextMenus(self, event): ## called when another item is displaying its context menu; we get to add extras to the end of the menu. if self.menuEnabled(): return self.ctrlMenu else: return None def setMenuEnabled(self, enableMenu=True, enableViewBoxMenu='same'): """ Enable or disable the context menu for this PlotItem. By default, the ViewBox's context menu will also be affected. (use enableViewBoxMenu=None to leave the ViewBox unchanged) """ self._menuEnabled = enableMenu if enableViewBoxMenu is None: return if enableViewBoxMenu is 'same': enableViewBoxMenu = enableMenu self.vb.setMenuEnabled(enableViewBoxMenu) def menuEnabled(self): return self._menuEnabled def hoverEvent(self, ev): if ev.enter: self.mouseHovering = True if ev.exit: self.mouseHovering = False self.updateButtons() def getLabel(self, key): pass def _checkScaleKey(self, key): if key not in self.axes: raise Exception("Scale '%s' not found. Scales are: %s" % (key, str(list(self.axes.keys())))) def getScale(self, key): return self.getAxis(key) def getAxis(self, name): """Return the specified AxisItem. *name* should be 'left', 'bottom', 'top', or 'right'.""" self._checkScaleKey(name) return self.axes[name]['item'] def setLabel(self, axis, text=None, units=None, unitPrefix=None, **args): """ Set the label for an axis. Basic HTML formatting is allowed. ============= ================================================================= **Arguments** axis must be one of 'left', 'bottom', 'right', or 'top' text text to display along the axis. HTML allowed. units units to display after the title. If units are given, then an SI prefix will be automatically appended and the axis values will be scaled accordingly. (ie, use 'V' instead of 'mV'; 'm' will be added automatically) ============= ================================================================= """ self.getAxis(axis).setLabel(text=text, units=units, **args) self.showAxis(axis) def setLabels(self, **kwds): """ Convenience function allowing multiple labels and/or title to be set in one call. 
Keyword arguments can be 'title', 'left', 'bottom', 'right', or 'top'. Values may be strings or a tuple of arguments to pass to setLabel. """ for k,v in kwds.items(): if k == 'title': self.setTitle(v) else: if isinstance(v, basestring): v = (v,) self.setLabel(k, *v) def showLabel(self, axis, show=True): """ Show or hide one of the plot's axis labels (the axis itself will be unaffected). axis must be one of 'left', 'bottom', 'right', or 'top' """ self.getScale(axis).showLabel(show) def setTitle(self, title=None, **args): """ Set the title of the plot. Basic HTML formatting is allowed. If title is None, then the title will be hidden. """ if title is None: self.titleLabel.setVisible(False) self.layout.setRowFixedHeight(0, 0) self.titleLabel.setMaximumHeight(0) else: self.titleLabel.setMaximumHeight(30) self.layout.setRowFixedHeight(0, 30) self.titleLabel.setVisible(True) self.titleLabel.setText(title, **args) def showAxis(self, axis, show=True): """ Show or hide one of the plot's axes. axis must be one of 'left', 'bottom', 'right', or 'top' """ s = self.getScale(axis) p = self.axes[axis]['pos'] if show: s.show() else: s.hide() def hideAxis(self, axis): """Hide one of the PlotItem's axes. ('left', 'bottom', 'right', or 'top')""" self.showAxis(axis, False) def showScale(self, *args, **kargs): print("Deprecated. use showAxis() instead") return self.showAxis(*args, **kargs) def hideButtons(self): """Causes auto-scale button ('A' in lower-left corner) to be hidden for this PlotItem""" #self.ctrlBtn.hide() self.buttonsHidden = True self.updateButtons() def showButtons(self): """Causes auto-scale button ('A' in lower-left corner) to be visible for this PlotItem""" #self.ctrlBtn.hide() self.buttonsHidden = False self.updateButtons() def updateButtons(self): if self._exportOpts is False and self.mouseHovering and not self.buttonsHidden and not all(self.vb.autoRangeEnabled()): self.autoBtn.show() else: self.autoBtn.hide() def _plotArray(self, arr, x=None, **kargs): if arr.ndim != 1: raise Exception("Array must be 1D to plot (shape is %s)" % arr.shape) if x is None: x = np.arange(arr.shape[0]) if x.ndim != 1: raise Exception("X array must be 1D to plot (shape is %s)" % x.shape) c = PlotCurveItem(arr, x=x, **kargs) return c def _plotMetaArray(self, arr, x=None, autoLabel=True, **kargs): inf = arr.infoCopy() if arr.ndim != 1: raise Exception('can only automatically plot 1 dimensional arrays.') ## create curve try: xv = arr.xvals(0) except: if x is None: xv = np.arange(arr.shape[0]) else: xv = x c = PlotCurveItem(**kargs) c.setData(x=xv, y=arr.view(np.ndarray)) if autoLabel: name = arr._info[0].get('name', None) units = arr._info[0].get('units', None) self.setLabel('bottom', text=name, units=units) name = arr._info[1].get('name', None) units = arr._info[1].get('units', None) self.setLabel('left', text=name, units=units) return c def setExportMode(self, export, opts=None): GraphicsWidget.setExportMode(self, export, opts) self.updateButtons() #if export: #self.autoBtn.hide() #else: #self.autoBtn.show()
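A short usage sketch for the PlotItem above, assuming this bundled copy of pyqtgraph is importable and a Qt event loop can run; every PlotItem call below is a method defined in this file, while GraphicsWindow/addPlot come from the surrounding library.

import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui

app = QtGui.QApplication([])
win = pg.GraphicsWindow()             # convenience container that hosts PlotItems
plt = win.addPlot(title="Demo plot")  # returns a PlotItem

plt.addLegend()
plt.showGrid(x=True, y=True, alpha=0.3)
plt.setLabel('bottom', 'time', units='s')
plt.setLabel('left', 'signal', units='V')
plt.setDownsampling(auto=True, mode='peak')

x = np.linspace(0, 1, 10000)
plt.plot(x, np.sin(2 * np.pi * 5 * x), pen='y', name='5 Hz sine')

app.exec_()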
gpl-3.0
-2,996,013,384,908,142,600
37.173092
148
0.549075
false
gbour/wave
tests/testsuite/036_acl.py
1
5424
# -*- coding: UTF8 -*- import os import types import tempfile from lib import env from lib.env import debug from lib.erl import application as app, auth, acl from TestSuite import * from mqttcli import MqttClient from nyamuk.event import * from twisted.internet import defer class Acl(TestSuite): def __init__(self): TestSuite.__init__(self, "Acl") @defer.inlineCallbacks def setup_suite(self): ## configuring auth (fd, auth_file) = tempfile.mkstemp(prefix='wave-auth-'); os.close(fd) print "auth file:", auth_file with open(auth_file, 'w') as f: f.write( """ctrl:$2a$12$4xhMVs/zgy6T/GZobBAdc.bpbL2yaXnckX5YE9z5abEnGzsSaIeGq foo:$2a$12$EwUNtApVj6j2z9VQlMf98O8Xc.650HdRFK6Rr4sVG6bc/tdjjgXOW """) yield app.set_auth(required=True, filename=auth_file) yield auth.switch(auth_file) ## configuring acls (fd, acl_file) = tempfile.mkstemp(prefix='wave-acl-'); os.close(fd) print "acl file:", acl_file with open(acl_file, 'w') as f: f.write(""" # testsuite acl file ctrl\tallow\tr\ttest/# anonymous\tallow\tr\ttest/anonymous/sub/1 anonymous\tallow\tr\ttest/anonymous/sub/2/+ anonymous\tallow\tw\ttest/anonymous/pub/1 anonymous\tallow\tw\ttest/anonymous/pub/2/# foo\tallow\tr\ttest/foo/sub/1 foo\tallow\tr\ttest/foo/sub/2/+ foo\tallow\tw\ttest/foo/pub/1 foo\tallow\tw\ttest/foo/pub/2/# """) users = { 'anonymous': {'user': None , 'password': None}, 'foo': {'user': 'foo', 'password': 'bar'}, } i = 1 for user in sorted(users.keys()): @defer.inlineCallbacks def _init1(self): yield app.set_acl(enabled=False) defer.returnValue(self._t_check(client=user, acl=False, **users[user])) setattr(self, "test_{0:03}".format(i), types.MethodType(catch(desc( "user '{0}', no acl".format(user))( _init1)), self)) i += 1 @defer.inlineCallbacks def _init2(self): yield app.set_acl(enabled=True, filename=acl_file); yield acl.switch(acl_file) defer.returnValue(self._t_check(client=user, acl=True, **users[user])) setattr(self, "test_{0:03}".format(i), types.MethodType(catch(desc( "user '{0}', acls enabled".format(user))( _init2)), self)) i += 1 @defer.inlineCallbacks def cleanup_suite(self): yield app.set_auth(required=False) yield app.set_acl(enabled=False) # @defer.inlineCallbacks def _t_check(self, client, acl, user, password): ctrl = MqttClient("ctrl:{seq}", connect=4, username='ctrl', password='ctrl') c = MqttClient("client:{seq}", connect=4, username=user, password=password) ctrl.subscribe("test/#", qos=0) ## subscribe # MUST FAIL when acl on ret = c.subscribe("test/{0}/sub/0".format(client), qos=0) if not isinstance(ret, EventSuback): debug("{0}, acl {1}: {2}".format(client, acl, ret)) return False if acl and ret.granted_qos != [0x80] or\ not acl and ret.granted_qos != [0]: debug("{0}, acl {1}: {2}".format(client, acl, ret)) return False ## publish # NOTE: publish never reports failure or success topic = "test/{0}/pub/0".format(client); msg = env.gen_msg(10) c.publish(topic, msg) e = ctrl.recv() if acl and e != None: debug("{0}, acl {1}: {2}".format(client, acl, e)) return False elif not acl and (not isinstance(e, EventPublish) or\ e.msg.topic != topic or\ e.msg.payload != msg): debug("{0}, acl {1}: {2}".format(client, acl, e)) return False # MUST ALWAYS SUCCEED ret = c.subscribe("test/{0}/sub/1".format(client), qos=0) if not isinstance(ret, EventSuback) or ret.granted_qos != [0]: debug("{0}, acl {1}: {2}".format(client, acl, ret)) return False if acl: ret = c.subscribe("test/{0}/sub/1/extra".format(client), qos=0) if not isinstance(ret, EventSuback) or ret.granted_qos != [0x80]: debug("{0}, acl {1}: {2}".format(client, acl, ret)) return 
false topic = "test/{0}/pub/1".format(client); msg = env.gen_msg(10) c.publish(topic, msg) e = ctrl.recv() if not isinstance(e, EventPublish) or\ e.msg.topic != topic or\ e.msg.payload != msg: debug("{0}, acl {1}: {2}".format(client, acl, e)) return False if acl: msg = env.gen_msg(10) c.publish("test/{0}/pub/1/extra".format(client), msg) e = ctrl.recv() if e != None: debug("{0}, acl {1}: {2}".format(client, acl, e)) return False topic = "test/{0}/pub/2/foo/bar".format(client); msg = env.gen_msg(10) c.publish(topic, msg) e = ctrl.recv() if not isinstance(e, EventPublish) or\ e.msg.topic != topic or\ e.msg.payload != msg: debug("{0}, acl {1}: {2}".format(client, acl, e)) return False ctrl.disconnect(); c.disconnect() return True
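For reference, the ACL file written in setup_suite() above is a plain tab-separated format: one rule per line as "<user> allow <r|w> <topic filter>", with '#' starting a comment and MQTT wildcards (+ and #) allowed in the filter. The tiny parser below is only an illustration of that layout, inferred from the lines this test writes; it is not part of the test suite or of wave itself.

def parse_acl_line(line):
    line = line.strip()
    if not line or line.startswith('#'):
        return None
    user, action, access, topic = line.split('\t')
    return {'user': user, 'action': action, 'access': access, 'topic': topic}

assert parse_acl_line("foo\tallow\tr\ttest/foo/sub/2/+") == {
    'user': 'foo', 'action': 'allow', 'access': 'r', 'topic': 'test/foo/sub/2/+'}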
agpl-3.0
-2,830,121,941,140,285,400
31.872727
94
0.551254
false
patta42/pySICM
pySICMgui/widgetFloatingBackstep.py
1
7883
# -*- coding: utf-8 -*- # Copyright (C) 2015 Patrick Happel <[email protected]> # # This file is part of pySICM. # # pySICM is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License as published by the Free Software # Foundation, either version 2 of the License, or (at your option) any later # version. # # pySICM is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # pySICM. If not, see <http://www.gnu.org/licenses/>. '''This module implements a widget that allows reading a Analog Input channel on the controller''' from PyQt4 import QtCore from PyQt4.QtGui import QWidget, QLayout, QVBoxLayout, QPalette, QSpinBox, QLabel from pySICMgui.defaultScanwidget import DefaultScanWidget import matplotlib.pyplot import matplotlib.colors import json, time, struct, numpy, datetime import pySICM.helpers as Helpers from matplotlibwidget import MatplotlibWidget from pySICMgui.DataDisplayWidget import DataDisplayWidget try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s class WidgetFloatingBackstep(DefaultScanWidget): def __init__ (self, mainwin, parent = None, **kwargs): super(WidgetFloatingBackstep, self).__init__(mainwin, parent, mode='floatingBackstep', **kwargs) self.setWindowTitle('Floating Backstep Scan') self.populateForm() self.addGraph('data',xlabel='x',ylabel='y') self.addGraph('prescan',xlabel='x',ylabel='y') self.progressBar.setValue(0) self.data = numpy.zeros((128,128), numpy.uint16) self.data = numpy.outer( numpy.linspace(0,numpy.sqrt(256),128), numpy.linspace(0,numpy.sqrt(256),128)) self.predata = numpy.ones((128,128), numpy.uint16) self.getGraph('data').update(self.data) self.getGraph('prescan').update(self.data) if kwargs: if 'data' in kwargs: self.getGraph('prescan').update(kwargs['data']) def addGraph(self, name, **kwargs): mpw = DataDisplayWidget() self._graphs[name] = mpw if name=='prescan': self.dataLayout.addWidget(mpw,0,0) else: mpw.asNewScanCallback = self.asNewScan self.dataLayout.addWidget(mpw,0,1) def getSetting(self, setting): if setting in self.settingConvertFuncs: return self.settingConvertFuncs[setting](self.settings[setting].text()) def sendSettings(self): client = self.mainwin.client client.sendLine('SET mode=floatingBackstep') for setting, field in self.settings.iteritems(): client.sendLine('SET floatingBackstep.'+str(setting)+'='+str(field.text())) def _prepare(self): self.mainwin.serverLog=str(time.time()) self.sendSettings() xl = int(self.settings['x-px'].text()) yl = int(self.settings['y-px'].text()) xlp = int(self.settings['x-px-pre'].text()) ylp = int(self.settings['y-px-pre'].text()) self.data = numpy.zeros((xl, yl), numpy.uint16) self.prescan = numpy.zeros((xlp, ylp), numpy.uint16) self.progressBar.setMaximum(xlp*ylp) self.mainwin.stat.progressBar.setMaximum(xl*yl+xlp*ylp) self.expectData(self.updateData, length = 1, form = 'int', rang=[0, 2**16]) self.x = -1 self.y = 0 self.min_data = None self.max_data = None self.lastDraw = time.time() def fake(self): self._prepare() self.receiveData('FAKE') def updateData(self, data, *args): finished = False self.x+=1 if self.isPrescan: if self.x >= int(self.settings['x-px-pre'].text()): self.y += 1 self.x = 0 if self.y < int(self.settings['y-px-pre'].text()): self.prescan[self.y, self.x] 
= data[0] if self.x + 1 == int(self.settings['x-px-pre'].text()) and self.y +1 == int(self.settings['y-px-pre'].text()): print "Prescan finished" self.info['client_prescan_end_timestamp'] = int( round( time.time() * 1e3 ) ) self.info['client_prescan_duration'] = self.info['client_prescan_end_timestamp'] - self.info['client_prescan_start_timestamp'] self.info['client_scan_start_timestamp'] = int( round( time.time() * 1e3 ) ) self.info['client_scan_start_time'] = str( datetime.datetime.now() ) self.x = -1 self.y = 0 self.isPrescan = False xl = int(self.settings['x-px'].text()) yl = int(self.settings['y-px'].text()) self.progressBar.setMaximum(xl*yl) self.getGraph('prescan').update(self.prescan) mpw = self.getGraph('prescan') d = self.prescan progress = int(self.settings['x-px-pre'].text())*self.y+self.x total_progress = progress else: if self.x >= int(self.settings['x-px'].text()): self.y += 1 self.x = 0 if self.y < int(self.settings['y-px'].text()): self.data[self.y, self.x] = data[0] if self.x +1 == int(self.settings['x-px'].text()) and self.y +1 == int(self.settings['y-px'].text()): print "Scan finished" finished = True self.info['client_scan_end_timestamp'] = int( round( time.time() * 1e3 ) ) self.info['client_scan_duration'] = self.info['client_scan_end_timestamp'] - self.info['client_scan_start_timestamp'] self.unexpectData() self.lastDraw = 0 d = self.data mpw = self.getGraph('data') total_progress = (int(self.settings['x-px-pre'].text()) *int(self.settings['y-px-pre'].text()) +int(self.settings['x-px'].text()) *self.y+self.x) progress = int(self.settings['x-px'].text())*self.y+self.x if time.time() - self.lastDraw > .2 or finished: mpw.update(d) self.progressBar.setValue(progress) self.mainwin.stat.progressBar.setValue(total_progress) self.lastDraw = time.time() # if finished: # # try: def scan(self): self._prepare() self.isPrescan = True self.receiveData('SCAN') self.info['client_prescan_start_time'] = str(datetime.datetime.now()) self.info['client_prescan_start_timestamp'] = int( round( time.time() * 1e3 ) ) def stop(self): self.mainwin.client.sendLine('STOP') self.unexpectData() def asNewScan(self, selection, data): settings = {} for k,v in self.settings.iteritems(): settings[k] = v.text() xpxsize = self.getSetting('x-Size') / self.getSetting('x-px') ypxsize = self.getSetting('y-Size') / self.getSetting('y-px') xoff = round(selection['x']) * xpxsize + self.getSetting('XOffset') yoff = round(selection['y']) * ypxsize + self.getSetting('YOffset') settings['XOffset'] = str(xoff) settings['YOffset'] = str(yoff) settings['x-Size'] = str(round(selection['w']) * xpxsize) settings['y-Size'] = str(round(selection['w']) * xpxsize) self.mainwin.openScanModeWin('floatingBackstep', data=data, settings=settings) print "As new scan..."
gpl-3.0
3,839,185,758,363,953,000
40.272251
142
0.584676
false
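The updateData() method in the widget above interleaves pixel bookkeeping (advance x, wrap to the next row, stop at the last pixel) with drawing and progress updates. As an illustrative aid only, the standalone generator below restates just that traversal; the function name and the 3x2 example are not part of pySICM.

# Sketch of the scan traversal implied by the x/y counters in updateData():
# x advances along a row, wraps to 0 and increments y at each row end.
def scan_positions(x_px, y_px):
    x, y = -1, 0
    while True:
        x += 1
        if x >= x_px:
            y += 1
            x = 0
        if y >= y_px:
            return
        yield x, y

# a 3 x 2 scan visits: [(0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (2, 1)]
print(list(scan_positions(3, 2)))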
satanas/libturpial
libturpial/config.py
1
13588
# -*- coding: utf-8 -*- """Module to handle basic configuration of Turpial""" import os import base64 import shutil import logging import ConfigParser from libturpial.api.models.proxy import Proxy from libturpial.common import get_username_from, get_protocol_from from libturpial.exceptions import (EmptyOAuthCredentials, ExpressionAlreadyFiltered) try: from xdg import BaseDirectory XDG_CACHE = True except: XDG_CACHE = False APP_CFG = { 'General': { 'update-interval': '5', 'queue-interval': '30', 'minimize-on-close': 'on', # TODO: Deprecate in next mayor version 'statuses': '60', }, 'Columns': { }, 'Services': { 'shorten-url': 'is.gd', 'upload-pic': 'pic.twitter.com', }, 'Proxy': { 'username': '', 'password': '', 'server': '', 'port': '', 'protocol': 'http', }, 'Advanced': { 'socket-timeout': '20', 'show-user-avatars': 'on', }, # TODO: Deprecate all of this config options in next mayor version 'Window': { 'size': '320,480', }, 'Notifications': { 'on-updates': 'on', 'on-actions': 'on', }, 'Sounds': { 'on-login': 'on', 'on-updates': 'on', }, 'Browser': { 'cmd': '', }, } ACCOUNT_CFG = { 'OAuth': { 'key': '', 'secret': '', }, 'Login': { 'username': '', 'protocol': '', } } USERDIR = os.path.expanduser('~') BASEDIR = os.path.join(USERDIR, '.config', 'turpial') class ConfigBase(object): """Base configuration""" def __init__(self, default=None): self.__config = {} if default is None: self.default = APP_CFG else: self.default = default self.cfg = ConfigParser.ConfigParser() self.filepath = '' self.extra_sections = {} def register_extra_option(self, section, option, default_value): """ Registers a new configuration option under a specified section with a default value. Returns a maping with the new option as a key and the value """ if section in self.__config: if option in self.__config[section]: # TODO: raise an exception maybe? return if section not in self.extra_sections: self.extra_sections[section] = {} self.extra_sections[section][option] = default_value self.write(section, option, default_value) return {option: default_value} def create(self): for section, v in self.default.iteritems(): for option, value in self.default[section].iteritems(): self.write(section, option, value) # TODO: Return True on success? 
def load(self): self.__config = dict(self.default) self.__config.update(self.extra_sections) on_disk = {} self.cfg.read(self.configpath) for section in self.cfg.sections(): if section not in on_disk: on_disk[section] = {} for option in self.cfg.options(section): on_disk[section][option] = self.cfg.get(section, option) self.__config.update(on_disk) # ConfigParser doesn't store on disk empty sections, so we need to remove them # just to compare against saved on disk on_memory = dict(self.__config) for key in on_memory.keys(): if on_memory[key] == {}: del on_memory[key] if on_disk != on_memory: self.save() def load_failsafe(self): self.__config = self.default def save(self, config=None): if config is None: config = dict(self.__config) self.__config = {} for section, _v in config.iteritems(): for option, value in config[section].iteritems(): self.write(section, option, value) def write(self, section, option, value): if section not in self.__config: self.__config[section] = {} self.__config[section][option] = value _fd = open(self.configpath, 'w') if not self.cfg.has_section(section): self.cfg.add_section(section) self.cfg.set(section, option, value) self.cfg.write(_fd) _fd.close() def write_section(self, section, items): if self.cfg.has_section(section): self.cfg.remove_section(section) self.cfg.add_section(section) else: self.cfg.add_section(section) self.__config[section] = {} for option, value in items.iteritems(): self.__config[section][option] = value self.cfg.set(section, option, value) _fd = open(self.configpath, 'w') self.cfg.write(_fd) _fd.close() # WARN: Next version boolean will be the default answer def read(self, section, option, boolean=False): try: value = self.__config[section][option] if boolean: if value == 'on': return True elif value == 'off': return False else: return value else: return value except Exception: return None def read_section(self, section): try: return self.__config[section] except Exception: return None def read_all(self): try: return self.__config except Exception: return None class AppConfig(ConfigBase): """ Handle app configuration """ def __init__(self, basedir=BASEDIR, default=None): ConfigBase.__init__(self, default) self.log = logging.getLogger('AppConfig') self.log.debug('Started') self.basedir = basedir self.configpath = os.path.join(self.basedir, 'config') self.filterpath = os.path.join(self.basedir, 'filtered') self.friendspath = os.path.join(self.basedir, 'friends') if not os.path.isdir(self.basedir): os.makedirs(self.basedir) if not os.path.isfile(self.configpath): self.create() if not os.path.isfile(self.filterpath): open(self.filterpath, 'w').close() if not os.path.isfile(self.friendspath): open(self.friendspath, 'w').close() self.log.debug('CONFIG_FILE: %s' % self.configpath) self.log.debug('FILTERS_FILE: %s' % self.filterpath) self.log.debug('FRIENDS_FILE: %s' % self.friendspath) self.load() def load_filters(self): muted = [] _fd = open(self.filterpath, 'r') for line in _fd: if line == '\n': continue muted.append(line.strip('\n')) _fd.close() return muted def save_filters(self, filter_list): _fd = open(self.filterpath, 'w') for expression in filter_list: _fd.write(expression + '\n') _fd.close() return filter_list # TODO: Return added expresion? def append_filter(self, expression): for term in self.load_filters(): if term == expression: raise ExpressionAlreadyFiltered _fd = open(self.filterpath, 'a') _fd.write(expression + '\n') _fd.close() # TODO: Return removed expression? 
def remove_filter(self, expression): new_list = [] for term in self.load_filters(): if term == expression: continue new_list.append(term) self.save_filters(new_list) def load_friends(self): friends = [] _fd = open(self.friendspath, 'r') for line in _fd: if line == '\n': continue friends.append(line.strip('\n')) _fd.close() return friends # TODO: Validate success somehow def save_friends(self, lst): _fd = open(self.friendspath, 'w') for friend in lst: _fd.write(friend + '\n') _fd.close() return lst def get_stored_accounts(self): accounts = [] acc_dir = os.path.join(BASEDIR, 'accounts') for root, dirs, files in os.walk(acc_dir): for acc_dir in dirs: filepath = os.path.join(root, acc_dir, 'config') if os.path.isfile(filepath): accounts.append(acc_dir) return accounts def get_stored_columns(self): columns = [] stored_cols = self.read_section('Columns') if not stored_cols or len(stored_cols) == 0: return columns indexes = stored_cols.keys() indexes.sort() for i in indexes: value = stored_cols[i] if value != '': columns.append(value) return columns # Assumes secure request by default def get_proxy(self): temp = self.read_section('Proxy') secure = True if temp.get('protocol', 'https').lower() == 'https' else False return Proxy(temp['server'], temp['port'], temp['username'], temp['password'], secure) def get_socket_timeout(self): return int(self.read('Advanced', 'socket-timeout')) def delete(self): try: os.remove(self.configpath) self.log.debug('Deleted current config. Please restart Turpial') return True except AttributeError: return False class AccountConfig(ConfigBase): def __init__(self, account_id): ConfigBase.__init__(self, default=ACCOUNT_CFG) self.log = logging.getLogger('AccountConfig') self.basedir = os.path.join(BASEDIR, 'accounts', account_id) if XDG_CACHE: cachedir = BaseDirectory.xdg_cache_home self.imgdir = os.path.join(cachedir, 'turpial', account_id, 'images') else: self.imgdir = os.path.join(self.basedir, 'images') self.configpath = os.path.join(self.basedir, 'config') self.log.debug('CACHEDIR: %s' % self.imgdir) self.log.debug('CONFIGFILE: %s' % self.configpath) if not os.path.isdir(self.basedir): os.makedirs(self.basedir) if not os.path.isdir(self.imgdir): os.makedirs(self.imgdir) if not self.exists(account_id): self.create() try: self.load() except Exception, exc: self.load_failsafe() if not self.exists(account_id): self.write('Login', 'username', get_username_from(account_id)) self.write('Login', 'protocol', get_protocol_from(account_id)) @staticmethod def exists(account_id): basedir = os.path.join(BASEDIR, 'accounts', account_id) configpath = os.path.join(basedir, 'config') if not os.path.isfile(configpath): return False return True # DEPRECATE: Remove verifier in the next stable version def save_oauth_credentials(self, key, secret, verifier=None): self.write('OAuth', 'key', key) self.write('OAuth', 'secret', secret) # DEPRECATE: Remove verifier in the next stable version def load_oauth_credentials(self): key = self.read('OAuth', 'key') secret = self.read('OAuth', 'secret') if key and secret: return key, secret else: raise EmptyOAuthCredentials def forget_oauth_credentials(self): self.write('OAuth', 'key', '') self.write('OAuth', 'secret', '') def transform(self, pw, us): a = base64.b16encode(pw) b = us[0] + a + ('%s' % us[-1]) c = base64.b32encode(b) d = ('%s' % us[-1]) + c + us[0] e = base64.b64encode(d) f = [e[i] for i in range(len(e))] f.reverse() return ''.join(f) def revert(self, pw, us): if pw == '': return None z = [pw[i] for i in range(len(pw))] z.reverse() y = 
''.join(z)
        x = base64.b64decode(y)
        w = ('%s' % x[1:len(x)])[:-1]
        v = base64.b32decode(w)
        u = ('%s' % v[:len(v) - 1])[1:]
        return base64.b16decode(u)

    # TODO: Return True on success?
    def dismiss(self):
        if os.path.isdir(self.imgdir):
            shutil.rmtree(self.imgdir)
            self.log.debug('Removed cache directory')

        if os.path.isfile(self.configpath):
            os.remove(self.configpath)
            self.log.debug('Removed configuration file')

        if os.path.isdir(self.basedir):
            shutil.rmtree(self.basedir)
            self.log.debug('Removed base directory')

    def delete_cache(self):
        """
        Returns a list of unsuccessful deletions
        """
        unsuccessful = list()
        for root, dirs, files in os.walk(self.imgdir):
            for f in files:
                try:
                    path = os.path.join(root, f)
                    self.log.debug("Deleting %s" % path)
                    os.remove(path)
                except OSError:
                    # os.remove reports failures as OSError; collect the path
                    # so callers can see which files could not be removed
                    unsuccessful.append(path)

        self.log.debug('There were unsuccessful deletions %s' % unsuccessful)
        return unsuccessful

    def calculate_cache_size(self):
        size = 0
        for root, dirs, files in os.walk(self.imgdir):
            for f in files:
                path = os.path.join(root, f)
                size += os.path.getsize(path)
        return size
gpl-3.0
-8,496,646,395,468,883,000
29.195556
94
0.544893
false
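AccountConfig.transform() and revert() in the file above obfuscate a stored secret by wrapping layered base16/base32/base64 encodings in characters of the username and reversing the result. The sketch below is a minimal standalone restatement of that round trip, assuming Python 2 string semantics as elsewhere in this corpus (under Python 3 the base64 calls would need bytes); the username and password literals are made-up examples.

import base64

def transform(pw, us):
    # b16-encode, wrap in username chars, b32-encode, wrap again,
    # b64-encode, then reverse the characters (mirrors AccountConfig.transform)
    a = base64.b16encode(pw)
    c = base64.b32encode(us[0] + a + us[-1])
    e = base64.b64encode(us[-1] + c + us[0])
    return e[::-1]

def revert(pw, us):
    # undo each layer in the opposite order (mirrors AccountConfig.revert)
    d = base64.b64decode(pw[::-1])
    b = base64.b32decode(d[1:-1])
    return base64.b16decode(b[1:-1])

secret = transform('hunter2', 'turpial')
assert revert(secret, 'turpial') == 'hunter2'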
orbkit/orbkit
setup.py
1
2718
#!/usr/bin/env python import sys from setuptools import find_packages, setup, Extension from Cython.Distutils import build_ext from os.path import join import numpy with open('README.rst', 'r') as fh: # Remove header for line in fh: if 'ORBKIT' in line: next(fh) next(fh) break long_description = fh.read() if len(sys.argv) and 'bdist_wheel' in sys.argv[1:]: print("OpenMP for detCI disabled") ompcompileflags = [] omplinkflags = [] else: ompcompileflags = ['-fopenmp'] omplinkflags = ['-fopenmp'] setup( name='orbkit', packages=find_packages(), include_package_data=True, version='1.1.0', license='lgpl-3.0', description='A Toolbox for Post-Processing Quantum Chemical Wavefunction Data', long_description=long_description, long_description_content_type='text/x-rst', url='https://github.com/orbkit/orbkit', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Operating System :: OS Independent', 'License :: OSI Approved :: ' 'GNU Lesser General Public License v3 or later (LGPLv3+)', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Visualization' ], cmdclass={'build_ext': build_ext}, entry_points={'console_scripts': ['orbkit = orbkit.main:run_standalone']}, build_requires=['numpy'], install_requires=['numpy', 'scipy', 'matplotlib', 'h5py', 'setuptools'], ext_modules=[ Extension( 'orbkit.cy_grid', sources=['orbkit/cy_grid.pyx'], include_dirs=[numpy.get_include()], ), Extension( 'orbkit.cy_core', sources=[ 'orbkit/cy_core.pyx', 'orbkit/c_grid-based.c', 'orbkit/c_support.c' ], include_dirs=[numpy.get_include()], depends=[join('orbkit', '*.h')], ), Extension( 'orbkit.cy_overlap', sources=[ 'orbkit/cy_overlap.pyx', 'orbkit/c_non-grid-based.c', 'orbkit/c_support.c' ], include_dirs=[numpy.get_include()], depends=[join('orbkit', '*.h')], ), # detCI@ORBKIT Extension( 'orbkit.detci.cy_occ_check', sources=['orbkit/detci/cy_occ_check.pyx'], include_dirs=[numpy.get_include()], ), Extension( 'orbkit.detci.cy_ci', sources=['orbkit/detci/cy_ci.pyx'], extra_compile_args=ompcompileflags, extra_link_args=omplinkflags, include_dirs=[numpy.get_include()], ), # Libcint Extension( 'orbkit.libcint_interface.cy_mo_integrals', sources=['orbkit/libcint_interface/cy_mo_integrals.pyx'], include_dirs=[numpy.get_include()], ), ], )
lgpl-3.0
-2,076,100,841,732,056,000
27.020619
81
0.62362
false
itzsarim/wordfilter
app.py
1
1871
from flask import Flask, render_template, request, jsonify

# Initialize the Flask application
app = Flask(__name__)

# This route will show a form to perform an AJAX request
# jQuery is loaded to execute the request and update the
# value of the operation
@app.route('/')
def index():
    return render_template('index.html')

# Route that will process the AJAX request, run the submitted text
# through the word filter and return the result as a proper JSON
# response (Content-Type, etc.)
@app.route('/_word_filter')
def word_filter():
    #get text from template
    a = request.args.get('a', 0, type=str)
    result=[]
    result=filter(a)
    return jsonify(result=result)

def filter(a):
    storefile = []
    z=a
    #split the input string
    y=a.split()
    storefile=y
    temp = storefile
    #make a hash for replacement
    replacements = {
        'maverick':'********',
        'ranger':'******',
        'rocket':'******',
        'king':'****',
        'heat':'****',
        'chicago':'*******',
        'bear':'****',
        'wizard':'******',
        'seattle':'*******',
        'atlanta':'*******',
        'miami':'*****',
        'austin':'******',
        'boston':'******',
        'pittsburgh':'**********',
        'saint':'*****',
        'hawk':'****'
    }
    #replace the input string
    for src, target in replacements.iteritems():
        z=z.replace(src,target)
    z=z.split()
    result=[]
    i=0
    #only mask a word when it was replaced in full; words that were only
    #partially replaced with * are restored from the original input
    for line in y:
        if( line in replacements):
            temp=replacements[line]
            result.append(temp)
        else:
            if (z[i].count('*')==len(z[i])):
                result.append(z[i])
            else:
                result.append(line)
        i = i+1
    #return the result
    return result

if __name__ == '__main__':
    app.run(
        debug=True
    )
mit
-5,892,173,469,965,913,000
19.56044
81
0.53768
false
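Because the /_word_filter route above returns JSON, the quickest way to exercise it without the index template is Flask's built-in test client. A minimal sketch, assuming the file is importable as the module app (it lives in app.py); the query text is a made-up example.

# Exercise /_word_filter through Flask's test client.
from app import app

client = app.test_client()
resp = client.get('/_word_filter', query_string={'a': 'the miami heat'})
# With the replacement table above, 'miami' and 'heat' are fully masked:
# {"result": ["the", "*****", "****"]}
print(resp.data)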
spatialfrog/soil_tools
qgis/qgis_progressing_framework_scripts/src/1_connect_soil_db.py
1
3864
""" purpose: loads the database table "availableSoilTableJoins", cmp table if selected and slc shapefile notes: - loads "permittedOperations" table into qgis toc. becomes drop down in processing framework for next script. will allows user to specific what tables they wish to use and if a join operation is required - loading the slc shapefile is required. user could load via standard qgis interface tools - user can select to load cmp table if this is the table they wish to work with input: db must have been created output: loads db table "availableSoilTableJoins" into qgis canvas license: - gpl3 by: richard burcher [email protected] 2014 """ #========== # sets up gui in qgis processing framework ##[AAFC Soil Tools]=group ##soil_database=file ##slc_shapefile=vector #=========== from PyQt4.QtCore import * from PyQt4.QtGui import * from qgis.core import * from qgis.gui import * from qgis.utils import * import os import sys import sqlite3 # aafc module name containing submodules for soil db work aafcModuleName = "aafc_modules" # add aafc module name directory to python path. found as .qgis2/processing/scripts/name_of_aafc_module scriptDirectory = os.path.join(QFileInfo(QgsApplication.qgisUserDbFilePath()).path(), "processing/scripts",aafcModuleName) # add to aafc module directory to python path sys.path.append(scriptDirectory) # import aafc_modules import aafc_utilities as utilities import aafc_database as database # create utility class instance. pass qgis supplied iface utils = utilities.Utils(iface) # get path to temp directory tempSystemDirectoryPath = utils.determineSystemTempDirectory() #========= validate user input # returns (message, boolean) #TODO: validate data # msg, status = utils.validateUserInput(soil_database, slc_shapefile) # if not status: # # problem with data provided # utils.communicateWithUserInQgis("Problem with either: paths or type of data passed in. Stopping.",level="CRITICAL", messageExistanceDuration=15) # raise Exception(msg) #========== load layers # load soil db table "permittedOperations" into canvas #TODO: should check in input validation if soil_database == "": # user must supply this! utils.communicateWithUserInQgis("Must supply soil database path. Stopping.",level="CRITICAL", messageExistanceDuration=15) raise Exception("Must supply soil datbase path. Stopping.") # load slc shapefile if not slc_shapefile =="": # user supplied path msg, status = utils.loadVectorLayerIntoQgis(slc_shapefile) if not status: # problem loading table utils.communicateWithUserInQgis("Problem with vector layer provided. Stopping.",level="CRITICAL", messageExistanceDuration=15) raise Exception(msg) # create database class instance # db must exist before sqlite connection can exit db = database.Db(soil_database, tempSystemDirectoryPath) #== load cmp table msg, status = utils.loadDbTableAsLayerIntoQgis(soil_database, "cmp") if not status: # problem loading table utils.communicateWithUserInQgis("Problem loading cmp soil table. Issue with either: paths or type of data passed in. Stopping.",level="CRITICAL", messageExistanceDuration=15) raise Exception(msg) #== join related tables if present in db # load join options table. only present if user supplied snf/slf when building soil db msg, status = utils.loadDbTableAsLayerIntoQgis(soil_database, db.userJoinOptionsTable) if not status: # table not present. 
this is okay pass # load joinedSoilTables or similar named soil joined tables msg, status = utils.loadDbTableAsLayerIntoQgis(soil_database, db.joinTableName) if not status: # table does not exist. this is okay, perhaps user did not load snf/slf and/or create a joined table before connecting pass #========== clean up # remove added aafc soil module from python path sys.path.pop()
gpl-3.0
2,176,013,862,879,242,000
32.318966
178
0.761387
false
alchayward/seeding
views.py
1
6614
from django.contrib.auth.decorators import login_required from django.shortcuts import render, render_to_response from seeding.models import Tournament, Team, Session, Game from django.contrib.auth import authenticate, login from django.http import HttpResponseRedirect, HttpResponse from django.template import RequestContext from itertools import chain from seeding import staging def tournament_list(request): tourn_list = Tournament.objects.all() return render_to_response('seeding/tournament_list.html', {'tournaments': tourn_list }) def tournament(request, tournament_id): tour = Tournament.objects.get(id__exact=tournament_id) sessions = Session.objects.filter(tournament=tournament_id) return render_to_response('seeding/tournament.html', {'tour':tour,'sessions':sessions}) def session(request, tournament_id, session_name): sess = Session.objects.get(tournament=tournament_id, slug=session_name) tour = Tournament.objects.get(id__exact=tournament_id) sess.teams = Team.objects.filter(session=sess.id) for team in sess.teams: games_1 = Game.objects.filter(session=sess.id,team_1=team.id,staged=True) for game in games_1: game.opp_name = Team.objects.get(id=game.team_2.id).name if game.completed: result = "%s -- %s" % (game.score_1,game.score_2) else: result = "unplayed" game.result = result games_2 = Game.objects.filter(session=sess.id,team_2=team.id,staged=True) for game in games_2: game.opp_name = Team.objects.get(id=game.team_1.id).name if game.completed: result = "%s -- %s" % (game.score_2,game.score_1) else: result = "unplayed" game.result = result team.games = list(chain(games_1,games_2)) if request.user.is_authenticated(): return render_to_response('seeding/session_run.html', {'tour': tour, 'sess': sess}) else: return render_to_response('seeding/session.html', {'tour': tour, 'sess': sess}) def update_score(sess): #get games games = Game.objects.filter(session=sess.id,staged=True) teams = Team.objects.filter(session=sess.id) for g in games: for ind,t in enumerate(teams): if g.team_1 == t: g.s_team_1 = ind if g.team_2 == t: g.s_team_2 = ind s = staging.Seeding(teams) s.games = games s.fit_model() ratings = s.mcmc.theta.stats()['mean'] #print ratings #make class #update the scores for ind,t in enumerate(teams): t.score = round(ratings[ind],2) t.save() ord_teams = sorted(teams, key=lambda ta:-ta.score) for ind,t in enumerate(ord_teams): t.rank=ind+1 t.save() return s.mcmc.theta.stats() @login_required def run_session(request, tournament_id, session_name): sess = Session.objects.get(tournament=tournament_id, slug=session_name) tour = Tournament.objects.get(id__exact=tournament_id) context = RequestContext(request) #Check for permission #Check for which action is being preformed #no action #add games #change scores #stage next round (automatic?) #add a round # If the request is a HTTP POST, try to pull out the relevant information. if request.method == 'POST': # Gather the username and password provided by the user. # This information is obtained from the login form. 
update = request.POST['update'] sess.status = 'UP' sess.save() str = update_score(sess) return HttpResponse(str) #Not used yet def game_list(request,session_id): return render_to_response('seeding/game_list.html', {'tournaments': Tournament.title.all() }) def team_list(request,tournament_name): return render_to_response('seeding/team_list.html', {'tournaments': Team.title.all() }) #Not used yet def team_info(request, team_name): return render_to_response('seeding/team_info.html', {'team': Data.team_info(team_id) } ) #def tourn_admin(request, tour_id): # return render_to_response('seeding/run_tournament.html', # {'sessions': sessions, teams} ) ##def tourn_admin(request, session_id): # return render_to_response('seeding/run_session.html', # {'session_id': session_id} ) #Generate Tournament object from database? Doesn't matter if ineffiecent. #Need an app for admin. Simple object editor. #cookies? No point. This can be completely staeless at this point. #Though admin needs to log in. def user_login(request): # Like before, obtain the context for the user's request. context = RequestContext(request) # If the request is a HTTP POST, try to pull out the relevant information. if request.method == 'POST': # Gather the username and password provided by the user. # This information is obtained from the login form. username = request.POST['username'] password = request.POST['password'] # Use Django's machinery to attempt to see if the username/password # combination is valid - a User object is returned if it is. user = authenticate(username=username, password=password) # If we have a User object, the details are correct. # If None (Python's way of representing the absence of a value), no user # with matching credentials was found. if user: # Is the account active? It could have been disabled. if user.is_active: # If the account is valid and active, we can log the user in. # We'll send the user back to the homepage. login(request, user) return HttpResponseRedirect('/seeding/') else: # An inactive account was used - no logging in! return HttpResponse("Your account is disabled.") else: # Bad login details were provided. So we can't log the user in. print "Invalid login details: {0}, {1}".format(username, password) return HttpResponse("Invalid login details supplied.") # The request is not a HTTP POST, so display the login form. # This scenario would most likely be a HTTP GET. else: # No context variables to pass to the template system, hence the # blank dictionary object... return render_to_response('seeding/login.html', {}, context)
gpl-2.0
-2,677,022,637,569,270,300
34.55914
81
0.636831
false
drjova/zenodo
zenodo/modules/deposit/workflows/upload.py
1
27368
# -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2012, 2013, 2014, 2015 CERN. # # Zenodo is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Zenodo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo. If not, see <http://www.gnu.org/licenses/>. # # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. from __future__ import absolute_import import json from datetime import date from flask import render_template, request, url_for from flask.ext.login import current_user from flask.ext.restful import fields, marshal from workflow import patterns as p from invenio.base.globals import cfg from invenio.base.helpers import unicodifier from invenio.ext.login import UserInfo from invenio.ext.restful import ISODate, error_codes from invenio.ext.sqlalchemy import db from invenio.modules.communities.models import Community from invenio.modules.deposit.helpers import record_to_draft from invenio.modules.deposit.models import Deposition, DepositionType, \ InvalidApiAction from invenio.modules.deposit.tasks import create_recid, finalize_record_sip, \ has_submission, is_sip_uploaded, load_record, merge_changes, \ merge_record, mint_pid, prefill_draft, prepare_sip, process_bibdocfile, \ process_sip_metadata, render_form, upload_record_sip from invenio.modules.formatter import format_record from invenio.modules.knowledge.api import get_kb_mapping from invenio.modules.records.api import Record from zenodo.legacy.utils.zenodoutils import create_doi, filter_empty_helper from zenodo.modules.deposit.forms import ZenodoEditForm, ZenodoForm __all__ = ['upload'] CFG_LICENSE_KB = "licenses" CFG_LICENSE_SOURCE = "opendefinition.org" CFG_ZENODO_USER_COLLECTION_ID = "zenodo" CFG_ECFUNDED_USER_COLLECTION_ID = "ecfunded" # ======= # Helpers # ======= def file_firerole(uid, access_right, embargo_date): """ Compute file firerole for a file given access_right, embargo_date. """ # Generate firerole fft_status = [] if access_right == 'open': # Access to everyone fft_status = [] elif access_right == 'embargoed': # Access to submitter, deny everyone else until embargo date, # then allow all fft_status = [ 'allow uid "%s"' % uid, 'deny until "%s"' % embargo_date, 'allow any', ] elif access_right in ('closed', 'restricted',): # Access to submitter, deny everyone else fft_status = [ 'allow uid "%s"' % uid, 'deny all', ] if fft_status: return "firerole: %s" % "\n".join(fft_status) else: return "" def check_existing_pid(pid, recjson): """ In Zenodo an existing pid is either 1) pre-reserved and should be minted or 2) external and should not be minted. A user cannot enter a Zenodo pid by themselves. 
""" reserved_doi = recjson.get('prereserve_doi', None) if reserved_doi and reserved_doi['doi'] == pid: return True return False # ========================= # JSON processing functions # ========================= def process_draft(draft): """ Process loaded form JSON """ # Filter out Zenodo and OpenAIRE communities draft.values['communities'] = filter( lambda c: c['identifier'] not in [CFG_ZENODO_USER_COLLECTION_ID, CFG_ECFUNDED_USER_COLLECTION_ID], draft.values.get('communities', []) ) return draft def process_recjson(deposition, recjson): """ Process exported recjson (common for both new and edited records) """ # ================ # ISO format dates # ================ for k in recjson.keys(): if isinstance(recjson[k], date): recjson[k] = recjson[k].isoformat() # ======= # Authors # ======= if 'authors' in recjson and recjson['authors']: recjson['_first_author'] = recjson['authors'][0] recjson['_additional_authors'] = recjson['authors'][1:] # =========== # Communities # =========== try: communities = recjson.get('provisional_communities', []) # Auto-approve community if it's owned by the uploader for c in communities: if Community.query.get(c['identifier']).id_user == \ current_user.get_id(): c['provisional'] = False # Extract identifier (i.e. elements are mapped from dict -> # string) recjson['provisional_communities'] = map( lambda x: x['identifier'], filter(lambda x: x.get('provisional', False), communities) ) recjson['communities'] = map( lambda x: x['identifier'], filter(lambda x: not x.get('provisional', False), communities) ) except TypeError: # Happens on re-run pass # ============================= # Related/alternate identifiers # ============================= if recjson.get('related_identifiers', []): related_identifiers = recjson.get('related_identifiers', []) recjson['related_identifiers'] = filter( lambda x: x.get('relation', '') != 'isAlternativeIdentifier', related_identifiers ) recjson['alternate_identifiers'] = map( lambda x: {'scheme': x['scheme'], 'identifier': x['identifier']}, filter( lambda x: x.get('relation', '') == 'isAlternativeIdentifier', related_identifiers ) ) # ================= # License # ================= if recjson['access_right'] in ["open", "embargoed"]: info = get_kb_mapping(CFG_LICENSE_KB, str(recjson['license'])) if info: info = json.loads(info['value']) recjson['license'] = dict( identifier=recjson['license'], source=CFG_LICENSE_SOURCE, license=info['title'], url=info['url'], ) elif 'license' in recjson: del recjson['license'] # ======================= # Journal # ======================= # Set year or delete fields if no title is provided if recjson.get('journal.title', None): recjson['journal.year'] = recjson['publication_date'][:4] # ======================= # References # ======================= if recjson.get('references', []): recjson['references'] = map( lambda x: dict(raw_reference=x), recjson['references'] ) # ======================= # Book/chaper/report # ======================= if 'imprint.publisher' in recjson and 'imprint.place' in recjson: recjson['imprint.year'] = recjson['publication_date'][:4] if 'part_of.title' in recjson: mapping = [ ('part_of.publisher', 'imprint.publisher'), ('part_of.place', 'imprint.place'), ('part_of.year', 'imprint.year'), ('part_of.isbn', 'isbn'), ] for new, old in mapping: if old in recjson: try: recjson[new] = recjson[old] del recjson[old] except KeyError: pass # ================= # Grants # ================= # Remap incoming dictionary recjson['grants'] = map( lambda x: dict( title="%s - %s (%s)" % 
(x['acronym'], x['title'], x['id']), identifier=x['id'] ), recjson.get('grants', []) ) # ======================= # Filter out empty fields # ======================= filter_empty_elements(recjson) # ================================== # Map dot-keys to their dictionaries # ================================== for k in recjson.keys(): if '.' in k: mainkey, subkey = k.split('.') if mainkey not in recjson: recjson[mainkey] = {} recjson[mainkey][subkey] = recjson.pop(k) return recjson def filter_empty_elements(recjson): list_fields = [ 'authors', 'keywords', 'thesis_supervisors', 'subjects', ] for key in list_fields: recjson[key] = filter( filter_empty_helper(), recjson.get(key, []) ) recjson['related_identifiers'] = filter( filter_empty_helper(keys=['identifier']), recjson.get('related_identifiers', []) ) recjson['contributors'] = filter( filter_empty_helper(keys=['name', 'affiliation']), recjson.get('contributors', []) ) return recjson def process_recjson_new(deposition, recjson): """ Process exported recjson for a new record """ process_recjson(deposition, recjson) # ================ # Owner # ================ # Owner of record (can edit/view the record) user = UserInfo(deposition.user_id) email = user.info.get('email', '') recjson['owner'] = dict( email=email, username=user.info.get('nickname', ''), id=deposition.user_id, deposition_id=deposition.id, ) # =========== # Communities # =========== # Specific Zenodo user collection, used to curate content for # Zenodo if CFG_ZENODO_USER_COLLECTION_ID not in recjson['provisional_communities']: recjson['provisional_communities'].append( CFG_ZENODO_USER_COLLECTION_ID ) # Specific Zenodo user collection for OpenAIRE (used to curate # FP7 funded research) if recjson.get('grants', []) and CFG_ECFUNDED_USER_COLLECTION_ID \ not in recjson['provisional_communities']: recjson['provisional_communities'].append( CFG_ECFUNDED_USER_COLLECTION_ID ) # ============================== # Files (sorting + restrictions) # ============================== fft_status = file_firerole( deposition.user_id, recjson['access_right'], recjson.get('embargo_date', None) ) # Calculate number of leading zeros needed in the comment. file_commment_fmt = "%%0%dd" % len(str(len(recjson['fft']))) for idx, f in enumerate(recjson['fft']): f['restriction'] = fft_status # Bibdocfile does not have any concept of ordering, nor will # bibupload keep the order of FFT tags for the MARC field 8564. # Hence, this trick stores the ordering of files for a record in # the files comment, so files can be alphabetically sorted by their # comment (i.e. we add leading zeros). f['comment'] = file_commment_fmt % idx return recjson def process_recjson_edit(deposition, recjson): """ Process recjson for an edited record """ process_recjson(deposition, recjson) # Remove all FFTs try: del recjson['fft'] except KeyError: pass return recjson def process_files(deposition, bibrecdocs): """ Process bibrecdocs for extra files """ sip = deposition.get_latest_sip(sealed=False) fft_status = file_firerole( sip.metadata['owner']['id'], sip.metadata['access_right'], sip.metadata.get('embargo_date'), ) sip.metadata['fft'] = [] for bf in bibrecdocs.list_latest_files(): sip.metadata['fft'].append({ 'name': bf.name, 'format': bf.format, 'restriction': fft_status, 'description': 'KEEP-OLD-VALUE', 'comment': 'KEEP-OLD-VALUE', }) def merge(deposition, dest, a, b): """ Merge changes from editing a deposition. 
""" # A record might have been approved in communities since it was loaded, # thus we "manually" merge communities approved = set(a['communities']) & set(b['provisional_communities']) communities = b['communities'] provisional = [] for c in b['provisional_communities']: if c in approved: if c not in communities: communities.append(c) else: provisional.append(c) # Ensure that no community is in two states common = set(communities) & set(provisional) for c in common: provisional.pop(c) b['communities'] = communities b['provisional_communities'] = provisional # Append Zenodo collection if CFG_ZENODO_USER_COLLECTION_ID in dest['communities']: a['communities'].append(CFG_ZENODO_USER_COLLECTION_ID) b['communities'].append(CFG_ZENODO_USER_COLLECTION_ID) elif CFG_ZENODO_USER_COLLECTION_ID in dest['provisional_communities']: a['provisional_communities'].append(CFG_ZENODO_USER_COLLECTION_ID) b['provisional_communities'].append(CFG_ZENODO_USER_COLLECTION_ID) if CFG_ECFUNDED_USER_COLLECTION_ID in dest['communities']: a['communities'].append(CFG_ECFUNDED_USER_COLLECTION_ID) b['communities'].append(CFG_ECFUNDED_USER_COLLECTION_ID) elif CFG_ECFUNDED_USER_COLLECTION_ID in dest['provisional_communities']: a['provisional_communities'].append(CFG_ECFUNDED_USER_COLLECTION_ID) b['provisional_communities'].append(CFG_ECFUNDED_USER_COLLECTION_ID) b["doi"] = a["doi"] # Now proceed, with normal merging. data = merge_changes(deposition, dest, a, b) if 'authors' in data and data['authors']: data['_first_author'] = data['authors'][0] data['_additional_authors'] = data['authors'][1:] # Force ownership (owner of record (can edit/view the record)) user = UserInfo(deposition.user_id) data['owner'].update(dict( email=user.info.get('email', ''), username=user.info.get('nickname', ''), id=deposition.user_id, deposition_id=deposition.id, )) return data def transfer_ownership(deposition, user_id): """ Transfer ownership of a deposition """ if deposition.state != 'done': return False # Get latest uploaded SIP sip = deposition.get_latest_sip(sealed=True) if not is_sip_uploaded(sip): return False # Change user_id deposition.user_id = user_id db.session.commit() # Re-upload record to apply changes (e.g. file restrictions and uploader) deposition.reinitialize_workflow() deposition.run_workflow(headless=True) deposition.drafts['_edit'].completed = True deposition.run_workflow(headless=True) return True # ============== # Workflow tasks # ============== def run_tasks(update=False): """Run bibtasklet and webcoll after upload.""" def _run_tasks(obj, dummy_eng): from invenio.legacy.bibsched.bibtask import task_low_level_submission d = Deposition(obj) sip = d.get_latest_sip(sealed=True) recid = sip.metadata['recid'] common_args = [] sequenceid = getattr(d.workflow_object, 'task_sequence_id', None) if sequenceid: common_args += ['-I', str(sequenceid)] if update: tasklet_name = 'bst_openaire_update_upload' else: tasklet_name = 'bst_openaire_new_upload' task_id = task_low_level_submission( 'bibtasklet', 'webdeposit', '-T', tasklet_name, '--argument', 'recid=%s' % recid, *common_args ) sip.task_ids.append(task_id) d.update() return _run_tasks def reserved_recid(): """ Check for existence of a reserved recid and put in metadata so other tasks are not going to reserve yet another recid. 
""" def _reserved_recid(obj, dummy_eng): d = Deposition(obj) sip = d.get_latest_sip(sealed=False) reserved_doi = sip.metadata.get('prereserve_doi', None) if reserved_doi and reserved_doi['recid']: sip.metadata['recid'] = reserved_doi['recid'] d.update() return _reserved_recid def api_validate_files(): """ Check for existence of a reserved recid and put in metadata so other tasks are not going to reserve yet another recid. """ def _api_validate_files(obj, eng): if getattr(request, 'is_api_request', False): d = Deposition(obj) if len(d.files) < 1: d.set_render_context(dict( response=dict( message="Bad request", status=400, errors=[dict( message="Minimum one file must be provided.", code=error_codes['validation_error'] )], ), status=400, )) d.update() eng.halt("API: No files provided") else: # Mark all drafts as completed for draft in d.drafts.values(): draft.complete() d.update() return _api_validate_files # =============== # Deposition type # =============== class upload(DepositionType): """ Zenodo deposition workflow """ workflow = [ p.IF_ELSE( has_submission, # Existing deposition [ # Load initial record load_record( draft_id='_edit', post_process=process_draft ), # Render the form and wait until it is completed render_form(draft_id='_edit'), ], # New deposition [ # Load pre-filled data from cache prefill_draft(draft_id='_default'), # Render the form and wait until it is completed render_form(draft_id='_default'), # Test if all files are available for API api_validate_files(), ] ), # Create the submission information package by merging data # from all drafts - i.e. generate the recjson. prepare_sip(), p.IF_ELSE( has_submission, [ # Process SIP recjson process_sip_metadata(process_recjson_edit), # Merge SIP metadata into record and generate MARC merge_record( draft_id='_edit', post_process_load=process_draft, process_export=process_recjson_edit, merge_func=merge, ), # Set file restrictions process_bibdocfile(process=process_files), ], [ # Check for reserved recids. reserved_recid(), # Reserve a new record id create_recid(), # Register DOI in internal pid store. mint_pid( pid_field='doi', pid_store_type='doi', pid_creator=lambda recjson: create_doi( recid=recjson['recid'] )['doi'], existing_pid_checker=check_existing_pid, ), # Process SIP metadata process_sip_metadata(process_recjson_new), ] ), # Generate MARC based on recjson structure finalize_record_sip(), p.IF_ELSE( has_submission, [ # Seal the SIP and write MARCXML file and call bibupload on it upload_record_sip(), # Schedule background tasks. run_tasks(update=True), ], [ # Note: after upload_record_sip(), has_submission will return # True no matter if it's a new or editing of a deposition. 
upload_record_sip(), run_tasks(update=False), ] ), ] name = "Upload" name_plural = "Uploads" editable = True stopable = True enabled = True default = True api = True draft_definitions = { '_default': ZenodoForm, '_edit': ZenodoEditForm, } marshal_metadata_fields = dict( access_right=fields.String, access_conditions=fields.String, communities=fields.List(fields.Raw), conference_acronym=fields.String, conference_dates=fields.String, conference_place=fields.String, conference_title=fields.String, conference_url=fields.String, conference_session=fields.String, conference_session_part=fields.String, creators=fields.Raw(default=[]), description=fields.String, doi=fields.String(default=''), embargo_date=ISODate, grants=fields.List(fields.Raw), image_type=fields.String(default=''), imprint_isbn=fields.String, imprint_place=fields.String, imprint_publisher=fields.String, journal_issue=fields.String, journal_pages=fields.String, journal_title=fields.String, journal_volume=fields.String, keywords=fields.Raw(default=[]), subjects=fields.Raw(default=[]), license=fields.String, notes=fields.String(default=''), partof_pages=fields.String, partof_title=fields.String, prereserve_doi=fields.Raw, publication_date=ISODate, publication_type=fields.String(default=''), references=fields.List(fields.String, default=[]), related_identifiers=fields.Raw(default=[]), thesis_supervisors=fields.Raw(default=[]), title=fields.String, upload_type=fields.String, contributors=fields.Raw(default=[]), ) marshal_metadata_edit_fields = marshal_metadata_fields.copy() del marshal_metadata_edit_fields['prereserve_doi'] #marshal_metadata_edit_fields.update(dict( # recid=fields.Integer, # version_id=UTCISODateTime, #)) marshal_deposition_fields = DepositionType.marshal_deposition_fields.copy() del marshal_deposition_fields['drafts'] marshal_draft_fields = DepositionType.marshal_draft_fields.copy() marshal_draft_fields['metadata'] = fields.Nested( marshal_metadata_fields, attribute='values' ) del marshal_draft_fields['id'] del marshal_draft_fields['completed'] @classmethod def default_draft_id(cls, deposition): if deposition.has_sip() and '_edit' in deposition.drafts: return '_edit' return '_default' @classmethod def marshal_deposition(cls, deposition): """ Generate a JSON representation for REST API of a Deposition """ # Get draft if deposition.has_sip() and '_edit' in deposition.drafts: draft = deposition.get_draft('_edit') metadata_fields = cls.marshal_metadata_edit_fields elif deposition.has_sip(): # FIXME: Not based on latest available data in record. 
sip = deposition.get_latest_sip(sealed=True) draft = record_to_draft( Record.create(sip.package, master_format='marc'), post_process=process_draft ) metadata_fields = cls.marshal_metadata_edit_fields else: draft = deposition.get_or_create_draft('_default') metadata_fields = cls.marshal_metadata_fields # Fix known differences in marshalling draft.values = filter_empty_elements(draft.values) if 'grants' not in draft.values: draft.values['grants'] = [] # Set disabled values to None in output for field, flags in draft.flags.items(): if 'disabled' in flags and field in draft.values: del draft.values[field] # Marshal deposition obj = marshal(deposition, cls.marshal_deposition_fields) # Marshal the metadata attribute obj['metadata'] = marshal(unicodifier(draft.values), metadata_fields) # Add record and DOI information from latest SIP for sip in deposition.sips: if sip.is_sealed(): recjson = sip.metadata if recjson.get('recid'): obj['record_id'] = fields.Integer().format( recjson.get('recid') ) obj['record_url'] = fields.String().format(url_for( 'record.metadata', recid=recjson.get('recid'), _external=True )) if recjson.get('doi') and \ recjson.get('doi').startswith(cfg['CFG_DATACITE_DOI_PREFIX'] +"/"): obj['doi'] = fields.String().format(recjson.get('doi')) obj['doi_url'] = fields.String().format( "http://dx.doi.org/%s" % obj['doi'] ) break return obj @classmethod def marshal_draft(cls, obj): """ Generate a JSON representation for REST API of a DepositionDraft """ return marshal(obj, cls.marshal_draft_fields) @classmethod def api_action(cls, deposition, action_id): if action_id == 'publish': return deposition.run_workflow(headless=True) elif action_id == 'edit': # Trick: Works in combination with load_record task to provide # proper response codes to API clients. if deposition.state == 'done' or deposition.drafts: deposition.reinitialize_workflow() return deposition.run_workflow(headless=True) elif action_id == 'discard': deposition.stop_workflow() deposition.save() return deposition.marshal(), 201 raise InvalidApiAction(action_id) @classmethod def api_metadata_schema(cls, draft_id): schema = super(upload, cls).api_metadata_schema(draft_id) if schema and draft_id == '_edit': if 'recid' in schema['schema']: del schema['schema']['recid'] if 'modification_date' in schema['schema']: del schema['schema']['modification_date'] return schema @classmethod def render_completed(cls, d): """ Render page when deposition was successfully completed """ ctx = dict( deposition=d, deposition_type=( None if d.type.is_default() else d.type.get_identifier() ), uuid=d.id, my_depositions=list(Deposition.get_depositions( current_user, type=d.type )), sip=d.get_latest_sip(), format_record=format_record, ) return render_template('deposit/completed.html', **ctx)
gpl-3.0
3,779,258,443,149,295,000
32.133172
79
0.565405
false
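The access logic in file_firerole() above is easiest to read off the strings it emits. The snippet below simply copies that helper out of the record and prints its output for each access level; the uid and embargo date are made-up example values.

# Standalone copy of file_firerole() from the record above, exercised for
# each access_right value (illustrative only).
def file_firerole(uid, access_right, embargo_date):
    fft_status = []
    if access_right == 'open':
        fft_status = []
    elif access_right == 'embargoed':
        fft_status = [
            'allow uid "%s"' % uid,
            'deny until "%s"' % embargo_date,
            'allow any',
        ]
    elif access_right in ('closed', 'restricted',):
        fft_status = [
            'allow uid "%s"' % uid,
            'deny all',
        ]
    if fft_status:
        return "firerole: %s" % "\n".join(fft_status)
    return ""

print(file_firerole(42, 'open', None))               # '' -> unrestricted
print(file_firerole(42, 'embargoed', '2015-01-01'))  # allow owner, deny until date
print(file_firerole(42, 'closed', None))             # allow owner, deny all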
edsu/twarc
utils/retweets.py
1
1795
#!/usr/bin/env python """ Reads a stream of twitter data and writes out data for the top 10 retweets. Use the --results option to change the number of results. """ from __future__ import print_function import json import optparse import fileinput def main(): retweets = [] parser = optparse.OptionParser() parser.add_option("-r", "--results", dest="results", default=10, type="int", help="number of top retweets to find") options, argv = parser.parse_args() min_rt = 0 # TODO: maybe this index should be on disk berkeleydb or something? seen = set() for line in fileinput.input(argv): try: tweet = json.loads(line) except: continue if 'retweeted_status' not in tweet: continue if tweet['retweeted_status']['id_str'] in seen: # TODO: make this work for data that is not reverse-chrono? continue rt = tweet['retweeted_status'] if rt['retweet_count'] > min_rt or len(retweets) < options.results: seen.add(rt['id_str']) insert(rt, retweets, options.results) min_rt = retweets[-1]['retweet_count'] for rt in retweets: print(json.dumps(rt)) def insert(rt, retweets, num_results): num_retweets = len(retweets) if num_retweets == 0: retweets.append(rt) return # there's a more efficient way of doing this for i in range(0, len(retweets)): if rt['retweet_count'] > retweets[i]['retweet_count']: retweets.insert(i, rt) break if len(retweets) == num_retweets: retweets.append(rt) # trim less popular ones while len(retweets) > num_results: retweets.pop() if __name__ == "__main__": main()
mit
-2,691,810,667,764,524,500
27.492063
80
0.594429
false
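The insert() helper above maintains the top-N list by hand, and its own comment notes there is a more efficient way. Below is a hedged sketch of the same pass built on a dict plus heapq.nlargest; it is a standalone illustration, not part of twarc, and the function name is made up.

# Sketch: top-N retweets using a dict keyed on id_str and heapq.nlargest,
# instead of keeping a sorted list by hand.
import heapq
import json

def top_retweets(lines, n=10):
    best = {}  # id_str -> retweeted_status with the highest count seen
    for line in lines:
        try:
            tweet = json.loads(line)
        except ValueError:
            continue
        rt = tweet.get('retweeted_status')
        if not rt:
            continue
        prev = best.get(rt['id_str'])
        if prev is None or rt['retweet_count'] > prev['retweet_count']:
            best[rt['id_str']] = rt
    return heapq.nlargest(n, best.values(), key=lambda r: r['retweet_count'])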
jiwanlimbu/aura
keystone/tests/unit/test_policy.py
1
10409
# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os import uuid from oslo_policy import policy as common_policy import six from testtools import matchers import keystone.conf from keystone import exception from keystone.policy.backends import rules from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF class PolicyFileTestCase(unit.TestCase): def setUp(self): # self.tmpfilename should exist before setUp super is called # this is to ensure it is available for the config_fixture in # the config_overrides call. self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name super(PolicyFileTestCase, self).setUp() self.target = {} def _policy_fixture(self): return ksfixtures.Policy(self.tmpfilename, self.config_fixture) def test_modified_policy_reloads(self): action = "example:test" empty_credentials = {} with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": []}""") rules.enforce(empty_credentials, action, self.target) with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ["false:false"]}""") rules._ENFORCER.clear() self.assertRaises(exception.ForbiddenAction, rules.enforce, empty_credentials, action, self.target) class PolicyTestCase(unit.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() self.rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], "example:get_http": [["http:http://www.example.com"]], "example:my_file": [["role:compute_admin"], ["project_id:%(project_id)s"]], "example:early_and_fail": [["false:false", "rule:true"]], "example:early_or_success": [["rule:true"], ["false:false"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } # NOTE(vish): then overload underlying policy engine self._set_rules() self.credentials = {} self.target = {} def _set_rules(self): these_rules = common_policy.Rules.from_dict(self.rules) rules._ENFORCER.set_rules(these_rules) def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def test_enforce_good_action(self): action = "example:allowed" rules.enforce(self.credentials, action, self.target) def test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} credentials = {'project_id': 'fake', 'roles': []} action = "example:my_file" rules.enforce(credentials, action, target_mine) self.assertRaises(exception.ForbiddenAction, rules.enforce, credentials, action, target_not_mine) def test_early_AND_enforcement(self): action = 
"example:early_and_fail" self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" rules.enforce(self.credentials, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince): We mix case in the Admin role here to ensure # case is ignored admin_credentials = {'roles': ['AdMiN']} rules.enforce(admin_credentials, lowercase_action, self.target) rules.enforce(admin_credentials, uppercase_action, self.target) class DefaultPolicyTestCase(unit.TestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() self.rules = { "default": [], "example:exist": [["false:false"]] } self._set_rules('default') self.credentials = {} # FIXME(gyee): latest Oslo policy Enforcer class reloads the rules in # its enforce() method even though rules has been initialized via # set_rules(). To make it easier to do our tests, we're going to # monkeypatch load_roles() so it does nothing. This seem like a bug in # Oslo policy as we shouldn't have to reload the rules if they have # already been set using set_rules(). self._old_load_rules = rules._ENFORCER.load_rules self.addCleanup(setattr, rules._ENFORCER, 'load_rules', self._old_load_rules) rules._ENFORCER.load_rules = lambda *args, **kwargs: None def _set_rules(self, default_rule): these_rules = common_policy.Rules.from_dict(self.rules, default_rule) rules._ENFORCER.set_rules(these_rules) def test_policy_called(self): self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, "example:exist", {}) def test_not_found_policy_calls_default(self): rules.enforce(self.credentials, "example:noexist", {}) def test_default_not_found(self): new_default_rule = "default_noexist" # FIXME(gyee): need to overwrite the Enforcer's default_rule first # as it is recreating the rules with its own default_rule instead # of the default_rule passed in from set_rules(). I think this is a # bug in Oslo policy. 
rules._ENFORCER.default_rule = new_default_rule self._set_rules(new_default_rule) self.assertRaises(exception.ForbiddenAction, rules.enforce, self.credentials, "example:noexist", {}) class PolicyJsonTestCase(unit.TestCase): def _load_entries(self, filename): return set(json.load(open(filename))) def test_json_examples_have_matching_entries(self): policy_keys = self._load_entries(unit.dirs.etc('policy.json')) cloud_policy_keys = self._load_entries( unit.dirs.etc('policy.v3cloudsample.json')) policy_extra_keys = ['admin_or_token_subject', 'service_admin_or_token_subject', 'token_subject', ] expected_policy_keys = list(cloud_policy_keys) + policy_extra_keys diffs = set(policy_keys).difference(set(expected_policy_keys)) self.assertThat(diffs, matchers.Equals(set())) def test_policies_loads(self): action = 'identity:list_projects' target = {'user_id': uuid.uuid4().hex, 'user.domain_id': uuid.uuid4().hex, 'group.domain_id': uuid.uuid4().hex, 'project.domain_id': uuid.uuid4().hex, 'project_id': uuid.uuid4().hex, 'domain_id': uuid.uuid4().hex} credentials = {'username': uuid.uuid4().hex, 'token': uuid.uuid4().hex, 'project_name': None, 'user_id': uuid.uuid4().hex, 'roles': [u'admin'], 'is_admin': True, 'is_admin_project': True, 'project_id': None, 'domain_id': uuid.uuid4().hex} standard_policy = unit.dirs.etc('policy.json') enforcer = common_policy.Enforcer(CONF, policy_file=standard_policy) result = enforcer.enforce(action, target, credentials) self.assertTrue(result) domain_policy = unit.dirs.etc('policy.v3cloudsample.json') enforcer = common_policy.Enforcer(CONF, policy_file=domain_policy) result = enforcer.enforce(action, target, credentials) self.assertTrue(result) def test_all_targets_documented(self): # All the targets in the sample policy file must be documented in # doc/source/policy_mapping.rst. policy_keys = self._load_entries(unit.dirs.etc('policy.json')) # These keys are in the policy.json but aren't targets. policy_rule_keys = [ 'admin_or_owner', 'admin_or_token_subject', 'admin_required', 'default', 'owner', 'service_admin_or_token_subject', 'service_or_admin', 'service_role', 'token_subject', ] def read_doc_targets(): # Parse the doc/source/policy_mapping.rst file and return the # targets. doc_path = os.path.join( unit.ROOTDIR, 'doc', 'source', 'policy_mapping.rst') with open(doc_path) as doc_file: for line in doc_file: if line.startswith('Target'): break for line in doc_file: # Skip === line if line.startswith('==='): break for line in doc_file: line = line.rstrip() if not line or line.startswith(' '): continue if line.startswith('=='): break target, dummy, dummy = line.partition(' ') yield six.text_type(target) doc_targets = list(read_doc_targets()) self.assertItemsEqual(policy_keys, doc_targets + policy_rule_keys)
apache-2.0
-8,015,890,782,514,124,000
40.47012
79
0.608128
false
jmwright/cadquery-freecad-module
Libs/docutils/core.py
1
29533
# $Id: core.py 8126 2017-06-23 09:34:28Z milde $ # Author: David Goodger <[email protected]> # Copyright: This module has been placed in the public domain. """ Calling the ``publish_*`` convenience functions (or instantiating a `Publisher` object) with component names will result in default behavior. For custom behavior (setting component options), create custom component objects first, and pass *them* to ``publish_*``/`Publisher`. See `The Docutils Publisher`_. .. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html """ __docformat__ = 'reStructuredText' import sys import pprint from docutils import __version__, __version_details__, SettingsSpec from docutils import frontend, io, utils, readers, writers from docutils.frontend import OptionParser from docutils.transforms import Transformer from docutils.utils.error_reporting import ErrorOutput, ErrorString import docutils.readers.doctree class Publisher: """ A facade encapsulating the high-level logic of a Docutils system. """ def __init__(self, reader=None, parser=None, writer=None, source=None, source_class=io.FileInput, destination=None, destination_class=io.FileOutput, settings=None): """ Initial setup. If any of `reader`, `parser`, or `writer` are not specified, the corresponding ``set_...`` method should be called with a component name (`set_reader` sets the parser as well). """ self.document = None """The document tree (`docutils.nodes` objects).""" self.reader = reader """A `docutils.readers.Reader` instance.""" self.parser = parser """A `docutils.parsers.Parser` instance.""" self.writer = writer """A `docutils.writers.Writer` instance.""" for component in 'reader', 'parser', 'writer': assert not isinstance(getattr(self, component), str), ( 'passed string "%s" as "%s" parameter; pass an instance, ' 'or use the "%s_name" parameter instead (in ' 'docutils.core.publish_* convenience functions).' % (getattr(self, component), component, component)) self.source = source """The source of input data, a `docutils.io.Input` instance.""" self.source_class = source_class """The class for dynamically created source objects.""" self.destination = destination """The destination for docutils output, a `docutils.io.Output` instance.""" self.destination_class = destination_class """The class for dynamically created destination objects.""" self.settings = settings """An object containing Docutils settings as instance attributes. 
Set by `self.process_command_line()` or `self.get_settings()`.""" self._stderr = ErrorOutput() def set_reader(self, reader_name, parser, parser_name): """Set `self.reader` by name.""" reader_class = readers.get_reader_class(reader_name) self.reader = reader_class(parser, parser_name) self.parser = self.reader.parser def set_writer(self, writer_name): """Set `self.writer` by name.""" writer_class = writers.get_writer_class(writer_name) self.writer = writer_class() def set_components(self, reader_name, parser_name, writer_name): if self.reader is None: self.set_reader(reader_name, self.parser, parser_name) if self.parser is None: if self.reader.parser is None: self.reader.set_parser(parser_name) self.parser = self.reader.parser if self.writer is None: self.set_writer(writer_name) def setup_option_parser(self, usage=None, description=None, settings_spec=None, config_section=None, **defaults): if config_section: if not settings_spec: settings_spec = SettingsSpec() settings_spec.config_section = config_section parts = config_section.split() if len(parts) > 1 and parts[-1] == 'application': settings_spec.config_section_dependencies = ['applications'] #@@@ Add self.source & self.destination to components in future? option_parser = OptionParser( components=(self.parser, self.reader, self.writer, settings_spec), defaults=defaults, read_config_files=True, usage=usage, description=description) return option_parser def get_settings(self, usage=None, description=None, settings_spec=None, config_section=None, **defaults): """ Set and return default settings (overrides in `defaults` dict). Set components first (`self.set_reader` & `self.set_writer`). Explicitly setting `self.settings` disables command line option processing from `self.publish()`. """ option_parser = self.setup_option_parser( usage, description, settings_spec, config_section, **defaults) self.settings = option_parser.get_default_values() return self.settings def process_programmatic_settings(self, settings_spec, settings_overrides, config_section): if self.settings is None: defaults = (settings_overrides or {}).copy() # Propagate exceptions by default when used programmatically: defaults.setdefault('traceback', True) self.get_settings(settings_spec=settings_spec, config_section=config_section, **defaults) def process_command_line(self, argv=None, usage=None, description=None, settings_spec=None, config_section=None, **defaults): """ Pass an empty list to `argv` to avoid reading `sys.argv` (the default). Set components first (`self.set_reader` & `self.set_writer`). """ option_parser = self.setup_option_parser( usage, description, settings_spec, config_section, **defaults) if argv is None: argv = sys.argv[1:] # converting to Unicode (Python 3 does this automatically): if sys.version_info < (3,0): # TODO: make this failsafe and reversible? 
argv_encoding = (frontend.locale_encoding or 'ascii') argv = [a.decode(argv_encoding) for a in argv] self.settings = option_parser.parse_args(argv) def set_io(self, source_path=None, destination_path=None): if self.source is None: self.set_source(source_path=source_path) if self.destination is None: self.set_destination(destination_path=destination_path) def set_source(self, source=None, source_path=None): if source_path is None: source_path = self.settings._source else: self.settings._source = source_path # Raise IOError instead of system exit with `tracback == True` # TODO: change io.FileInput's default behaviour and remove this hack try: self.source = self.source_class( source=source, source_path=source_path, encoding=self.settings.input_encoding) except TypeError: self.source = self.source_class( source=source, source_path=source_path, encoding=self.settings.input_encoding) def set_destination(self, destination=None, destination_path=None): if destination_path is None: destination_path = self.settings._destination else: self.settings._destination = destination_path self.destination = self.destination_class( destination=destination, destination_path=destination_path, encoding=self.settings.output_encoding, error_handler=self.settings.output_encoding_error_handler) def apply_transforms(self): self.document.transformer.populate_from_components( (self.source, self.reader, self.reader.parser, self.writer, self.destination)) self.document.transformer.apply_transforms() def publish(self, argv=None, usage=None, description=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Process command line options and arguments (if `self.settings` not already set), run `self.reader` and then `self.writer`. Return `self.writer`'s output. """ exit = None try: if self.settings is None: self.process_command_line( argv, usage, description, settings_spec, config_section, **(settings_overrides or {})) self.set_io() self.document = self.reader.read(self.source, self.parser, self.settings) self.apply_transforms() output = self.writer.write(self.document, self.destination) self.writer.assemble_parts() except SystemExit as error: exit = 1 exit_status = error.code except Exception as error: if not self.settings: # exception too early to report nicely raise if self.settings.traceback: # Propagate exceptions? 
self.debugging_dumps() raise self.report_Exception(error) exit = True exit_status = 1 self.debugging_dumps() if (enable_exit_status and self.document and (self.document.reporter.max_level >= self.settings.exit_status_level)): sys.exit(self.document.reporter.max_level + 10) elif exit: sys.exit(exit_status) return output def debugging_dumps(self): if not self.document: return if self.settings.dump_settings: print >>self._stderr, '\n::: Runtime settings:' print >>self._stderr, pprint.pformat(self.settings.__dict__) if self.settings.dump_internals: print >>self._stderr, '\n::: Document internals:' print >>self._stderr, pprint.pformat(self.document.__dict__) if self.settings.dump_transforms: print >>self._stderr, '\n::: Transforms applied:' print >>self._stderr, (' (priority, transform class, ' 'pending node details, keyword args)') print >>self._stderr, pprint.pformat( [(priority, '%s.%s' % (xclass.__module__, xclass.__name__), pending and pending.details, kwargs) for priority, xclass, pending, kwargs in self.document.transformer.applied]) if self.settings.dump_pseudo_xml: print >>self._stderr, '\n::: Pseudo-XML:' print >>self._stderr, self.document.pformat().encode( 'raw_unicode_escape') def report_Exception(self, error): if isinstance(error, utils.SystemMessage): self.report_SystemMessage(error) elif isinstance(error, UnicodeEncodeError): self.report_UnicodeError(error) elif isinstance(error, io.InputError): self._stderr.write(u'Unable to open source file for reading:\n' u' %s\n' % ErrorString(error)) elif isinstance(error, io.OutputError): self._stderr.write( u'Unable to open destination file for writing:\n' u' %s\n' % ErrorString(error)) else: print >>self._stderr, u'%s' % ErrorString(error) print >>self._stderr, ("""\ Exiting due to error. Use "--traceback" to diagnose. Please report errors to <[email protected]>. Include "--traceback" output, Docutils version (%s%s), Python version (%s), your OS type & version, and the command line used.""" % (__version__, docutils.__version_details__ and ' [%s]'%docutils.__version_details__ or '', sys.version.split()[0])) def report_SystemMessage(self, error): print >>self._stderr, ('Exiting due to level-%s (%s) system message.' % (error.level, utils.Reporter.levels[error.level])) def report_UnicodeError(self, error): data = error.object[error.start:error.end] self._stderr.write( '%s\n' '\n' 'The specified output encoding (%s) cannot\n' 'handle all of the output.\n' 'Try setting "--output-encoding-error-handler" to\n' '\n' '* "xmlcharrefreplace" (for HTML & XML output);\n' ' the output will contain "%s" and should be usable.\n' '* "backslashreplace" (for other output formats);\n' ' look for "%s" in the output.\n' '* "replace"; look for "?" in the output.\n' '\n' '"--output-encoding-error-handler" is currently set to "%s".\n' '\n' 'Exiting due to error. Use "--traceback" to diagnose.\n' 'If the advice above doesn\'t eliminate the error,\n' 'please report it to <[email protected]>.\n' 'Include "--traceback" output, Docutils version (%s),\n' 'Python version (%s), your OS type & version, and the\n' 'command line used.\n' % (ErrorString(error), self.settings.output_encoding, data.encode('ascii', 'xmlcharrefreplace'), data.encode('ascii', 'backslashreplace'), self.settings.output_encoding_error_handler, __version__, sys.version.split()[0])) default_usage = '%prog [options] [<source> [<destination>]]' default_description = ('Reads from <source> (default is stdin) and writes to ' '<destination> (default is stdout). 
See ' '<http://docutils.sf.net/docs/user/config.html> for ' 'the full reference.') def publish_cmdline(reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=True, argv=None, usage=default_usage, description=default_description): """ Set up & run a `Publisher` for command-line-based file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. Parameters: see `publish_programmatically` for the remainder. - `argv`: Command-line argument list to use instead of ``sys.argv[1:]``. - `usage`: Usage string, output if there's a problem parsing the command line. - `description`: Program description, output for the "--help" option (along with command-line option descriptions). """ pub = Publisher(reader, parser, writer, settings=settings) pub.set_components(reader_name, parser_name, writer_name) output = pub.publish( argv, usage, description, settings_spec, settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status) return output def publish_file(source=None, source_path=None, destination=None, destination_path=None, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Set up & run a `Publisher` for programmatic use with file-like I/O. Return the encoded string output also. Parameters: see `publish_programmatically`. """ output, pub = publish_programmatically( source_class=io.FileInput, source=source, source_path=source_path, destination_class=io.FileOutput, destination=destination, destination_path=destination_path, reader=reader, reader_name=reader_name, parser=parser, parser_name=parser_name, writer=writer, writer_name=writer_name, settings=settings, settings_spec=settings_spec, settings_overrides=settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status) return output def publish_string(source, source_path=None, destination_path=None, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Set up & run a `Publisher` for programmatic use with string I/O. Return the encoded string or Unicode string output. For encoded string output, be sure to set the 'output_encoding' setting to the desired encoding. Set it to 'unicode' for unencoded Unicode string output. Here's one way:: publish_string(..., settings_overrides={'output_encoding': 'unicode'}) Similarly for Unicode string input (`source`):: publish_string(..., settings_overrides={'input_encoding': 'unicode'}) Parameters: see `publish_programmatically`. 
""" output, pub = publish_programmatically( source_class=io.StringInput, source=source, source_path=source_path, destination_class=io.StringOutput, destination=None, destination_path=destination_path, reader=reader, reader_name=reader_name, parser=parser, parser_name=parser_name, writer=writer, writer_name=writer_name, settings=settings, settings_spec=settings_spec, settings_overrides=settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status) return output def publish_parts(source, source_path=None, source_class=io.StringInput, destination_path=None, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Set up & run a `Publisher`, and return a dictionary of document parts. Dictionary keys are the names of parts, and values are Unicode strings; encoding is up to the client. For programmatic use with string I/O. For encoded string input, be sure to set the 'input_encoding' setting to the desired encoding. Set it to 'unicode' for unencoded Unicode string input. Here's how:: publish_parts(..., settings_overrides={'input_encoding': 'unicode'}) Parameters: see `publish_programmatically`. """ output, pub = publish_programmatically( source=source, source_path=source_path, source_class=source_class, destination_class=io.StringOutput, destination=None, destination_path=destination_path, reader=reader, reader_name=reader_name, parser=parser, parser_name=parser_name, writer=writer, writer_name=writer_name, settings=settings, settings_spec=settings_spec, settings_overrides=settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status) return pub.writer.parts def publish_doctree(source, source_path=None, source_class=io.StringInput, reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Set up & run a `Publisher` for programmatic use with string I/O. Return the document tree. For encoded string input, be sure to set the 'input_encoding' setting to the desired encoding. Set it to 'unicode' for unencoded Unicode string input. Here's one way:: publish_doctree(..., settings_overrides={'input_encoding': 'unicode'}) Parameters: see `publish_programmatically`. """ pub = Publisher(reader=reader, parser=parser, writer=None, settings=settings, source_class=source_class, destination_class=io.NullOutput) pub.set_components(reader_name, parser_name, 'null') pub.process_programmatic_settings( settings_spec, settings_overrides, config_section) pub.set_source(source, source_path) pub.set_destination(None, None) output = pub.publish(enable_exit_status=enable_exit_status) return pub.document def publish_from_doctree(document, destination_path=None, writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=False): """ Set up & run a `Publisher` to render from an existing document tree data structure, for programmatic use with string I/O. Return the encoded string output. Note that document.settings is overridden; if you want to use the settings of the original `document`, pass settings=document.settings. Also, new document.transformer and document.reporter objects are generated. 
For encoded string output, be sure to set the 'output_encoding' setting to the desired encoding. Set it to 'unicode' for unencoded Unicode string output. Here's one way:: publish_from_doctree( ..., settings_overrides={'output_encoding': 'unicode'}) Parameters: `document` is a `docutils.nodes.document` object, an existing document tree. Other parameters: see `publish_programmatically`. """ reader = docutils.readers.doctree.Reader(parser_name='null') pub = Publisher(reader, None, writer, source=io.DocTreeInput(document), destination_class=io.StringOutput, settings=settings) if not writer and writer_name: pub.set_writer(writer_name) pub.process_programmatic_settings( settings_spec, settings_overrides, config_section) pub.set_destination(None, destination_path) return pub.publish(enable_exit_status=enable_exit_status) def publish_cmdline_to_binary(reader=None, reader_name='standalone', parser=None, parser_name='restructuredtext', writer=None, writer_name='pseudoxml', settings=None, settings_spec=None, settings_overrides=None, config_section=None, enable_exit_status=True, argv=None, usage=default_usage, description=default_description, destination=None, destination_class=io.BinaryFileOutput ): """ Set up & run a `Publisher` for command-line-based file I/O (input and output file paths taken automatically from the command line). Return the encoded string output also. This is just like publish_cmdline, except that it uses io.BinaryFileOutput instead of io.FileOutput. Parameters: see `publish_programmatically` for the remainder. - `argv`: Command-line argument list to use instead of ``sys.argv[1:]``. - `usage`: Usage string, output if there's a problem parsing the command line. - `description`: Program description, output for the "--help" option (along with command-line option descriptions). """ pub = Publisher(reader, parser, writer, settings=settings, destination_class=destination_class) pub.set_components(reader_name, parser_name, writer_name) output = pub.publish( argv, usage, description, settings_spec, settings_overrides, config_section=config_section, enable_exit_status=enable_exit_status) return output def publish_programmatically(source_class, source, source_path, destination_class, destination, destination_path, reader, reader_name, parser, parser_name, writer, writer_name, settings, settings_spec, settings_overrides, config_section, enable_exit_status): """ Set up & run a `Publisher` for custom programmatic use. Return the encoded string output and the Publisher object. Applications should not need to call this function directly. If it does seem to be necessary to call this function directly, please write to the Docutils-develop mailing list <http://docutils.sf.net/docs/user/mailing-lists.html#docutils-develop>. Parameters: * `source_class` **required**: The class for dynamically created source objects. Typically `io.FileInput` or `io.StringInput`. * `source`: Type depends on `source_class`: - If `source_class` is `io.FileInput`: Either a file-like object (must have 'read' and 'close' methods), or ``None`` (`source_path` is opened). If neither `source` nor `source_path` are supplied, `sys.stdin` is used. - If `source_class` is `io.StringInput` **required**: The input string, either an encoded 8-bit string (set the 'input_encoding' setting to the correct encoding) or a Unicode string (set the 'input_encoding' setting to 'unicode'). * `source_path`: Type depends on `source_class`: - `io.FileInput`: Path to the input file, opened if no `source` supplied. - `io.StringInput`: Optional. 
Path to the file or object that produced `source`. Only used for diagnostic output. * `destination_class` **required**: The class for dynamically created destination objects. Typically `io.FileOutput` or `io.StringOutput`. * `destination`: Type depends on `destination_class`: - `io.FileOutput`: Either a file-like object (must have 'write' and 'close' methods), or ``None`` (`destination_path` is opened). If neither `destination` nor `destination_path` are supplied, `sys.stdout` is used. - `io.StringOutput`: Not used; pass ``None``. * `destination_path`: Type depends on `destination_class`: - `io.FileOutput`: Path to the output file. Opened if no `destination` supplied. - `io.StringOutput`: Path to the file or object which will receive the output; optional. Used for determining relative paths (stylesheets, source links, etc.). * `reader`: A `docutils.readers.Reader` object. * `reader_name`: Name or alias of the Reader class to be instantiated if no `reader` supplied. * `parser`: A `docutils.parsers.Parser` object. * `parser_name`: Name or alias of the Parser class to be instantiated if no `parser` supplied. * `writer`: A `docutils.writers.Writer` object. * `writer_name`: Name or alias of the Writer class to be instantiated if no `writer` supplied. * `settings`: A runtime settings (`docutils.frontend.Values`) object, for dotted-attribute access to runtime settings. It's the end result of the `SettingsSpec`, config file, and option processing. If `settings` is passed, it's assumed to be complete and no further setting/config/option processing is done. * `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides extra application-specific settings definitions independently of components. In other words, the application becomes a component, and its settings data is processed along with that of the other components. Used only if no `settings` specified. * `settings_overrides`: A dictionary containing application-specific settings defaults that override the defaults of other components. Used only if no `settings` specified. * `config_section`: A string, the name of the configuration file section for this application. Overrides the ``config_section`` attribute defined by `settings_spec`. Used only if no `settings` specified. * `enable_exit_status`: Boolean; enable exit status at end of processing? """ pub = Publisher(reader, parser, writer, settings=settings, source_class=source_class, destination_class=destination_class) pub.set_components(reader_name, parser_name, writer_name) pub.process_programmatic_settings( settings_spec, settings_overrides, config_section) pub.set_source(source, source_path) pub.set_destination(destination, destination_path) output = pub.publish(enable_exit_status=enable_exit_status) return output, pub
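# A minimal usage sketch for the convenience functions defined above.  The
# reStructuredText source string is made up; 'html' is a standard Docutils
# writer name, and the settings overrides follow the publish_string /
# publish_parts docstrings.  Guarded so importing this module stays
# side-effect free.
if __name__ == '__main__':
    rst_source = u'Title\n=====\n\nHello, *world*!\n'
    # Whole rendered document as a Unicode string.
    html = publish_string(rst_source, writer_name='html',
                          settings_overrides={'output_encoding': 'unicode'})
    # Individual document parts (title, body, ...) as Unicode strings.
    parts = publish_parts(rst_source, writer_name='html',
                          settings_overrides={'input_encoding': 'unicode'})
    print(parts['title'])
    print(len(html))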
lgpl-3.0
-3,714,240,824,547,092,000
43.410526
78
0.622287
false
sixty-north/cosmic-ray
src/cosmic_ray/operators/comparison_operator_replacement.py
1
3252
"""This module contains mutation operators which replace one comparison operator with another. """ from enum import Enum import itertools import parso.python.tree from ..ast import is_none, is_number from .operator import Operator from .util import extend_name class ComparisonOperators(Enum): "All comparison operators that we mutate." Eq = '==' NotEq = '!=' Lt = '<' LtE = '<=' Gt = '>' GtE = '>=' Is = 'is' IsNot = 'is not' def _create_operator(from_op, to_op): @extend_name('_{}_{}'.format(from_op.name, to_op.name)) class ReplaceComparisonOperator(Operator): "An operator that replaces {} with {}".format(from_op.name, to_op.name) def mutation_positions(self, node): if node.type == 'comparison': # Every other child starting at 1 is a comparison operator of some sort for _, comparison_op in self._mutation_points(node): yield (comparison_op.start_pos, comparison_op.end_pos) def mutate(self, node, index): points = list(itertools.islice(self._mutation_points(node), index, index + 1)) assert len(points) == 1 op_idx, _ = points[0] mutated_comparison_op = parso.parse(' ' + to_op.value) node.children[op_idx * 2 + 1] = mutated_comparison_op return node @staticmethod def _mutation_points(node): for op_idx, comparison_op in enumerate(node.children[1::2]): if comparison_op.get_code().strip() == from_op.value: rhs = node.children[(op_idx + 1) * 2] if _allowed(to_op, from_op, rhs): yield op_idx, comparison_op @classmethod def examples(cls): return ( ('x {} y'.format(from_op.value), 'x {} y'.format(to_op.value)), ) return ReplaceComparisonOperator # Build all of the binary replacement operators _OPERATORS = tuple( _create_operator(from_op, to_op) for from_op, to_op in itertools.permutations(ComparisonOperators, 2)) # Inject the operators into the module namespace for op_cls in _OPERATORS: globals()[op_cls.__name__] = op_cls def operators(): "Iterable of all binary operator replacement mutation operators." return iter(_OPERATORS) # This determines the allowed from-to mutations when the RHS is None. _RHS_IS_NONE_OPS = { ComparisonOperators.Eq: [ComparisonOperators.IsNot], ComparisonOperators.NotEq: [ComparisonOperators.Is], ComparisonOperators.Is: [ComparisonOperators.IsNot], ComparisonOperators.IsNot: [ComparisonOperators.Is], } # This determines the allowed to mutations when the RHS is a number _RHS_IS_INTEGER_OPS = set([ ComparisonOperators.Eq, ComparisonOperators.NotEq, ComparisonOperators.Lt, ComparisonOperators.LtE, ComparisonOperators.Gt, ComparisonOperators.GtE, ]) def _allowed(to_op, from_op, rhs): "Determine if a mutation from `from_op` to `to_op` is allowed given a particular `rhs` node." if is_none(rhs): return to_op in _RHS_IS_NONE_OPS.get(from_op, ()) if is_number(rhs): return to_op in _RHS_IS_INTEGER_OPS return True
mit
5,231,077,477,629,382,000
29.971429
97
0.629151
false
axltxl/zenfig
setup.py
1
1765
#!/usr/bin/env python # -*- coding: utf-8 -*- """ setuptools config file """ import sys import pip from pip.req import parse_requirements from setuptools import setup, find_packages import os from zenfig import __version__ as version from zenfig import __author__ as author from zenfig import PKG_URL as pkg_url from zenfig import __name__ as pkg_name # parse_requirements() returns generator of pip.req.InstallRequirement objects install_reqs = parse_requirements('requirements.txt', session=pip.download.PipSession()) # reqs is a list of requirement # e.g. ['django==1.5.1', 'mezzanine==1.4.6'] reqs = [str(ir.req) for ir in install_reqs] desc = "Generic configuration files through the power of Jinja2" setup( name=pkg_name, version=version, packages=find_packages(exclude=["tests"]), author=author, author_email="[email protected]", description=desc, long_description=open("README.rst").read(), url=pkg_url, license='MIT', download_url="{url}/tarball/{version}".format(url=pkg_url, version=version), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console ', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Topic :: Utilities', 'Programming Language :: Python', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.3', ], entry_points={ 'console_scripts': [ 'zenfig = zenfig.__main__:main', ], }, tests_require = ['nose >= 1.3'], test_suite="nose.collector", install_requires = reqs, )
mit
-2,105,482,101,273,021,200
28.915254
88
0.640793
false
tjcsl/ion
intranet/middleware/session_management.py
1
1764
import time from django.contrib.auth import logout from ..apps.sessionmgmt.models import TrustedSession class SessionManagementMiddleware: """ Handles session management. """ def __init__(self, get_response): self.get_response = get_response def __call__(self, request): if request.user is not None and request.user.is_authenticated: if isinstance(request.session.get("login_time", None), float): if ( request.user.last_global_logout_time is not None and request.session["login_time"] < request.user.last_global_logout_time.timestamp() ): # This is how global logouts work for non-trusted sessions. We automatically log the user out if the user's most recent global # logout happened since the time they logged in (in this session). logout(request) time_since_login = time.time() - request.session["login_time"] if time_since_login >= 30 * 24 * 60 * 60: # Force logout after 30 days, even for trusted sessions TrustedSession.objects.filter(user=request.user, session_key=request.session.session_key).delete() logout(request) else: if request.user.last_global_logout_time is not None: # If the user has performed a global logout, all of their sessions must have a login_time set logout(request) else: # Otherwise, having a value is more important than it being 100% accurate request.session["login_time"] = time.time() return self.get_response(request)
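# A configuration sketch: new-style middleware like this (a callable taking
# get_response) is enabled through Django's MIDDLEWARE setting.  The dotted
# path below is inferred from this file's location
# (intranet/middleware/session_management.py) and is an assumption, not copied
# from the project's settings module:
#
#   MIDDLEWARE = [
#       # ... the project's other middleware ...
#       "intranet.middleware.session_management.SessionManagementMiddleware",
#   ]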
gpl-2.0
3,265,476,217,240,224,300
43.1
146
0.590703
false
flumotion-mirror/flumotion-windowsmedia
flumotion/component/consumers/wms/pull_producer.py
1
9190
# -*- Mode: Python -*- # vi:si:et:sw=4:sts=4:ts=4 # # Flumotion - a streaming media server # Copyright (C) 2004,2005,2006 Fluendo, S.L. (www.fluendo.com). # All rights reserved. # Licensees having purchased or holding a valid Flumotion Advanced # Streaming Server license may use this file in accordance with the # Flumotion Advanced Streaming Server Commercial License Agreement. # See "LICENSE.Flumotion" in the source distribution for more information. # Headers in this file shall remain intact. """ Windows Media Service Pull-Mode producer. Try to look like a Windows Media Encoder. http://msdn.microsoft.com/en-us/library/cc251167(PROT.10).aspx """ import random import time from twisted.internet import reactor, error from twisted.web import http from flumotion.common import log from flumotion.component.common import http as fhttp from flumotion.component.common.wms import common, mmsproducer # Pretend to be Windows Media Encoder SERVER_IDENT = "Rex/12.0.7600.16385 (Flumotion Streaming Server)" LOG_CATEGORY = "wms-pull" class WMSPullRequest(fhttp.Request): def __init__(self, channel, info, active): fhttp.Request.__init__(self, channel, info, active) self.agent = None self.version = None self.requested_client_id = None self.client_id = None def onInitiate(self): if self.method != "GET": self.warning("Pull requests must use GET") self.error(http.NOT_ALLOWED) required = ["Host", "User-Agent"] for name in required: if self.getRecvHeader(name) is None: self.warning("Required header %s not found", name) self.error(http.BAD_REQUEST) agent, version = self.parseUserAgent() self.agent = agent self.version = version # Force HTTP 1.0 response self.protocol = fhttp.HTTP10 client_id = None if self.getRecvHeader("Pragma"): for pragma in self.getRecvHeader("Pragma"): if pragma.startswith("client-id="): try: client_id = int(pragma[10:]) except ValueError: pass self.requested_client_id = client_id self.client_id = self.channel.factory.getClientId(client_id) self.setHeader("Server", SERVER_IDENT) self.setHeader("Cache-Control", "no-cache") self.setHeader("Pragma", "no-cache"), self.setHeader("Pragma", "client-id=%d" % self.client_id) self.setHeader("Pragma", "features=\"broadcast\"") def onActivate(self): pass def onDataReceived(self, data): pass def onAllContentReceived(self): pass def onConnectionLost(self, reason): pass ### MMS Producer callbacks ### def pushData(self, producer, data): self.write(data) class WMSPullDescribeRequest(WMSPullRequest): def onInitiate(self): WMSPullRequest.onInitiate(self) self.setHeader("Content-Type", "application/vnd.ms.wms-hdr.asfv1") def onActivate(self): header = self.channel.factory.getMMSHeader(self) if header is None: self.setResponseCode(http.NOT_FOUND) self.finish() return self.setResponseCode(http.OK) self.setLength(len(header)) self.pushData(None, header) self.finish() class WMSPullPlayRequest(WMSPullRequest): def onInitiate(self): WMSPullRequest.onInitiate(self) req_clid = self.requested_client_id if req_clid is not None and req_clid != self.client_id: self.setHeader("Pragma", "xResetStrm=1") self.setHeader("Content-Type", "application/x-mms-framed") def onActivate(self): self.setResponseCode(http.OK) if not self.channel.factory.registerPull(self): self.setResponseCode(http.NOT_FOUND) self.finish() def connectionLost(self, reason): self.channel.factory.removePull(self) class WMSPullRequestFactory(fhttp.Requestfactory): def buildRequest(self, channel, info, active): if info.method == "POST": # just silently ignore POST request return fhttp.ErrorRequest(channel, info, 
active, http.OK) agent = info.headers.get("user-agent") name, version, = fhttp.parseUserAgent(agent) if name not in ["NSServer", "NSPlayer"] or version is None: self.warning("Agent not supported: %s", agent) return fhttp.ErrorRequest(channel, info, active, http.FORBIDDEN) if name == "NSPlayer" or name == "NSServer" and version >= (7,0): pragmas = info.headers.get("pragma") if not (pragmas and "xPlayStrm=1" in pragmas): return WMSPullDescribeRequest(channel, info, active) return WMSPullPlayRequest(channel, info, active) class WMSPullFactory(fhttp.Factory): requestFactoryClass = WMSPullRequestFactory session_timeout = 5*60 expire_period = 60 def __init__(self): fhttp.Factory.__init__(self) self.header_obj = None self.data_obj = None self._requests = {} # {CLIENT_ID: Request} self._producers = {} # {CLIENT_ID: Producer} self._timeouts = {} # {CLIENT_ID: EXPIRATION_TIME} self._start_time = time.time() self._call = None self._expire() def stop(self): if self._call is not None: self._call.cancel() self._call = None for req in self._requests.values(): req.finish() self._requests.clear() self._producers.clear() self._timeouts.clear() def getClientId(self, requested_id): if requested_id in self._producers: return requested_id client_id = int((time.time() - self._start_time) * 100) % 2147483648 assert client_id not in self._producers, "Duplicated client identifier" return client_id def removePull(self, req): client_id = req.client_id if client_id in self._requests: # The request may have changed if self._requests[client_id] == req: if client_id in self._producers: self.debug("Disconnecting request from MMS producer %d", client_id) self._producers[client_id].register(None) del self._requests[client_id] def getMMSHeader(self, req): if self.header_obj is None: return None producer = self._getProducer(req.client_id) producer.reset() # Should we reset the producer ? data = self.header_obj.data + self.data_obj.data return producer.mms_header(data) def registerPull(self, req): assert req not in self._requests, "Already registered" if self.header_obj is None: return False client_id = req.client_id if client_id in self._requests: self.debug("Closing old request for MMS producer %d", client_id) self._requests[client_id].finish() del self._requests[client_id] producer = self._getProducer(client_id) producer.reset() # Should we really reset the producer ? 
self.debug("Connecting request to MMS producer %d", client_id) producer.register(req) producer.pushHeaders(self.header_obj, self.data_obj) self._requests[client_id] = req return True def pushHeaders(self, header_obj, data_obj): expiration = time.time() + self.expire_period self.header_obj = header_obj self.data_obj = data_obj for client_id, request in self._requests.items(): producer = self._producers[client_id] producer.pushHeaders(header_obj, data_obj) request.channel.resetTimeout("inactivity") self._timeouts[client_id] = expiration def pushPacket(self, packet): expiration = time.time() + self.expire_period for client_id, request in self._requests.items(): producer = self._producers[client_id] producer.pushPacket(packet) request.channel.resetTimeout("inactivity") self._timeouts[client_id] = expiration def _getProducer(self, client_id): producer = self._producers.get(client_id) if producer is None: producer = mmsproducer.MMSProducer() self._producers[client_id] = producer return producer def _expire(self): now = time.time() for client_id, expiration in self._timeouts.items(): if expiration < now: self.debug("MMS producer %d expired", client_id) if client_id in self._producers: self._producers[client_id].stop() del self._producers[client_id] if client_id in self._requests: self._requests[client_id].finish() del self._requests[client_id] del self._timeouts[client_id] self._call = reactor.callLater(self.expire_period, self._expire)
lgpl-2.1
-3,519,288,965,423,223,300
31.020906
79
0.612187
false
osiam/connector4python
python/osiam/connector.py
1
10883
# coding=utf-8 import json import requests import collections import logging __author__ = 'phil' # logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def doLog(func): def wrapped(*args, **kwargs): result = func(*args, **kwargs) logger.debug('called {0} params: {1} result:\n{2}'.format( func.__name__, args, result)) return result return wrapped SCIMMultiValuedAttributeT = collections.namedtuple('SCIMMultiValuedAttribute', ('value', 'display', 'primary', 'type', 'operation')) def SCIMMultiValuedAttribute(value=None, display=None, primary=False, type=None, operation=None): return SCIMMultiValuedAttributeT(value, display, primary, type, operation) SCIMAddressT = collections.namedtuple('SCIMAddress', ('display', 'primary', 'type', 'operation', 'formatted', 'locality', 'region', 'postalCode', 'country', 'streetAddress')) def SCIMAddress( display=None, primary=None, type=None, operation=None, formatted=None, locality=None, region=None, postalCode=None, country=None, streetAddress=None): return SCIMAddressT(display, primary, type, operation, formatted, locality, region, postalCode, country, streetAddress) SCIMNameT = collections.namedtuple('SCIMName', ( 'formatted', 'familyName', 'givenName', 'middleName', 'honorificPrefix', 'honorificSuffix')) def SCIMName(formatted=None, familyName=None, givenName=None, middleName=None, honorificPrefix=None, honorificSuffix=None): return SCIMNameT(formatted, familyName, givenName, middleName, honorificPrefix, honorificSuffix) SCIMUserT = collections.namedtuple('SCIMUser', ( 'id', 'schemas', 'userName', 'name', 'displayName', 'nickName', 'profileUrl', 'title', 'userType', 'preferredLanguage', 'locale', 'timezone', 'active', 'password', 'emails', 'phoneNumbers', 'ims', 'photos', 'addresses', 'groups', 'entitlements', 'roles', 'x509Certificates', 'meta', 'externalId')) def SCIMUser(id=None, schemas=None, userName=None, name=None, displayName=None, nickName=None, profileUrl=None, title=None, userType=None, preferredLanguage=None, locale=None, timezone=None, active=None, password=None, emails=None, phoneNumbers=None, ims=None, photos=None, addresses=None, groups=None, entitlements=None, roles=None, x509Certificates=None, meta=None, externalId=None): if not schemas: schemas = ['urn:scim:schemas:core:1.0'] if meta is not None: meta = meta.__dict__ if name is not None: name = name.__dict__ if not externalId: externalId = None return SCIMUserT(id, schemas, userName, name, displayName, nickName, profileUrl, title, userType, preferredLanguage, locale, timezone, active, password, emails, phoneNumbers, ims, photos, addresses, groups, entitlements, roles, x509Certificates, meta, externalId) SCIMErrorT = collections.namedtuple('SCIMError', ('error_code', 'description')) def SCIMError(error_code, description=None): return SCIMErrorT(error_code, description) SCIMGroupT = collections.namedtuple('SCIMGroup', ('displayName', 'members', 'externalId', 'id', 'meta', 'schemas')) def SCIMGroup(displayName=None, members=None, externalId=None, id=None, meta=None, schemas=None): if not schemas: schemas = ['urn:scim:schemas:core:1.0'] if meta is not None: meta = meta.__dict__ return SCIMGroupT(displayName, members, externalId, id, meta, schemas) ClientT = collections.namedtuple('Client', ('id', 'accessTokenValiditySeconds', 'refreshTokenValiditySeconds', 'redirectUri', 'scope', 'validityInSeconds', 'implicit', 'grants')) def Client(id=None, accessTokenValiditySeconds=None, refreshTokenValiditySeconds=None, redirectUri=None, scope=None, validityInSeconds=None, 
implicit=None, grants=None): return ClientT(id, accessTokenValiditySeconds, refreshTokenValiditySeconds, redirectUri, scope, validityInSeconds, implicit, grants) MetaT = collections.namedtuple('Meta', ('created', 'lastModified', 'location', 'version', 'attributes', 'resourceType')) def Meta(created=None, lastModified=None, location=None, version=None, attributes=None, resourceType=None): return MetaT(created, lastModified, location, version, attributes, resourceType) class SCIM: def __init__(self, resource_server, access_token): self.resource_server = resource_server self.headers = {'Authorization': "Bearer {0}".format(access_token), 'content-type': 'application/json'} s = requests.session() s.keep_alive = False def __json_dict_to_object__(self, user): return user # if user.get('userName') is not None: # return SCIMUser(user) # elif user.get('error_code') is not None: # return SCIMError(user) # else: # return SCIMGroup(user) def __single_data_operation__(self, func, id, data, type): data = json.dumps(data.__dict__) return func('{0}/{1}/{2}'.format(self.resource_server, type, id), headers=self.headers, data=data) @doLog def get_user(self, uuid): r = requests.get('{0}/Users/{1}'.format( self.resource_server, uuid), headers=self.headers) r_text = r.text o = json.loads(r_text) return self.__json_dict_to_object__(o) @doLog def create_user(self, user): data = json.dumps(user.__dict__) r = requests.post('{0}/Users'.format(self.resource_server), headers=self.headers, data=data) return self.__json_dict_to_object__(json.loads(r.text)) def __single_user_data_operation__(self, func, id, user): return self.__single_data_operation__(func, id, user, "Users") @doLog def replace_user(self, id, user): operation = self.__single_user_data_operation__(requests.put, id, user) return self.__json_dict_to_object__(json.loads(operation.content)) @doLog def update_user(self, id, user): operation = self.__single_user_data_operation__( requests.patch, id, user) return self.__json_dict_to_object__(json.loads(operation.content)) @doLog def delete_user(self, id): return requests.delete('{0}/Users/{1}'. 
format(self.resource_server, id), headers=self.headers) @doLog def get_group(self, uuid): r = requests.get('{0}/Groups/{1}'.format( self.resource_server, uuid), headers=self.headers) return self.__json_dict_to_object__(json.loads(r.text)) @doLog def create_group(self, group): r = requests.post('{0}/Groups'.format(self.resource_server), headers=self.headers, data=json.dumps(group.__dict__)) return self.__json_dict_to_object__(json.loads(r.text)) def __single_group_data_operation__(self, func, id, user): return self.__single_data_operation__(func, id, user, "Groups") @doLog def replace_group(self, id, group): operation = self.__single_group_data_operation__( requests.put, id, group) return self.__json_dict_to_object__(json.loads(operation.content)) @doLog def update_group(self, id, group): operation = self.__single_group_data_operation__(requests.patch, id, group) return self.__json_dict_to_object__(json.loads(operation.content)) @doLog def delete_group(self, id): return requests.delete('{0}/Groups/{1}'.format( self.resource_server, id), headers=self.headers) @doLog def search_with_get_on_users(self, params): r = requests.get('{0}/Users/?{1}'.format( self.resource_server, params), headers=self.headers) return json.loads(r.text) @doLog def search_with_post_on_users(self, data): r = requests.post('{0}/Users/.search'.format( self.resource_server), headers=self.headers, params=data) return json.loads(r.text) @doLog def search_with_get_on_groups(self, params): r = requests.get('{0}/Groups/?{1}'.format( self.resource_server, params), headers=self.headers) return json.loads(r.text) @doLog def search_with_post_on_groups(self, data): r = requests.post('{0}/Groups/.search'.format( self.resource_server), headers=self.headers, params=data) return json.loads(r.text) @doLog def search_with_get_on_root(self, params): r = requests.get('{0}/?{1}'.format( self.resource_server, params), headers=self.headers) return json.loads(r.text) @doLog def search_with_post_on_root(self, data): r = requests.post('{0}/.search'.format( self.resource_server), headers=self.headers, params=data) return json.loads(r.text) @doLog def get_client(self, id): r = requests.get('{0}/Client/{1}'.format( self.resource_server, id), headers=self.headers) return json.loads(r.text) @doLog def create_client(self, client): r = requests.post('{0}/Client'.format( self.resource_server), headers=self.headers, data=json.dumps(client.__dict__)) return json.loads(r.text) @doLog def delete_client(self, id): return requests.delete('{0}/Client/{1}'.format( self.resource_server, id), headers=self.headers) @doLog def update_client(self, client, id): r = requests.put('{0}/Client/{1}'.format( self.resource_server, id), headers=self.headers, data=json.dumps(client.__dict__)) return json.loads(r.content)
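# A minimal usage sketch, assuming a running OSIAM resource server and
# Python 2.7 (the module relies on namedtuple instances exposing __dict__).
# The endpoint URL and access token below are placeholders, not real values.
if __name__ == '__main__':
    scim = SCIM('http://localhost:8080/osiam-resource-server', '<access-token>')
    new_user = SCIMUser(userName='jdoe', password='secret',
                        name=SCIMName(givenName='John', familyName='Doe'))
    created = scim.create_user(new_user)
    print(created)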
mit
2,842,563,191,492,891,000
36.6609
90
0.577322
false
kfieldho/SMQTK
python/smqtk/representation/descriptor_element/cached_element_wrapper.py
1
12309
import logging import threading import time from smqtk.representation import DescriptorElement from smqtk.representation import DescriptorElementFactory from smqtk.representation import get_descriptor_element_impls __author__ = '[email protected]' class CachingDescriptorElement (DescriptorElement): @classmethod def is_usable(cls): """ This implementation has no direct dependencies of its own. :rtype: bool """ return True @classmethod def get_default_config(cls): """ Generate and return a default configuration dictionary for this class. This will be primarily used for generating what the configuration dictionary would look like for this class without instantiating it. By default, we observe what this class's constructor takes as arguments, aside from the first two assumed positional arguments, turning those argument names into configuration dictionary keys. If any of those arguments have defaults, we will add those values into the configuration dictionary appropriately. The dictionary returned should only contain JSON compliant value types. It is not be guaranteed that the configuration dictionary returned from this method is valid for construction of an instance of this class. :return: Default configuration dictionary for the class. :rtype: dict """ c = super(CachingDescriptorElement, cls).get_default_config() # Nested DescriptorElementFactory configuration if c['wrapped_element_factory'] is None: # Have to make this configuration in such a way that we don't # include ourselves in the list of nestable classes else an infinite # recursion will occur. de_impls = get_descriptor_element_impls() # Remove ourselves del de_impls[cls.__name__] # Construct config block DescriptorElementFactory wants c_def = {"type": None} for label, de_cls in de_impls.iteritems(): # noinspection PyUnresolvedReferences c_def[label] = de_cls.get_default_config() c['wrapped_element_factory'] = c_def else: c['wrapped_element_factory'] = \ c['wrapped_element_factory'].get_config() return c @classmethod def from_config(cls, config_dict, type_str, uuid): merged_config = cls.get_default_config() merged_config.update(config_dict) # convert factory configuration merged_config['wrapped_element_factory'] = \ DescriptorElementFactory.from_config( merged_config['wrapped_element_factory'] ) return super(CachingDescriptorElement, cls).from_config( merged_config, type_str, uuid ) def __init__(self, type_str, uuid, wrapped_element_factory, cache_expiration_timeout=1.0, poll_interval=0.1): """ Initialize a new caching wrapper descriptor element. This implementation is intended to wrap another DescriptorElement type, adding a timed caching layer on top of :raises AssertionError: Cache expiration seconds was not a positive value. :param type_str: Type of descriptor. This is usually the name of the content descriptor that generated this vector. :type type_str: str :param uuid: Unique ID reference of the descriptor. :type uuid: collections.Hashable :param wrapped_element_factory: DescriptorElementFactory to produce DescriptorElement instances of the wrapped type. :type wrapped_element_factory: smqtk.representation.DescriptorElementFactory :param cache_expiration_timeout: Timeout in seconds for accessed descriptors to be cached. If this is non-zero, a monitoring thread will be launched in order to track the timeout. The thread will be shutdown after timeout. This value must be positive. If this is positive infinity, then the cache never expires. 
This also means that the cache will not be updated if the vector that would be returned from the wrapped element ever changes. :type cache_expiration_timeout: None | float :param poll_interval: How often to check if the cache has expired. :type poll_interval: float """ super(CachingDescriptorElement, self).__init__(type_str, uuid) self.wrapped_element_factory = wrapped_element_factory self.cache_expiration_timeout = float(cache_expiration_timeout) self.poll_interval = poll_interval assert cache_expiration_timeout > 0, \ "Cache expiration timeout was not positive." self._d_elem = self.wrapped_element_factory \ .new_descriptor(self.type(), self.uuid()) # self._log.debug("Caching descriptor element instance of type '%s'", # self._d_elem.__class__.__name__) # Attributes for timed caching with threads self.cache_v = None # Numpy ndarray if there is a current cache self.cache_last_access = None # UNIX timestamp self.cache_lock = threading.RLock() #: :type: threading.Thread self.cache_thread = None # the expiry monitor if there is a cached v def __del__(self): """ Release vector cache """ with self.cache_lock: # Should cause dependent thread to terminate gracefully self.cache_v = None if self.cache_thread and \ self.cache_thread.ident != threading.currentThread(): self.cache_thread.join() def __getstate__(self): return { # Base DescriptorElement stuff "type": self.type(), "uuid": self.uuid(), # This impl's stuff "wrapped_element_factory": self.wrapped_element_factory, "cache_expiration_timeout": self.cache_expiration_timeout, "poll_interval": self.poll_interval } def __setstate__(self, c): # base-class self._type_label = c['type'] self._uuid = c['uuid'] # this-class self.wrapped_element_factory = c['wrapped_element_factory'] self.cache_expiration_timeout = c['cache_expiration_timeout'] self.poll_interval = c['poll_interval'] # Initializing local cache variables that were un-pickle-able self._d_elem = self.wrapped_element_factory\ .new_descriptor(self.type(), self.uuid()) self.cache_v = None self.cache_last_access = None self.cache_lock = threading.RLock() self.cache_thread = None def get_config(self): return { "wrapped_element_factory": self.wrapped_element_factory, "cache_expiration_timeout": self.cache_expiration_timeout, "poll_interval": self.poll_interval, } def has_vector(self): """ :return: Whether or not this container current has a descriptor vector stored. :rtype: bool """ return self.vector() is not None def vector(self): """ :return: Get the stored descriptor vector as a numpy array. This returns None of there is no vector stored in this container. 
:rtype: numpy.core.multiarray.ndarray or None """ with self.cache_lock: v = self.cache_v # If no cache, attempt to populate it if v is None: # No cache currently, attempt fetch from wrapped elem # self._log.debug("Getting vector from nested descriptor " # "element") v = self._d_elem.vector() # self._log.debug("Vector received: %s", v) if v is not None: # Clean-up old thread if there was one if self.cache_thread: # cache_v is None at this point # self._log.debug("Joining old monitor thread") self.cache_lock.release() self.cache_thread.join() self.cache_lock.acquire() # vector in elem; set in cache; start monitor thread self.cache_v = v self.cache_thread = threading.Thread( target=CachingDescriptorElement .thread_monitor_cache_expiration, args=(self,), # verbose=self._log.getEffectiveLevel()<=logging.DEBUG, ) # self._log.debug("Spawning cache monitor thread") self.cache_thread.start() else: # Currently have a cache assert self.cache_thread is not None, \ "Have a cache, but no monitor thread." self.cache_last_access = time.time() return v def set_vector(self, new_vec): """ Set the contained vector. If this container already stores a descriptor vector, this will overwrite it. :param new_vec: New vector to contain. :type new_vec: numpy.core.multiarray.ndarray """ # set source vector and set as current cache with self.cache_lock: # self._log.debug("Setting in source element") self._d_elem.set_vector(new_vec) # Only start monitor when we're given a cache-able vector and the # cache is currently not occupied. if new_vec is not None and self.cache_v is None: if self.cache_thread: # cache_thread not alive at this point # self._log.debug("Joining old monitor thread") self.cache_lock.release() self.cache_thread.join() self.cache_lock.acquire() # need to start new monitor thread self.cache_thread = threading.Thread( target=CachingDescriptorElement .thread_monitor_cache_expiration, args=(self,), # verbose=self._log.getEffectiveLevel() <= logging.DEBUG, ) # self._log.debug("Spawning cache monitor thread") self.cache_thread.start() # Update cache vector and access time self.cache_v = new_vec self.cache_last_access = time.time() @staticmethod def thread_monitor_cache_expiration(elem): """ Monitor wrapper element's cache expiration :param elem: Wrapper element instance for monitoring :type elem: CachingDescriptorElement """ # log = logging.getLogger(__name__) # # # noinspection PyProtectedMember # log_header = '[{type:s}, {uuid:s}, {elem:s}]'.format(**{ # "type": elem.type(), # 'uuid': elem.uuid(), # 'elem': elem.wrapped_element_factory._d_type, # }) expired = False while not expired: time.sleep(elem.poll_interval) with elem.cache_lock: t = time.time() # log.debug("%s Checking cache cache expiration " # "[now = %f | last access = %f | timeout = %f]", # log_header, t, elem.cache_last_access, # elem.cache_expiration_timeout) if t - elem.cache_last_access >= elem.cache_expiration_timeout: elem.cache_v = None expired = True # log.debug("%s Cache expired", log_header) elif elem.cache_v is None: expired = True # log.debug("%s Cache was invalidated for us", log_header) # log.debug("%s Monitor thread exiting", log_header) # Disabling this implementation for the moment because it needs to be rethought DESCRIPTOR_ELEMENT_CLASS = None
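# A construction sketch for the wrapper above, assuming the
# DescriptorMemoryElement import path and the
# DescriptorElementFactory(d_type, init_params) constructor signature; only
# new_descriptor(type, uuid) is used verbatim by the wrapper itself, so those
# assumed names may differ in the surrounding SMQTK API.
def _example_caching_wrapper():
    import numpy
    from smqtk.representation.descriptor_element.local_elements import \
        DescriptorMemoryElement

    inner_factory = DescriptorElementFactory(DescriptorMemoryElement, {})
    elem = CachingDescriptorElement(
        'example-type', 'example-uuid', inner_factory,
        cache_expiration_timeout=2.0, poll_interval=0.1)
    elem.set_vector(numpy.arange(4, dtype=float))
    return elem.vector()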
bsd-3-clause
6,018,402,207,347,926,000
37.465625
80
0.581038
false
Erotemic/utool
utool/util_csv.py
1
13720
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    import numpy as np
except ImportError as ex:
    pass
from six.moves import zip, map
import six
from utool import util_type
from utool import util_inject
from utool import util_dev
print, rrr, profile = util_inject.inject2(__name__)


class CSV(util_dev.NiceRepr):
    def __init__(self, row_data, row_headers=None, col_headers=None):
        self.row_data = row_data
        if col_headers is None:
            self.header = row_data[0]
        else:
            self.header = col_headers
        self.header_tags = [[x] for x in self.header]
        self.short_header = None
        # FIXME: finish row/col header integration
        self.row_headers = row_headers

    def __nice__(self):
        import utool as ut
        if self.short_header is None:
            header_str = ', '.join([ut.truncate_str(h, maxlen=15, truncmsg='~//~') for h in self.header])
        else:
            header_str = ', '.join(self.short_header)
        return '(shape=%s: cols=%s)' % (self.shape, header_str,)

    @classmethod
    def from_fpath(cls, fpath):
        self = cls(read_csv(fpath))
        return self

    @property
    def shape(self):
        return len(self.row_data), len(self.header)

    def __str__(self):
        return self.nice_table()

    def _strip_self(self):
        self.row_data = [[c.strip(' ') for c in r] for r in self.row_data]
        self.header = self.row_data[0]
        self.header_tags = [[x] for x in self.header]

    def tabulate(self):
        import tabulate
        import utool as ut
        tabular_data = [ut.flatten([[r], d]) for r, d in zip(self.row_headers, self.row_data)]
        return tabulate.tabulate(tabular_data, [''] + self.header, 'fancy_grid')

    def transpose(self):
        import utool as ut
        row_dataT = ut.listT(self.row_data)
        return CSV(row_dataT, row_headers=self.header, col_headers=self.row_headers)

    def nice_table(self):
        import utool as ut
        return ut.make_csv_table(ut.listT(self.row_data), raw=True)

    def nice_table2(self, **kwargs):
        import utool as ut
        return ut.make_csv_table(ut.listT(self.row_data), column_lbls=self.header,
                                 row_lbls=self.row_headers, **kwargs)

    def raw_table(self):
        return '\n'.join([','.join([y for y in x]) for x in self.row_data])

    def fuzzy_filter_columns(self, fuzzy_headers):
        import utool as ut
        col_flags = ut.filterflags_general_tags(
            self.header_tags, logic='or', in_any=fuzzy_headers)
        self.header = ut.compress(self.header, col_flags)
        self.header_tags = ut.compress(self.header_tags, col_flags)
        self.row_data = ut.listT(ut.compress(ut.listT(self.row_data), col_flags))
        if self.short_header is not None:
            self.short_header = ut.compress(self.short_header, col_flags)

    def __getitem__(self, pat):
        colx = self.fuzzy_find_colx(pat)
        return self.take_column(colx)

    def fuzzy_reorder_columns(self, fuzzy_headers, inplace=True):
        import utool as ut
        specified_xs = [self.fuzzy_find_colx(pat) for pat in fuzzy_headers]
        otherxs = ut.index_complement(specified_xs, len(self.header_tags))
        new_order = specified_xs + otherxs
        return self.permute_columns(new_order)

    def permute_columns(self, new_order, inplace=True):
        import utool as ut
        self.header = ut.take(self.header, new_order)
        self.header_tags = ut.take(self.header_tags, new_order)
        self.row_data = ut.listT(ut.take(ut.listT(self.row_data), new_order))
        if self.short_header is not None:
            self.short_header = ut.take(self.short_header, new_order)
        return self

    def fuzzy_find_colxs(self, pat):
        import utool as ut
        colxs = ut.where(ut.filterflags_general_tags(self.header_tags, in_any=[pat]))
        return colxs

    def fuzzy_find_colx(self, pat):
        colxs = self.fuzzy_find_colxs(pat)
        assert len(colxs) == 1, ('cannot find column matching %r' % (pat,))
        return colxs[0]

    def take_fuzzy_column(self, pat):
        import utool as ut
        colx = self.fuzzy_find_colx(pat)
        self.take_column(colx)
        return ut.take_column(self.row_data, colx)

    def take_column(self, colx, with_header=True):
        import utool as ut
        if with_header:
            return ut.take_column(self.row_data, colx)
        else:
            return ut.take_column(self.row_data[1:], colx)

    def compress_rows(self, flags, with_header=True, inplace=True):
        if not inplace:
            import copy
            self = copy.deepcopy(self)
        import utool as ut
        if with_header:
            assert flags[0] is True
            self.row_data = ut.compress(self.row_data, flags)
        else:
            self.row_data = self.row_data[0:1] + ut.compress(self.row_data[1:], flags)
        return self

    def compress_cols(self, flags):
        pass


def numpy_to_csv(arr, col_lbls=None, header='', col_type=None):
    col_list = arr.T.tolist()
    return make_csv_table(col_list, col_lbls, header, col_type)


def read_csv(fpath):
    """ reads csv in unicode """
    import csv
    import utool as ut
    #csvfile = open(fpath, 'rb')
    with open(fpath, 'rb') as csvfile:
        row_iter = csv.reader(csvfile, delimiter=str(','), quotechar=str('|'))
        row_list = [ut.lmap(ut.ensure_unicode, row) for row in row_iter]
    return row_list


def make_standard_csv(column_list, column_lbls=None):
    from six.moves import cStringIO as StringIO
    import utool as ut
    import csv
    stream = StringIO()
    row_list = ut.listT(column_list)
    if six.PY2:
        row_list = [[ut.ensure_unicode(c).encode('utf-8') for c in r] for r in row_list]
        if column_lbls is not None:
            column_lbls = [ut.ensure_unicode(c).encode('utf-8') for c in column_lbls]
    writer = csv.writer(stream, dialect=csv.excel)
    if column_lbls is not None:
        writer.writerow(column_lbls)
    writer.writerows(row_list)
    csv_str = stream.getvalue()
    return csv_str


def make_csv_table(column_list=[], column_lbls=None, header='',
                   column_type=None, row_lbls=None, transpose=False,
                   precision=2, use_lbl_width=True, comma_repl='<com>',
                   raw=False, new=False, standardize=False):
    """
    Creates a csv table with aligned columns

    make_csv_table

    Args:
        column_list (list):
        column_lbls (None):
        header (str):
        column_type (None):
        row_lbls (None):
        transpose (bool):

    Returns:
        str: csv_text

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_csv import *  # NOQA
        >>> column_list = [[1, 2, 3], ['A', 'B', 'C']]
        >>> column_lbls = ['num', 'alpha']
        >>> header = '# Test CSV'
        >>> column_type = (int, str)
        >>> row_lbls = None
        >>> transpose = False
        >>> csv_text = make_csv_table(column_list, column_lbls, header, column_type, row_lbls, transpose)
        >>> result = csv_text
        >>> print(result)
        # Test CSV
        # num_rows=3
        #   num,  alpha
             1,      A
             2,      B
             3,      C
    """
    import utool as ut
    assert comma_repl.find(',') == -1, 'comma_repl cannot contain a comma!'
    if transpose:
        column_lbls, row_lbls = row_lbls, column_lbls
        column_list = list(map(list, zip(*column_list)))
    if row_lbls is not None:
        if isinstance(column_list, np.ndarray):
            column_list = column_list.tolist()
        if isinstance(row_lbls, np.ndarray):
            row_lbls = row_lbls.tolist()
        column_list = [row_lbls] + column_list
        column_lbls = ['ROWLBL'] + list(map(six.text_type, column_lbls))
        if column_type is not None:
            column_type = [six.text_type] + column_type
    if len(column_list) == 0:
        print('[csv] No columns')
        return header
    column_len = [len(col) for col in column_list]
    num_data = column_len[0]
    if num_data == 0:
        #print('[csv.make_csv_table()] No data. (header=%r)' % (header,))
        return header
    if any([num_data != clen for clen in column_len]):
        print('[csv] column_lbls = %r ' % (column_lbls,))
        print('[csv] column_len = %r ' % (column_len,))
        print('[csv] inconsistent column lengths')
        return header
    if column_type is None:
        column_type = list(map(type, ut.get_list_column(column_list, 0)))
        #column_type = [type(col[0]) for col in column_list]
    csv_rows = []
    if new:
        csv_rows.append(header)
    elif not raw:
        csv_rows.append(header)
        if not standardize:
            csv_rows.append('# num_rows=%r' % num_data)
    column_maxlen = []
    column_str_list = []

    if column_lbls is None:
        column_lbls = [''] * len(column_list)

    def _toint(c):
        if c is None:
            return 'None'
        try:
            if np.isnan(c):
                return 'nan'
        except TypeError as ex:
            print('------')
            print('[csv] TypeError %r ' % ex)
            print('[csv] _toint(c) failed')
            print('[csv] c = %r ' % c)
            print('[csv] type(c) = %r ' % type(c))
            print('------')
            raise
        return ('%d') % int(c)

    import uuid
    textable_types = [uuid.UUID, six.text_type]

    try:
        if standardize:
            def csv_format(r):
                text = ut.repr2(r, precision=precision)
                #text = six.text_type(r)
                # Check if needs escape
                escape_chars = ['"', ' ', ',']
                if any([c in text for c in escape_chars]):
                    # escape quotes with quotes
                    text = text.replace('"', '""')
                    # encapsulate with quotes
                    text = '"' + text + '"'
                return text
            for col, lbl, coltype in zip(column_list, column_lbls, column_type):
                col_str = [csv_format(r) for r in col]
                column_str_list.append(col_str)
            pass
        else:
            # Loop over every column
            for col, lbl, coltype in zip(column_list, column_lbls, column_type):
                # Loop over every row in the column (using list comprehension)
                if coltype is list or util_type.is_list(coltype):
                    col_str = [six.text_type(c).replace(',', ' ').replace('.', '<dot>') for c in col]
                elif (coltype is float or util_type.is_float(coltype) or
                      coltype == np.float32 or util_type.is_valid_floattype(coltype)):
                    precision_fmtstr = '%.' + six.text_type(precision) + 'f'
                    col_str = ['None' if r is None else precision_fmtstr % float(r) for r in col]
                    #col_ = [r if r is None else float(r) for r in col]
                    #col_str = [ut.repr2(r, precision=2) for r in col_]
                elif coltype is int or util_type.is_int(coltype) or coltype == np.int64:
                    col_str = [_toint(c) for c in (col)]
                elif coltype in textable_types or util_type.is_str(coltype):
                    col_str = [six.text_type(c).replace(',', comma_repl) for c in col]
                else:
                    print('[csv] is_unknown coltype=%r' % (coltype,))
                    try:
                        col_str = [six.text_type(c) for c in (col)]
                    except UnicodeDecodeError:
                        try:
                            col_str = [ut.ensure_unicode(c) for c in (col)]
                        except Exception:
                            col_str = [repr(c) for c in (col)]
                column_str_list.append(col_str)
        for col_str, lbl in zip(column_str_list, column_lbls):
            col_lens = [len(s) for s in (col_str)]
            max_len = max(col_lens)
            if use_lbl_width:
                # The column label counts towards the column width
                max_len = max(len(lbl), max_len)
            column_maxlen.append(max_len)
    except Exception as ex:
        #ut.embed()
        ut.printex(ex, keys=['col', 'lbl', 'coltype'])
        raise

    def _fmtfn(maxlen):
        return ''.join(['%', six.text_type(maxlen + 2), 's'])
    fmtstr = ','.join([_fmtfn(maxlen) for maxlen in column_maxlen])
    try:
        if new:
            csv_rows.append('# ' + fmtstr % tuple(column_lbls))
        elif not raw:
            csv_rows.append('# ' + fmtstr % tuple(column_lbls))
            #csv_rows.append('# ' + fmtstr % column_lbls)
    except Exception as ex:
        #print(len(column_list))
        #ut.embed()
        ut.printex(ex, keys=['fmtstr', 'column_lbls'])
        raise
    for row in zip(*column_str_list):
        csv_rows.append(' ' + fmtstr % row)
    csv_text = '\n'.join(csv_rows)
    return csv_text


if __name__ == '__main__':
    """
    CommandLine:
        python -c "import utool, utool.util_csv; utool.doctest_funcs(utool.util_csv, allexamples=True)"
        python -c "import utool, utool.util_csv; utool.doctest_funcs(utool.util_csv)"
        python -m utool.util_csv
        python -m utool.util_csv --allexamples
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
apache-2.0
4,016,667,093,950,236,000
35.200528
119
0.547522
false
eig-2017/the-magical-csv-merge-machine
merge_machine/linker.py
1
29570
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Apr 24 15:46:00 2017 @author: leo """ from collections import defaultdict import fcntl import os import time from merge_machine.es_labeller import Labeller as ESLabeller import numpy as np import pandas as pd from abstract_data_project import ESAbstractDataProject from normalizer import ESNormalizer from results_analyzer import link_results_analyzer from es_connection import es from CONFIG import LINK_DATA_PATH from MODULES import LINK_MODULES, LINK_MODULE_ORDER, LINK_MODULE_ORDER_log from LINKER_CONFIG import DEFAULT_ANALYZERS, DEFAULT_ANALYZERS_TYPE class Linker(ESAbstractDataProject): MODULES = LINK_MODULES MODULE_ORDER = LINK_MODULE_ORDER MODULE_ORDER_log = LINK_MODULE_ORDER_log def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Add source and ref if the were selected if (self.metadata['files']['source'] is not None) \ and (self.metadata['files']['ref'] is not None): self.load_project_to_merge('source') self.load_project_to_merge('ref') def __repr__(self): string = '{0}({1})'.format(self.__class__.__name__, self.project_id) string += ' / source: ' if self.source is not None: string += self.source.__repr__() else: string += 'None' string += ' / ref: ' if self.ref is not None: string += self.ref.__repr__() return string def __str__(self): string = '{0}; project_id:{1}'.format(self.__class__.__name__, self.project_id) if self.source is not None: string += '\n\n***SOURCE***\n{0}'.format(self.source.__str__()) if self.ref is not None: string += '\n\n***REF***\n{0}'.format(self.ref.__str__()) return string @staticmethod def output_file_name(source_file_name): '''Name of the file to output''' return source_file_name def load_project_to_merge(self, file_role): '''Uses the "current" field in metadata to load source or ref''' self._check_file_role(file_role) # TODO: Add safeguard somewhere # Add source if file_role == 'source': try: self.source = ESNormalizer(self.metadata['files']['source']['project_id']) except: self.source = None if file_role == 'ref': try: self.ref = ESNormalizer(self.metadata['files']['ref']['project_id']) except: self.ref = None #raise Exception('Normalizer project with id {0} could not be found'.format(project_id)) @staticmethod def _check_file_role(file_role): if file_role not in ['ref', 'source']: raise Exception('file_role should be either "source" or "ref"') def _check_select(self): '''Check that a source and referential were selected''' for file_role in ['source', 'ref']: if self.metadata['files'][file_role] is None: raise Exception('{0} is not defined for this linking project'.format(file_role)) def _create_metadata(self, *args, **kwargs): metadata = super()._create_metadata(*args, **kwargs) metadata['files'] = {'source': None, 'ref': None} metadata['project_type'] = 'link' return metadata def add_col_matches(self, column_matches): ''' Adds a configuration file with the column matches between source and referential. 
INPUT: - column_matches: json file as dict ''' # Remove labeller if it exists if self._has_labeller(): self._remove_labeller() # TODO: add checks on file if (self.source is None) or (self.ref is None): raise RuntimeError('source or referential were not loaded (add_selected_project) and/or (load_project_to_merge)') # Remove duplicates from columns matches column_matches = [{'source': list(set(match['source'])), 'ref': list(set(match['ref'])), 'exact_only': match.get('exact_only', False)} \ for match in column_matches] # Remove matches with missing columns on one side or the othre column_matches = [match for match in column_matches \ if match['source'] and match['ref']] if not column_matches: raise ValueError("You have to specify at least one pair of columns" \ + " in column matches.") # Add matches self.upload_config_data(column_matches, 'es_linker', 'column_matches.json') # Select these columns for normalization in source and ref # TODO: this will cover add_certain_col_matches # Add to log for file_name in self.metadata['log']: self.metadata['log'][file_name]['add_selected_columns']['completed'] = True self._write_metadata() def add_es_learned_settings(self, learned_settings): '''Adds the learned es configuration''' print('trying to upload', learned_settings) # TODO: figure out where to move this learned_settings['best_thresh'] = 1 self.upload_config_data(learned_settings, 'es_linker', 'learned_settings.json') for file_name in self.metadata['log']: self.metadata['log'][file_name]['upload_es_train']['completed'] = True self._write_metadata() def read_col_matches(self, add_created=True): ''' Read the column_matches config file and interprets the columns looking for processed (normalized) columns ''' config = self.read_config_data('es_linker', 'column_matches.json') if not config: config = [] return config def add_col_certain_matches(self, column_matches): '''column_matches is a json file as list of dict of list''' # TODO: add checks on file self.upload_config_data(column_matches, 'es_linker', 'column_certain_matches.json') def read_col_certain_matches(self): config = self.read_config_data('es_linker', 'column_certain_matches.json') if not config: config = [] return config def read_cols_to_return(self, file_role): config_file_name = 'columns_to_return_{0}.json'.format(file_role) config = self.read_config_data('es_linker', config_file_name) if not config: config = [] return config def add_selected_project(self, file_role, public, project_id): ''' Select file to use as source or referential. INPUT: - file_role: "source" or "referential" - public: (bool) is the project available to all (or is it a user project) - project_id - file_name ''' self._check_file_role(file_role) # Check that file exists if public: raise DeprecationWarning else: proj = ESNormalizer(project_id) # if file_name not in proj.metadata['files']: # raise Exception('File {0} could not be found in project {1} \ # (public: {2})'.format(file_name, project_id, public)) # Check that normalization project has only one file (and possibly a MINI__ version) if not len(proj.metadata['files']): raise Exception('The selected normalization project ({0}) has no upload file'.format(project_id)) if len(proj.metadata['files']) > 1: raise Exception('The selected normalization project ({0}) has more than one file.'\ + ' This method expects projects to have exactly 1 file as it'\ + ' uses the implicit get_last_written'.format(project_id)) # TODO: last written is a bad idea because if we modify normalization then BOOM ! 
# TODO: last_written btw concat_with_initi and init ? (module_name, file_name) = proj.get_last_written() # TODO: add warning for implicit use of not-MINI if proj.metadata['has_mini'] and (file_role == 'source'): file_name = file_name.replace('MINI__', '') if proj.metadata['has_mini'] and (file_role == 'ref'): file_name = file_name.replace('MINI__', '') # Check that self.metadata['files'][file_role] = {'public': public, 'project_id': project_id, 'module_name': module_name, 'file_name': file_name, 'restricted': False} # Create log for source if file_role == 'source': self.metadata['log'][self.output_file_name(file_name)] = self._default_log() # Add project selection if (self.metadata['files']['source'] is not None) and (self.metadata['files']['ref'] is not None): for file_name in self.metadata['log']: self.metadata['log'][file_name]['INIT']['completed'] = True self._write_metadata() self.load_project_to_merge(file_role) def read_selected_files(self): ''' Returns self.metadata['files'] ''' return self.metadata['files'] def infer(self, module_name, params): '''Overwrite to allow restrict_reference''' if module_name == 'infer_restriction': params['NO_MEM_DATA'] = True return super().infer(module_name, params) def linker(self, module_name, data_params, module_params): '''Wrapper around link methods.''' if module_name == 'es_linker': return self.es_linker(module_params) elif module_name == 'dedupe_linker': raise DeprecationWarning def es_linker(self, module_params): module_params['index_name'] = ESNormalizer(self.ref.project_id).index_name s = self.metadata['files']['source'] self.source.load_data(s['module_name'], s['file_name']) self.mem_data = self.source.mem_data self.mem_data_info = self.source.mem_data_info # Change file_name to output file_name self.mem_data_info['file_name'] = self.output_file_name(self.mem_data_info['file_name']) # File being modified log, run_info = self.transform('es_linker', module_params) #print('DEF:', self.mem_data.columns) return log, run_info #========================================================================== # Module specific: ES Linker #========================================================================== def _gen_paths_es(self): self._check_select() # Get path to training file for ES linker training_path = self.path_to('es_linker', 'training.json') learned_settings_path = self.path_to('es_linker', 'learned_settings.json') # TODO: check that normalization projects are complete ? # Get path to source # TODO: fix this: use current file_name = self.metadata['files']['source']['file_name'] source_path = self.source.path_to_last_written(module_name=None, file_name=file_name) # Add paths paths = { 'source': source_path, 'train': training_path, 'learned_settings': learned_settings_path } return paths @staticmethod def _tuple_or_string(x): if isinstance(x, str): return x elif isinstance(x, list): if len(x) == 1: return x[0] else: return tuple(x) elif isinstance(x, tuple): if len(x) == 1: return x[0] else: return x else: raise ValueError('Value should be str, list or tuple') def gen_default_columns_to_index(self): '''Generate the dict specifying the analyzers to use for each column while indexing in Elasticsearch. This method only takes into account the reference file as to avoid re-indexing when using the same reference with a different source. This could change if partial re-indexing is implemented. 
Returns ------- columns_to_index: dict associating sets of str (values) to str (keys) A dict indicating what Elasticsearch analyzers to use on each column type during indexing. ''' INDEX_ALL = False # Whether or not to index all selected columns of the file def temp(column_types, col): """Return the type specific default analyzer for a column or return all default analyzers if type is not specified or could not be found. """ return DEFAULT_ANALYZERS_TYPE.get(column_types.get(col), DEFAULT_ANALYZERS) # Try fetching referential column types # TODO: dangerous if config was not confirmed by user... column_types = self.ref.read_config_data('recode_types', 'infered_config.json') # Read column match data column_matches = self.read_config_data('es_linker', 'column_matches.json') if not column_matches: raise RuntimeError('No column matches to read from') # Add default analyzer for columns that are exact matches if INDEX_ALL: list_of_columns_exact = self.ref.metadata['column_tracker']['selected'] list_of_columns_exact = {x for x in list_of_columns_exact if '__' not in x} else: exact_matches = filter(lambda m: m.get('exact_only', False), column_matches) list_of_columns_exact = {y for z in [[m['ref']] if isinstance(m['ref'], str) \ else m['ref'] for m in exact_matches] for y in z} columns_to_index = {col: {} for col in list_of_columns_exact} # Add analyzers for columns that are non-exact matches # NB: Preserve order to not overwrite columns_to_index of non-exact non_exact_matches = filter(lambda m: not m.get('exact_only', False), column_matches) list_of_columns_non_exact = {y for z in [[m['ref']] if isinstance(m['ref'], str) \ else m['ref'] for m in non_exact_matches] for y in z} columns_to_index.update({col: temp(column_types, col) for col in list_of_columns_non_exact}) # Add all columns that were selected for col in self.ref.metadata['column_tracker']['selected']: columns_to_index.setdefault(col, {}) print('columns_to_index:') print(columns_to_index) return columns_to_index def _gen_es_labeller(self, columns_to_index=None, certain_column_matches=None): '''Return a es_labeller object. ''' self._check_select() #chunksize = 40000 col_matches_tmp = self.read_col_matches() col_matches = [] for match in col_matches_tmp: col_matches.append({'source': self._tuple_or_string(match['source']), 'ref': self._tuple_or_string(match['ref'])}) # TODO: lists to tuple in col_matches paths = self._gen_paths_es() source = pd.read_csv(paths['source'], sep=',', encoding='utf-8', dtype=str, nrows=3000) source = source.where(source.notnull(), '') ref_table_name = self.ref.project_id if columns_to_index is None: columns_to_index = self.gen_default_columns_to_index() print(columns_to_index) # TODO: Check that reference is indexed # TODO: Restrict columns to index to columns present in reference. labeller = ESLabeller(es, source, ref_table_name, col_matches, columns_to_index, certain_column_matches) # TODO: Auto label certain pairs # TODO: Add pre-load for 3 first queries return labeller def _has_labeller(self): '''Check for json of labeller.''' file_path = self.path_to('es_linker', 'labeller.json') return os.path.isfile(file_path) def _remove_labeller(self): '''Remove json version of labeller.''' if self._has_labeller(): self._remove('es_linker', 'labeller.json') def labeller_to_json(self, labeller): '''Write a Labeller object as a json in the appropriate directory. This includes a locking logic to avoid concurrent writes. 
''' NUM_RETRY = 10 RETRY_INTERVAL = 0.1 file_path = self.path_to('es_linker', 'labeller.json') for _ in range(NUM_RETRY): try: # Lock File before writing with open(file_path, 'a') as f: fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) # Write file labeller.to_json(file_path) # Unlock file with open(file_path, 'r') as w: fcntl.flock(w, fcntl.LOCK_UN) break except BlockingIOError: time.sleep(RETRY_INTERVAL) else: raise BlockingIOError('{0} is un-writable because '.format(file_path) \ + 'it was locked for by another process.') def labeller_from_json(self): file_path = self.path_to('es_linker', 'labeller.json') paths = self._gen_paths_es() source = pd.read_csv(paths['source'], sep=',', encoding='utf-8', dtype=str, nrows=3000) source = source.where(source.notnull(), '') ref_table_name = self.ref.project_id labeller = ESLabeller.from_json(file_path, es, source, ref_table_name) return labeller def analyze_results(self, params={}): # Check that memory is loaded (if necessary) self._check_mem_data() module_name = 'link_results_analyzer' # Initiate log log = self._init_active_log(module_name, 'infer') complete_metrics = defaultdict(int) for data in self.mem_data: metrics = link_results_analyzer(data, params) for col in ['num_match_thresh', 'num_match', 'num_verif_samples']: complete_metrics[col] += metrics[col] # Weigh ratios according to the number of samples (we divide after) complete_metrics['perc_match_thresh'] += metrics['perc_match_thresh'] * metrics['num_match_thresh'] complete_metrics['perc_match'] += metrics['perc_match'] * metrics['num_match'] complete_metrics['precision'] += metrics.get('precision', 0) * metrics['num_verif_samples'] if complete_metrics['num_match_thresh']: complete_metrics['perc_match_thresh'] /= complete_metrics['num_match_thresh'] if complete_metrics['num_match']: complete_metrics['perc_match'] /= complete_metrics['num_match'] if complete_metrics['precision']: complete_metrics['precision'] /= complete_metrics['num_verif_samples'] # Write result of inference module_to_write_to = self.MODULES['infer'][module_name]['write_to'] self.upload_config_data(complete_metrics, module_to_write_to, 'infered_config.json') # Update log buffer self._end_active_log(log, error=False) return complete_metrics # ============================================================================= # Elasticsearch # ============================================================================= def update_results(self, labels): '''Updates the merged table in Elasticsearch to take into account the new labels. 
''' # TODO: source indices new_rows = [] columns = set() for label in labels: current_row = es.get(self.index_name, 'structure', label['source_id'])['_source'] if label['is_match']: if current_row['__ID_REF'] != label['ref_id']: new_ref = es.get(self.ref.project_id, 'structure', label['ref_id'])['_source'] new_ref = {key + '__REF': val for key, val in new_ref.items()} new_row = {key: val for key, val in current_row.items()} new_row.update(new_ref) new_row['__IS_MATCH'] = True new_row['__CONFIDENCE'] = 999 new_row['__ID_REF'] = label['ref_id'] # TODO: what to do with __ES_SCORE, __ID_QUERY, __THRESH else: new_row = {key: val for key, val in current_row.items()} new_row['__IS_MATCH'] = True new_row['__CONFIDENCE'] = 999 else: new_row = {col: val for col, val in current_row.items()} nan_cols = list(filter(lambda x: x[-5:]=='__REF', new_row.keys())) \ + ['__CONFIDENCE', '__ES_SCORE', '__ID_QUERY', \ '__ID_REF', '__IS_MATCH', '__THRESH'] for col in nan_cols: new_row[col] = np.nan columns.update(new_row.keys()) new_rows.append((label['source_id'], new_row)) if new_rows: dtype = {col: self._choose_dtype(col) for col in columns} tab = pd.DataFrame([x[1] for x in new_rows], index=[x[0] for x in new_rows]) # Fix for dtype that is not working in DataFrame call for k, v in dtype.items(): if v == str: tab[k].fillna('', inplace=True) tab[k] = tab[k].astype(v) ref_gen = (x for x in [tab]) self.update_index(ref_gen) # Dirty method to keep track of modifications file_name = self.metadata['log'].keys() assert len(file_name) == 1 file_name = list(file_name)[0] self.metadata['log'][file_name]['upload_es_train']['was_modified'] = True self._write_metadata() # def create_es_index_ref(self, columns_to_index, force=False): # '''#TODO: doc''' # # self.ref = ESNormalizer(self.ref.project_id) # # # TODO: Doesn't seem safe.. # (module_name, file_name) = proj.get_last_written(file_name=self.metadata['files']['ref']['file_name']) # ref_path = self.ref.path_to(module_name,file_name) # return self.ref.create_index(ref_path, columns_to_index, force) #========================================================================== # Module specific: Restriction #========================================================================== # training_df = training_to_ref_df(training) # common_words = find_common_words(training_df) # common_vals = find_common_vals(training_df) # def perform_restriction(self, params): # ''' # Writes a new file with the path restricted reference # # /!\ Contrary to infer or transform, the log is written directly. # ''' # # current_module_name = 'restriction' # # # Initiate log # self.mem_data_info['file_role'] = 'link' # Role of file being modified # # log = self._init_active_log(current_module_name, 'link') # # # TODO: Move this # self.load_project_to_merge('ref') # module_name = self.metadata['files']['ref']['module_name'] # file_name = self.metadata['files']['ref']['file_name'] # # self.ref.load_data(module_name, file_name, restrict_to_selected=False) # # self.mem_data = (perform_restriction(part_tab, params)[0] \ # for part_tab in self.ref.mem_data) # TODO: no run info ! 
# # # Complete log # self.log_buffer.append(self._end_active_log(log, error=False)) # self.mem_data_info['file_name'] = self.ref.mem_data_info['file_name'] # self.mem_data_info['module_name'] = current_module_name # # # TODO: fix fishy: # # self.run_info_buffer[(current_module_name, '__REF__')] = {} # # self.run_info_buffer[(current_module_name, '__REF__')][current_module_name] = run_info # TODO: fishy # # # Add restricted to current for restricted # self.metadata['files']['ref']['restricted'] = True # # # TODO: write new_ref to "restriction" # self.write_data() # self.clear_memory() # # return {} #run_info # TODO: Add to current reference # TODO: Return smth # # # # # self.mem_data_info['file_role'] = 'link' # Role of file being modified # self.mem_data_info['file_name'] = self.output_file_name(os.path.split(paths['source'])[-1]) # File being modified # # log = self._init_active_log(module_name, 'link') # # self.mem_data, run_info = MODULES['link'][module_name]['func'](paths, params) # # self.mem_data_info['module_name'] = module_name # # # Complete log # log = self._end_active_log(log, error=False) # # # Update buffers # self.log_buffer.append(log) # self.run_info_buffer[(module_name, self.mem_data_info['file_name'])] = run_info # return class ESLinker(Linker): def path_to(self, module_name='', file_name=''): return self._path_to(LINK_DATA_PATH, module_name, file_name) if __name__ == '__main__': assert False source_file_name = 'source.csv' source_user_given_name = 'my_source.csv' ref_file_name = 'ref.csv' # Create source proj = ESNormalizer(None, create_new=True) source_proj_id = proj.project_id # Upload files to normalize file_path = os.path.join('local_test_data', source_file_name) with open(file_path, 'rb') as f: proj.upload_init_data(f, source_file_name, source_user_given_name) # Create ref proj = ESNormalizer(None, create_new=True) ref_proj_id = proj.project_id # Upload files to normalize file_path = os.path.join('local_test_data', ref_file_name) with open(file_path, 'rb') as f: proj.upload_init_data(f, ref_file_name, ref_file_name) # Try deduping proj = ESLinker(create_new=True) proj.add_selected_project('source', False, source_proj_id) proj.add_selected_project('ref', False, ref_proj_id) # Index proj.load_project_to_merge('ref') ref = ESNormalizer(proj.ref.project_id) # ref_path, columns_to_index, force=False) ref_path = ref.path_to_last_written() columns_to_index = { 'numero_uai': {}, 'denomination_principale_uai': { 'french', 'whitespace', 'integers', 'n_grams' }, 'patronyme_uai': { 'french', 'whitespace', 'integers', 'n_grams' }, 'adresse_uai': { 'french', 'whitespace', 'integers', 'n_grams' }, 'localite_acheminement_uai': { 'french', 'whitespace', 'integers', 'n_grams' }, 'departement': { 'french', 'whitespace', 'integers', 'n_grams' }, 'code_postal_uai': {}, 'full_name': { 'french', 'whitespace', 'integers', 'n_grams' } } ref.create_index(ref_path, columns_to_index, force=False) # Link index_name = proj.metadata['files']['ref']['project_id'] query_template = (('must', 'commune', 'localite_acheminement_uai', '.french', 1), ('must', 'lycees_sources', 'full_name', '.french', 1)) threshold = 3.5 must = {'full_name': ['lycee']} must_not = {'full_name': ['ass', 'association', 'sportive', 'foyer']} params=dict() params['index_name'] = index_name params['query_template'] = query_template params['thresh'] = threshold params['must'] = must params['must_not'] = must_not proj.linker('es_linker', None, params) proj.write_data() import pprint pprint.pprint(proj.metadata)
mit
7,818,758,024,777,220,000
37.502604
140
0.533345
false
vlegoff/tsunami
src/debug.py
1
2317
# -*-coding:Utf-8 -*

# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""Debug script (run with kassie.py -r debug.py).

This script can be used when launching Kassie to generate a lightweight
save (without players, accounts or objects). Object prototypes are kept,
as are the rooms and other information.

"""

for compte in list(importeur.connex.comptes.values()):
    if compte.nom.lower() not in ("admin", "systeme", "sunami"):
        importeur.connex.supprimer_compte(compte)

importeur.connex.comptes["sunami"].changer_mot_de_passe("azerty")

for salle in importeur.salle.salles.values():
    if "neige" in salle.affections:
        salle.affections["neige"].duree = 1

for objet in list(importeur.objet.objets.values()):
    importeur.objet.supprimer_objet(objet.identifiant)
bsd-3-clause
6,796,149,646,355,495,000
44.294118
79
0.767532
false
mlperf/training_results_v0.6
NVIDIA/benchmarks/gnmt/implementations/pytorch/setup.py
1
1082
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import sys

if sys.version_info < (3,):
    sys.exit('Sorry, Python3 is required for gnmt.')

with open('requirements.txt') as f:
    reqs = f.read()

extra_cuda_compile_args = {
    'cxx': ['-O2', ],
    'nvcc': ['--gpu-architecture=sm_70', ]
}

cat_utils = CUDAExtension(
    name='seq2seq.pack_utils._C',
    sources=[
        'seq2seq/csrc/pack_utils.cpp',
        'seq2seq/csrc/pack_utils_kernel.cu'
    ],
    extra_compile_args=extra_cuda_compile_args
)

attn_score = CUDAExtension(
    name='seq2seq.attn_score._C',
    sources=[
        'seq2seq/csrc/attn_score_cuda.cpp',
        'seq2seq/csrc/attn_score_cuda_kernel.cu',
    ],
    extra_compile_args=extra_cuda_compile_args
)

setup(
    name='gnmt',
    version='0.6.0',
    description='GNMT',
    install_requires=reqs.strip().split('\n'),
    packages=find_packages(),
    ext_modules=[cat_utils, attn_score],
    cmdclass={
        'build_ext': BuildExtension
    },
    test_suite='tests',
)
apache-2.0
-957,689,414,757,461,600
24.162791
67
0.62939
false