prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k)
---|---
<|file_name|>color.rs<|end_file_name|><|fim▁begin|>use crate::color::{self, Component, IntoLinSrgba, LinSrgba};
use crate::math::num_traits::Float;
/// A **Srgba** type with the default Scalar.
pub type DefaultSrgba = color::Srgba<color::DefaultScalar>;
/// A **LinSrgba** type with the default Scalar.
pub type DefaultLinSrgba = color::LinSrgba<color::DefaultScalar>;
/// Nodes that support setting colors.
pub trait SetColor<S>: Sized
where
S: Component,<|fim▁hole|> /// Specify a color.
///
/// This method supports any color type that can be converted into RGBA.
///
/// Colors that have no alpha channel will be given an opaque alpha channel value `1.0`.
fn color<C>(mut self, color: C) -> Self
where
C: IntoLinSrgba<S>,
{
*self.rgba_mut() = Some(color.into_lin_srgba());
self
}
/// Specify the color via red, green and blue channels.
fn rgb<T>(self, r: T, g: T, b: T) -> Self
where
T: Component,
S: Float,
{
self.color(color::Srgb::new(r, g, b))
}
/// Specify the color via red, green and blue channels as bytes.
fn rgb8(self, r: u8, g: u8, b: u8) -> Self
where
S: Float,
{
self.color(color::Srgb::<u8>::new(r, g, b))
}
/// Specify the color via red, green, blue and alpha channels.
fn rgba<T>(self, r: T, g: T, b: T, a: T) -> Self
where
T: Component,
S: Float,
{
self.color(color::Srgba::new(r, g, b, a))
}
/// Specify the color via red, green, blue and alpha channels as bytes.
fn rgba8(self, r: u8, g: u8, b: u8, a: u8) -> Self
where
S: Float,
{
self.color(color::Srgba::<u8>::new(r, g, b, a))
}
/// Specify the color via hue, saturation and luminance.
///
/// If you're looking for HSV or HSB, use the `hsv` method instead.
///
/// The given hue expects a value between `0.0` and `1.0` where `0.0` is 0 degrees and `1.0` is
/// 360 degrees (or 2 PI radians).
///
/// See the [wikipedia entry](https://en.wikipedia.org/wiki/HSL_and_HSV) for more details on
/// this color space.
fn hsl(self, h: S, s: S, l: S) -> Self
where
S: Float + Into<color::RgbHue<S>>,
{
let hue = color::RgbHue::from_degrees(h * S::from(360.0).unwrap());
self.color(color::Hsl::new(hue, s, l))
}
/// Specify the color via hue, saturation, luminance and an alpha channel.
///
/// If you're looking for HSVA or HSBA, use the `hsva` method instead.
///
/// The given hue expects a value between `0.0` and `1.0` where `0.0` is 0 degrees and `1.0` is
/// 360 degrees (or 2 PI radians).
///
/// See the [wikipedia entry](https://en.wikipedia.org/wiki/HSL_and_HSV) for more details on
/// this color space.
fn hsla(self, h: S, s: S, l: S, a: S) -> Self
where
S: Float + Into<color::RgbHue<S>>,
{
let hue = color::RgbHue::from_degrees(h * S::from(360.0).unwrap());
self.color(color::Hsla::new(hue, s, l, a))
}
/// Specify the color via hue, saturation and *value* (brightness).
///
/// This is sometimes also known as "hsb".
///
/// The given hue expects a value between `0.0` and `1.0` where `0.0` is 0 degrees and `1.0` is
/// 360 degrees (or 2 PI radians).
///
/// See the [wikipedia entry](https://en.wikipedia.org/wiki/HSL_and_HSV) for more details on
/// this color space.
fn hsv(self, h: S, s: S, v: S) -> Self
where
S: Float,
{
let hue = color::RgbHue::from_degrees(h * S::from(360.0).unwrap());
self.color(color::Hsv::new(hue, s, v))
}
/// Specify the color via hue, saturation, *value* (brightness) and an alpha channel.
///
/// This is sometimes also known as "hsba".
///
/// The given hue expects a value between `0.0` and `1.0` where `0.0` is 0 degrees and `1.0` is
/// 360 degrees (or 2 PI radians).
///
/// See the [wikipedia entry](https://en.wikipedia.org/wiki/HSL_and_HSV) for more details on
/// this color space.
fn hsva(self, h: S, s: S, v: S, a: S) -> Self
where
S: Float,
{
let hue = color::RgbHue::from_degrees(h * S::from(360.0).unwrap());
self.color(color::Hsva::new(hue, s, v, a))
}
/// Specify the color as grayscale.
///
/// The given `g` expects a value between `0.0` and `1.0` where `0.0` is black and `1.0` is white.
fn gray<T>(self, g: T) -> Self
where
T: Component,
S: Float,
{
self.color(color::Srgb::new(g, g, g))
}
}
impl<S> SetColor<S> for Option<LinSrgba<S>>
where
S: Component,
{
fn rgba_mut(&mut self) -> &mut Option<LinSrgba<S>> {
self
}
}<|fim▁end|> | {
/// Provide a mutable reference to the RGBA field which can be used for setting colors.
fn rgba_mut(&mut self) -> &mut Option<LinSrgba<S>>;
|
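The `hsl`/`hsla`/`hsv`/`hsva` helpers above all take the hue as a fraction in `[0.0, 1.0]` and scale it to degrees before building an `RgbHue`. A tiny Python sketch of that same mapping, for illustration only (the function name is made up and is not part of the crate):

```python
def hue_fraction_to_degrees(h):
    """Map a normalized hue in [0.0, 1.0] to degrees in [0.0, 360.0]."""
    return h * 360.0

# 0.0 -> 0 degrees, 0.5 -> 180 degrees, 1.0 -> 360 degrees (a full turn)
assert hue_fraction_to_degrees(0.0) == 0.0
assert hue_fraction_to_degrees(0.5) == 180.0
assert hue_fraction_to_degrees(1.0) == 360.0
```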
<|file_name|>db.go<|end_file_name|><|fim▁begin|>package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"golang.org/x/crypto/openpgp"
)
//
// Password keeps login, password and other entry data.
// This type should probably be called Entry.
//
type Password struct {
Key string
Login string
Password string
Description string
}
//
// Database holds entries and metainfo
//
type Database struct {
FileName string
Password string
Entries map[string]Password
}
//
// New creates a database instance. It doesn't open or create a file.
//
func New(fileName, password string) *Database {
database := &Database{
FileName: fileName,
Password: password,
Entries: make(map[string]Password),
}
return database
}
//
// Load reads a file.
//
func (d *Database) Load() (err error) {
f, err := os.Open(d.FileName)
if os.IsNotExist(err) {
return err
}
if err != nil {
return fmt.Errorf("couldn't open db file %s: %s", d.FileName, err)
}
defer f.Close()
// FIXME: this is a workaround; openpgp.ReadMessage keeps calling the prompt function until it returns without error, so give up after the first failed attempt.
var tries int
promptFunction := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
if tries > 0 {
return nil, fmt.Errorf("invalid password")
}
tries++
return []byte(d.Password), nil
}
md, err := openpgp.ReadMessage(f, nil, promptFunction, nil)
if err != nil {
return fmt.Errorf("decryption failed: %s ", err)
}
bytes, err := ioutil.ReadAll(md.UnverifiedBody)
if err != nil {
return fmt.Errorf("reading decrypted message: %s", err)
}
fmt.Printf("Loading %s\n", d.FileName)
err = json.Unmarshal(bytes, &d.Entries)
if err != nil {
return fmt.Errorf("unmarshalling failed: %s", err)
}
fmt.Printf("Got %d passwords\n", len(d.Entries))
return nil
}
//
// Save stores database into a file.
//
func (d *Database) Save() error {
blob, err := json.MarshalIndent(&d.Entries, "", " ")
if err != nil {
return fmt.Errorf("marshalling failed: %s", err)
}
f, err := os.Create(d.FileName)
defer f.Close()
if err != nil {
return fmt.Errorf("creating '%s' failed: %s", d.FileName, err)
}
writer, err := openpgp.SymmetricallyEncrypt(f, []byte(d.Password), nil, nil)
if err != nil {
return fmt.Errorf("encryption failed: %s", err)
}
_, err = writer.Write(blob)
if err != nil {
return fmt.Errorf("writing %s failed: %s", d.FileName, err)
}
writer.Close()
fmt.Printf("Saved %s\n", d.FileName)
return nil
}
//
// Add adds an entry to the database.
//
func (d *Database) Add(key, login, pass, description string) {
var p Password
p.Key = key
p.Login = login
p.Password = pass
p.Description = description
d.Entries[key] = p
}
//
// Delete deletes entry.
//
func (d *Database) Delete(key string) {
delete(d.Entries, key)
}
<|fim▁hole|>func (d *Database) Get(key string) *Password {
p, exists := d.Entries[key]
if exists {
return &p
}
return nil
}
func searchMatch(pass Password, q string) bool {
if q == "" {
return true
}
return strings.Contains(pass.Login, q) ||
strings.Contains(pass.Description, q)
}
type byName []Password
func (s byName) Len() int { return len(s) }
func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Key < s[j].Key }
//
// Search returns entries matching query.
//
func (d *Database) Search(q string) []Password {
var res = []Password{}
for _, pass := range d.Entries {
if searchMatch(pass, q) {
res = append(res, pass)
}
}
sort.Sort(byName(res))
return res
}<|fim▁end|> | //
// Get retrieves an entry if present, otherwise returns nil.
// |
<|file_name|>environment.py<|end_file_name|><|fim▁begin|>from steps.bdd_test_util import cli_call
def after_scenario(context, scenario):
if 'doNotDecompose' in scenario.tags:
print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
else:
if 'compose_yaml' in context:
print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "kill"], expect_success=True)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "rm","-f"], expect_success=True)
# now remove any other containers (chaincodes)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
if context.compose_returncode == 0:
# Remove each container
for containerId in context.compose_output.splitlines():
#print("docker rm {0}".format(containerId))<|fim▁hole|><|fim▁end|> | context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "rm", containerId], expect_success=True) |
<|file_name|>plot_msg_minus_cosmo.py<|end_file_name|><|fim▁begin|>from __future__ import division
from __future__ import print_function
#!/usr/bin/python
# program to plot SEVIRI observations
# usage from command line
# $ python plot_msg.py
#
# pass arguments to overwrite time, rgb, area given in the script
# $ python plot_msg.py year month day hour min rgb area
# year month day hour min
# -> integers specifying the date of observation
# rgb -> string, e.g. RGBs='HRoverview' or
# string list, e.g. RGBs=['IR_108','IR_120-IR_108','HRoverview']
# for possible options have a look at __main__
# area -> string or string array, e.g. 'EuropeCanary' or 'ccs4' (default)
# for possible options have a look at the file area.def
# RSS -> logical (True or False) rapid scan service
# True -> 5min service for europe (default)
# False -> 15min service for whole disk
# verbose-> logical (True or False) activates verbose output
# True -> more messages for debugging (default)
# False -> quiet
#
# Author Ulrich Hamann
# History 2014-10-01 U. Hamann, first version
# 2014-10-28 U. Hamann, area can also be used as array
# 2014-02-10 U. Hamann, introduced input file
# 2015-02-25 U. Hamann, added the ability to plot
# NWC-SAF cloud mask and SPhR products
#
from mpop.satellites import GeostationaryFactory
from mpop.imageo.geo_image import GeoImage
#from mpop.imageo.palettes import cms_modified, convert_palette, convert_palette2colormap
from pycoast import ContourWriterAGG
from pydecorate import DecoratorAGG
from mpop.channel import Channel, GenericChannel
import aggdraw
from numpy import where, zeros
import numpy.ma as ma
from os.path import dirname, exists, join
from os import makedirs, chmod, stat
import subprocess
from mpop.projector import get_area_def
from copy import deepcopy
from PIL import Image
from trollimage.image import Image as trollimage
from PIL import ImageFont
from PIL import ImageDraw
from trollimage.colormap import rdbu, greys, rainbow, spectral
from my_composites import mask_clouddepth, get_image
from my_msg_module import get_last_SEVIRI_date, check_input, channel_str2ind
from my_msg_module import choose_msg, choose_area_loaded_msg, convert_NWCSAF_to_radiance_format, get_NWC_pge_name, format_name
from my_msg_module import check_loaded_channels
from postprocessing import postprocessing
import products
from datetime import datetime
from plot_msg import load_products, create_PIL_image, choose_map_resolution, save_reprojected_data, mask_data
from plot_msg import add_colorscale, add_title, indicate_mask, add_borders_and_rivers
from get_input_msg import parse_commandline_and_read_inputfile
import inspect
from mpop.utils import debug_on
debug_on()
try:
str
except NameError:
str = str
#----------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
def plot_msg_minus_cosmo(in_msg):
# do statistics for the last full hour (minutes=0, seconds=0)
in_msg.datetime = datetime(in_msg.datetime.year, in_msg.datetime.month, in_msg.datetime.day, in_msg.datetime.hour, 0, 0)
area_loaded = choose_area_loaded_msg(in_msg.sat, in_msg.sat_nr, in_msg.datetime)
# define contour write for coasts, borders, rivers
cw = ContourWriterAGG(in_msg.mapDir)
# check if input data is complete
if in_msg.verbose:
print("*** check input data for ", in_msg.sat_str())
RGBs = check_input(in_msg, in_msg.sat_str(layout="%(sat)s")+in_msg.sat_nr_str(), in_msg.datetime)
# in_msg.sat_nr might be changed to backup satellite
if in_msg.verbose:
print('*** Create plots for ')
print(' Satellite/Sensor: ' + in_msg.sat_str())
print(' Satellite number: ' + in_msg.sat_nr_str() +' // ' +str(in_msg.sat_nr))
print(' Satellite instrument: ' + in_msg.instrument)
print(' Date/Time: '+ str(in_msg.datetime))
print(' RGBs: ', in_msg.RGBs)
print(' Area: ', in_msg.areas)
print(' reader level: ', in_msg.reader_level)
# define satellite data object
#global_data = GeostationaryFactory.create_scene(in_msg.sat, in_msg.sat_nr_str(), "seviri", in_msg.datetime)
global_data = GeostationaryFactory.create_scene(in_msg.sat_str(), in_msg.sat_nr_str(), in_msg.instrument, in_msg.datetime)
# global_data = GeostationaryFactory.create_scene("msg-ot", "", "Overshooting_Tops", in_msg.datetime)
if len(RGBs) == 0 and len(in_msg.postprocessing_areas) == 0:
return RGBs
if in_msg.verbose:
print("*** load satellite channels for " + in_msg.sat_str()+in_msg.sat_nr_str()+" ", global_data.fullname)
# initialize processed RGBs
RGBs_done=[]
# -------------------------------------------------------------------
# load reflectivities, brightness temperatures, NWC-SAF products ...
# -------------------------------------------------------------------
area_loaded = load_products(global_data, RGBs, in_msg, area_loaded)<|fim▁hole|> print("... read COSMO input file: ", cosmo_input_file)
in_cosmo = parse_commandline_and_read_inputfile(input_file=cosmo_input_file)
# add composite
in_msg.scpOutput = True
in_msg.resize_montage = 70
in_msg.postprocessing_montage = [["MSG_IR-108cpc","COSMO_SYNMSG-BT-CL-IR10.8","MSG_IR-108-COSMO-minus-MSGpc"]]
in_msg.scpProducts = [["MSG_IR-108cpc","COSMO_SYNMSG-BT-CL-IR10.8","MSG_IR-108-COSMO-minus-MSGpc"]]
#in_msg.scpProducts = ["all"]
# define satellite data object
cosmo_data = GeostationaryFactory.create_scene(in_cosmo.sat_str(), in_cosmo.sat_nr_str(), in_cosmo.instrument, in_cosmo.datetime)
area_loaded_cosmo = load_products(cosmo_data, ['SYNMSG_BT_CL_IR10.8'], in_cosmo, area_loaded)
# preprojecting the data to another area
# --------------------------------------
if len(RGBs) > 0:
for area in in_msg.areas:
print("")
obj_area = get_area_def(area)
if area != 'ccs4':
print("*** WARNING, diff MSG-COSMO only implemented for ccs4")
continue
# reproject data to new area
print(area_loaded)
if obj_area == area_loaded:
if in_msg.verbose:
print("*** Use data for the area loaded: ", area)
#obj_area = area_loaded
data = global_data
resolution='l'
else:
if in_msg.verbose:
print("*** Reproject data to area: ", area, "(org projection: ", area_loaded.name, ")")
obj_area = get_area_def(area)
# PROJECT data to new area
data = global_data.project(area, precompute=True)
resolution='i'
if in_msg.parallax_correction:
loaded_products = [chn.name for chn in data.loaded_channels()]
if 'CTH' not in loaded_products:
print("*** Error in plot_msg ("+inspect.getfile(inspect.currentframe())+")")
print(" Cloud Top Height is needed for parallax correction ")
print(" either load CTH or specify the estimation of the CTH in the input file (load 10.8 in this case)")
quit()
if in_msg.verbose:
print(" perform parallax correction for loaded channels: ", loaded_products)
data = data.parallax_corr(fill=in_msg.parallax_gapfilling, estimate_cth=in_msg.estimate_cth, replace=True)
# save reprojected data
if area in in_msg.save_reprojected_data:
save_reprojected_data(data, area, in_msg)
# apply a mask to the data (switched off at the moment)
if False:
mask_data(data, area)
# save average values
if in_msg.save_statistics:
mean_array = zeros(len(RGBs))
#statisticFile = '/data/COALITION2/database/meteosat/ccs4/'+yearS+'/'+monthS+'/'+dayS+'/MSG_'+area+'_'+yearS[2:]+monthS+dayS+'.txt'
statisticFile = './'+yearS+'-'+monthS+'-'+dayS+'/MSG_'+area+'_'+yearS[2:]+monthS+dayS+'.txt'
if in_msg.verbose:
print("*** write statistics (average values) to "+statisticFile)
f1 = open(statisticFile,'a') # mode append
i_rgb=0
for rgb in RGBs:
if rgb in products.MSG_color:
mean_array[i_rgb]=data[rgb.replace("c","")].data.mean()
i_rgb=i_rgb+1
# create string to write
str2write = dateS +' '+hourS+' : '+minS+' UTC '
for mm in mean_array:
str2write = str2write+' '+ "%7.2f" % mm
str2write = str2write+"\n"
f1.write(str2write)
f1.close()
# creating plots/images
if in_msg.make_plots:
# choose map resolution
in_msg.resolution = choose_map_resolution(area, in_msg.mapResolution)
# define area
proj4_string = obj_area.proj4_string
# e.g. proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
area_extent = obj_area.area_extent
# e.g. area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
area_tuple = (proj4_string, area_extent)
RGBs=['IR_108-COSMO-minus-MSG']
print(data['IR_108'].data.shape)
print(cosmo_data['SYNMSG_BT_CL_IR10.8'].data.shape)
diff_MSG_COSMO = cosmo_data['SYNMSG_BT_CL_IR10.8'].data - data['IR_108'].data
HRV_enhance_str=''
# add IR difference as "channel object" to satellite regional "data" object
data.channels.append(Channel(name=RGBs[0],
wavelength_range=[0.,0.,0.],
resolution=data['IR_108'].resolution,
data = diff_MSG_COSMO) )
for rgb in RGBs:
if not check_loaded_channels(rgb, data):
continue
PIL_image = create_PIL_image(rgb, data, in_msg, obj_area=obj_area)
# !!! in_msg.colorbar[rgb] is initialized inside (give attention to rgbs) !!!
add_borders_and_rivers(PIL_image, cw, area_tuple,
add_borders=in_msg.add_borders, border_color=in_msg.border_color,
add_rivers=in_msg.add_rivers, river_color=in_msg.river_color,
resolution=in_msg.resolution, verbose=in_msg.verbose)
# indicate mask
if in_msg.indicate_mask:
PIL_image = indicate_mask(rgb, PIL_image, data, in_msg.verbose)
#if area.find("EuropeCanary") != -1 or area.find("ccs4") != -1:
dc = DecoratorAGG(PIL_image)
# add title to image
if in_msg.add_title:
add_title(PIL_image, in_msg.title, HRV_enhance_str+rgb, in_msg.sat_str(), data.sat_nr(), in_msg.datetime, area, dc, in_msg.font_file, in_msg.verbose,
title_color=in_msg.title_color, title_y_line_nr=in_msg.title_y_line_nr ) # !!! needs change
# add MeteoSwiss and Pytroll logo
if in_msg.add_logos:
if in_msg.verbose:
print('... add logos')
dc.align_right()
if in_msg.add_colorscale:
dc.write_vertically()
if PIL_image.mode != 'L':
height = 60 # height=60.0 normal resolution
dc.add_logo(in_msg.logos_dir+"/pytroll3.jpg",height=height) # height=60.0
dc.add_logo(in_msg.logos_dir+"/meteoSwiss3.jpg",height=height)
dc.add_logo(in_msg.logos_dir+"/EUMETSAT_logo2_tiny_white_square.png",height=height) # height=60.0
# add colorscale
if in_msg.add_colorscale and in_msg.colormap[rgb] != None:
if rgb in products.MSG_color:
unit = data[rgb.replace("c","")].info['units']
#elif rgb in products.MSG or rgb in products.NWCSAF or rgb in products.HSAF:
# unit = data[rgb].info['units']
else:
unit = None
loaded_channels = [chn.name for chn in data.loaded_channels()]
if rgb in loaded_channels:
if hasattr(data[rgb], 'info'):
print(" hasattr(data[rgb], 'info')", list(data[rgb].info.keys()))
if 'units' in list(data[rgb].info.keys()):
print("'units' in data[rgb].info.keys()")
unit = data[rgb].info['units']
print("... units = ", unit)
add_colorscale(dc, rgb, in_msg, unit=unit)
if in_msg.parallax_correction:
parallax_correction_str='pc'
else:
parallax_correction_str=''
rgb+=parallax_correction_str
# create output filename
outputDir = format_name(in_msg.outputDir, data.time_slot, area=area, rgb=rgb, sat=data.satname, sat_nr=data.sat_nr()) # !!! needs change
outputFile = outputDir +"/"+ format_name(in_msg.outputFile, data.time_slot, area=area, rgb=rgb, sat=data.satname, sat_nr=data.sat_nr()) # !!! needs change
# check if output directory exists, if not create it
path= dirname(outputFile)
if not exists(path):
if in_msg.verbose:
print('... create output directory: ' + path)
makedirs(path)
# save file
if exists(outputFile) and not in_msg.overwrite:
if stat(outputFile).st_size > 0:
print('... outputFile '+outputFile+' already exists (keep old file)')
else:
print('*** Warning, outputFile '+outputFile+' already exists, but is empty (overwrite file)')
PIL_image.save(outputFile, optimize=True) # optimize -> minimize file size
chmod(outputFile, 0o777) ## FOR PYTHON3: 0o664 # give read/write access to group members
else:
if in_msg.verbose:
print('... save final file: ' + outputFile)
PIL_image.save(outputFile, optimize=True) # optimize -> minimize file size
chmod(outputFile, 0o777) ## FOR PYTHON3: 0o664 # give read/write access to group members
if in_msg.compress_to_8bit:
if in_msg.verbose:
print('... compress to 8 bit image: display '+outputFile.replace(".png","-fs8.png")+' &')
subprocess.call("/usr/bin/pngquant -force 256 "+outputFile+" 2>&1 &", shell=True) # 256 == "number of colors"
#if in_msg.verbose:
# print " add coastlines to "+outputFile
## alternative: reopen image and modify it (takes longer due to additional reading and saving)
#cw.add_rivers_to_file(img, area_tuple, level=5, outline='blue', width=0.5, outline_opacity=127)
#cw.add_coastlines_to_file(outputFile, obj_area, resolution=resolution, level=4)
#cw.add_borders_to_file(outputFile, obj_area, outline=outline, resolution=resolution)
# secure copy file to another place
if in_msg.scpOutput:
if (rgb in in_msg.scpProducts) or ('all' in [x.lower() for x in in_msg.scpProducts if type(x)==str]):
scpOutputDir = format_name (in_msg.scpOutputDir, data.time_slot, area=area, rgb=rgb, sat=data.satname, sat_nr=data.sat_nr() )
if in_msg.compress_to_8bit:
if in_msg.verbose:
print("... secure copy "+outputFile.replace(".png","-fs8.png")+ " to "+scpOutputDir)
subprocess.call("scp "+in_msg.scpID+" "+outputFile.replace(".png","-fs8.png")+" "+scpOutputDir+" 2>&1 &", shell=True)
else:
if in_msg.verbose:
print("... secure copy "+outputFile+ " to "+scpOutputDir)
subprocess.call("scp "+in_msg.scpID+" "+outputFile+" "+scpOutputDir+" 2>&1 &", shell=True)
if in_msg.scpOutput and in_msg.scpID2 != None and in_msg.scpOutputDir2 != None:
if (rgb in in_msg.scpProducts2) or ('all' in [x.lower() for x in in_msg.scpProducts2 if type(x)==str]):
scpOutputDir2 = format_name (in_msg.scpOutputDir2, data.time_slot, area=area, rgb=rgb, sat=data.satname, sat_nr=data.sat_nr() )
if in_msg.compress_to_8bit:
if in_msg.verbose:
print("... secure copy "+outputFile.replace(".png","-fs8.png")+ " to "+scpOutputDir2)
subprocess.call("scp "+in_msg.scpID2+" "+outputFile.replace(".png","-fs8.png")+" "+scpOutputDir2+" 2>&1 &", shell=True)
else:
if in_msg.verbose:
print("... secure copy "+outputFile+ " to "+scpOutputDir2)
subprocess.call("scp "+in_msg.scpID2+" "+outputFile+" "+scpOutputDir2+" 2>&1 &", shell=True)
if 'ninjotif' in in_msg.outputFormats:
ninjotif_file = format_name (outputDir+'/'+in_msg.ninjotifFilename, data.time_slot, sat_nr=data.sat_nr(), RSS=in_msg.RSS, area=area, rgb=rgb )
from plot_coalition2 import pilimage2geoimage
GEO_image = pilimage2geoimage(PIL_image, obj_area, data.time_slot)
GEO_image.save(ninjotif_file,
fformat='mpop.imageo.formats.ninjotiff',
ninjo_product_name=rgb, chan_id = products.ninjo_chan_id[rgb.replace("_","-")+"_"+area],
nbits=8)
chmod(ninjotif_file, 0o777)
print(("... save ninjotif image: display ", ninjotif_file, " &"))
if rgb not in RGBs_done:
RGBs_done.append(rgb)
## start postprocessing
for area in in_msg.postprocessing_areas:
postprocessing(in_msg, global_data.time_slot, int(data.sat_nr()), area)
if in_msg.verbose:
print(" ")
return RGBs_done
#----------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
# the main function get the command line arguments and start the function plot_msg
#----------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------
def print_usage():
print("*** ")
print("*** Error, not enough command line arguments")
print("*** please specify at least an input file")
print("*** possible calls are:")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_MSG ")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_MSG 2014 07 23 16 10 ")
print(" date and time must be completely given")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_MSG 2014 07 23 16 10 'IR_108'")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_MSG 2014 07 23 16 10 'IR_108' 'ccs4'")
print("*** python "+inspect.getfile(inspect.currentframe())+" input_MSG 2014 07 23 16 10 ['HRoverview','fog'] ['ccs4','euro4']")
print("*** ")
quit() # quit at this point
#----------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
in_msg = parse_commandline_and_read_inputfile(input_file="input_msg_cosmo_cronjob.py")
RGBs_done = plot_msg_minus_cosmo(in_msg)
print("*** Satellite pictures produced for ", RGBs_done)
print(" ")<|fim▁end|> |
cosmo_input_file="input_cosmo_cronjob.py" |
<|file_name|>classifier_mpdsvm_modular.py<|end_file_name|><|fim▁begin|>from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1,1e-5],[traindat,testdat,label_traindat,0.9,1e-5]]<|fim▁hole|> from shogun.Kernel import GaussianKernel
from shogun.Classifier import MPDSVM
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
width=2.1
kernel=GaussianKernel(feats_train, feats_train, width)
labels=BinaryLabels(label_train_twoclass)
svm=MPDSVM(C, kernel, labels)
svm.set_epsilon(epsilon)
svm.train()
kernel.init(feats_train, feats_test)
svm.apply().get_labels()
predictions = svm.apply()
return predictions, svm, predictions.get_labels()
if __name__=='__main__':
print('MPDSVM')
classifier_mpdsvm_modular(*parameter_list[0])<|fim▁end|> |
def classifier_mpdsvm_modular (fm_train_real=traindat,fm_test_real=testdat,label_train_twoclass=label_traindat,C=1,epsilon=1e-5):
from shogun.Features import RealFeatures, BinaryLabels |
<|file_name|>no-arguments-on-generators.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(generators)]
fn main() {
let gen = |start| { //~ ERROR generators cannot have explicit arguments
yield;
};
}<|fim▁end|> | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
<|file_name|>render.py<|end_file_name|><|fim▁begin|>from dataclasses import dataclass
import io
import typing as T
import numpy as np
@dataclass(frozen=True)
class PolygonShape:
vertices: T.Tuple[T.Tuple[float]]
color: T.Text
@dataclass(frozen=True)
class Body:
x: float
y: float
angle: float<|fim▁hole|>@dataclass(frozen=True)
class Scene:
bodies: T.Tuple[Body]
bounds: T.Tuple[float] # left, right, top, bottom
width: int
def draw_shape(shape, out):
out.write('<path fill="{fill}" d="'.format(fill=shape.color))
dx, dy = shape.vertices[0]
out.write('M {} {}'.format(dx, dy))
for (dx, dy) in shape.vertices[1:]:
out.write(' L {} {}'.format(dx, dy))
out.write('"/>')
def draw_body(body, out):
out.write('<g transform="translate({x},{y}) rotate({angle})">'.format(
x=body.x, y=body.y, angle=body.angle * 180/np.pi,
))
for shape in body.shapes:
draw_shape(shape, out)
out.write('</g>')
def draw_scene(scene, out):
xmin, xmax, ymin, ymax = scene.bounds
height = (ymax-ymin)/(xmax-xmin) * scene.width
out.write('<svg viewBox="{viewbox}" width="{width}" height="{height}">'.format(
viewbox='{} {} {} {}'.format(xmin, ymin, xmax-xmin, ymax-ymin),
width=scene.width, height=height))
out.write('<g transform="scale(1,-1) translate(0, {dy})">'.format(dy=-(ymax+ymin)))
for body in scene.bodies:
draw_body(body, out)
out.write('</g></svg>')
def draw(scene):
out = io.StringIO()
draw_scene(scene, out)
return out.getvalue()<|fim▁end|> | shapes: T.Tuple[PolygonShape]
|
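As a quick usage sketch of the dataclasses and `draw` function above (the square vertices, color, bounds and width below are made-up example values, not part of the original row):

```python
# Build a scene with one red unit square and render it to an SVG string.
square = PolygonShape(vertices=((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)), color="red")
body = Body(x=2.0, y=3.0, angle=0.0, shapes=(square,))
scene = Scene(bodies=(body,), bounds=(0.0, 10.0, 0.0, 10.0), width=400)
svg_markup = draw(scene)  # starts with '<svg viewBox="0.0 0.0 10.0 10.0" ...'
print(svg_markup)
```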
<|file_name|>test_command.py<|end_file_name|><|fim▁begin|># Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from caniusepython3 import command
from caniusepython3.test import unittest, skip_pypi_timeouts
from distutils import dist
def make_command(requires):
return command.Command(dist.Distribution(requires))
class RequiresTests(unittest.TestCase):
def verify_cmd(self, requirements):
requires = {requirements: ['pip']}
cmd = make_command(requires)
got = cmd._dependencies()
self.assertEqual(frozenset(got), frozenset(['pip']))
return cmd
def test_install_requires(self):
self.verify_cmd('install_requires')
def test_tests_require(self):
self.verify_cmd('tests_require')
def test_extras_require(self):
cmd = make_command({'extras_require': {'testing': ['pip']}})
got = frozenset(cmd._dependencies())
self.assertEqual(got, frozenset(['pip']))
class OptionsTests(unittest.TestCase):
def test_finalize_options(self):<|fim▁hole|> # Don't expect anything to happen.
make_command({}).finalize_options()
class NetworkTests(unittest.TestCase):
@skip_pypi_timeouts
def test_run(self):
make_command({'install_requires': ['pip']}).run()<|fim▁end|> | |
<|file_name|>money_transfere.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import flt, nowdate, getdate, cint
class MoneyTransfere(Document):
def on_submit(self):
self.validate_transfere()
def validate(self):
self.get_dummy_accounts()
def get_dummy_accounts(self):
dummy_to = frappe.db.get_values("Account", {"name": "حساب استلام من"+" - "+self.from_company + " - "+self.abbr_to,
"company": self.to_company,
"parent_account":"حساب استلام من"+" - "+self.abbr_to })
self.dummy_to=dummy_to[0][0]
<|fim▁hole|>
def before_cancel(self):
pe = frappe.get_value("Payment Entry", filters = {"transfere_reference": self.name}, fieldname = "name")
if pe:
pe_doc = frappe.get_doc("Payment Entry", pe)
pe_doc.cancel()
je = frappe.get_value("Journal Entry Account", filters = {"reference_name": self.name}, fieldname = "parent")
if je:
je_doc = frappe.get_doc("Journal Entry", je)
je_doc.cancel()
def validate_transfere(self):
if self.from_company != self.to_company:
# sending_account = "حساب ارسال الى " + self.to_company
# receiving_account = "حساب استلام من " + self.from_company
# self.add_account_for_company(sending_account, self.to_company, "Liability")
# self.add_account_for_company(receiving_account, self.from_company, "Expense")
self.add_payment_entry(self.from_account, self.dummy_from, self.from_company)
self.add_journal_entry(self.to_account,self.dummy_to, self.to_company)
else:
self.add_payment_entry(self.from_account, self.to_account, self.from_company)
def add_account_for_company(self, account, company, r_type):
pass
# pacc_name = ""
# if r_type == "Expense":
# pacc_name = "حساب ارسال - E"
# elif r_type == "Liability":
# pacc_name = "حساب استقبال - o"
# # if not frappe.db.exists("Account", pacc_name):
# # pacc = frappe.new_doc("Account")
# # pacc.account_name = pacc_name
# # pacc.root_type = r_type
# # pacc.is_group = 1
# # pacc.parent_account = ""
# # pacc.company = company
# # pacc.flags.ignore_validate = True
# # pacc.insert()
# if not frappe.db.exists("Account", account):
# acc = frappe.new_doc("Account")
# acc.account_name = account
# acc.company = company
# acc.parent_account = pacc_name
# acc.is_group = 0
# acc.insert()
def add_payment_entry(self, paid_from, paid_to, company):
pe = frappe.new_doc("Payment Entry")
pe.payment_type = "Internal Transfer"
pe.company = company
pe.paid_from = paid_from
pe.paid_to = paid_to
pe.paid_amount = self.transfered_amount
pe.received_amount = self.transfered_amount
pe.posting_date = nowdate()
pe.mode_of_payment = self.mode_of_payment
pe.transfere_reference = self.name
pe.insert()
pe.submit()
# pe.setup_party_account_field()
# pe.set_missing_values()
# pe.set_exchange_rate()
# pe.set_amounts()
# self.assertEquals(pe.difference_amount, 500)
# pe.append("deductions", {
# "account": "_Test Exchange Gain/Loss - _TC",
# "cost_center": "_Test Cost Center - _TC",
# "amount": 500
# })
def add_journal_entry(self, account1, account2, company):
default_cost = frappe.get_value("Company", filters = {"name":company}, fieldname = "cost_center")
jv = frappe.new_doc("Journal Entry")
jv.posting_date = nowdate()
jv.company = company
jv.voucher_type = "Opening Entry"
jv.set("accounts", [
{
"account": account2,
"credit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}, {
"account": account1,
"debit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}
])
jv.insert()
jv.submit()<|fim▁end|> | dummy_from = frappe.db.get_values("Account", {"name": "حساب ارسال الي"+" - "+self.to_company + " - "+self.abbr,
"company": self.from_company,
"parent_account":"حساب ارسال"+" - "+self.abbr })
self.dummy_from=dummy_from[0][0] |
<|file_name|>netlist.py<|end_file_name|><|fim▁begin|>import gdsfactory as gf
yaml = """<|fim▁hole|> settings:
width_mmi: 4.5
length_mmi: 10
mmi2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
straight:
component: straight
placements:
mmi2:
x: 100
mirror: True
straight:
x: 40
y: 40
routes:
route_name1:
links:
mmi1,o3: mmi2,o3
route_name2:
links:
mmi1,o2: straight,o1
route_name3:
links:
mmi2,o2: straight,o2
ports:
o1: mmi2,o1
o2: mmi2,o1
"""
if __name__ == "__main__":
"""FIXME"""
mzi = gf.read.from_yaml(yaml)
n = mzi.get_netlist()
# mzi.show()
# mzi.plot()<|fim▁end|> | instances:
mmi1:
component: mmi1x2 |
<|file_name|>admin_practitioners.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
'''
Provides schema and insert queries for the practitioner table
information about the practitioners (dentists hygienists etc..)
'''
from lib_openmolar.common.db_orm import InsertableRecord
TABLENAME = "practitioners"
<|fim▁hole|>class DemoGenerator(object):
def __init__(self, database=None):
self.length = 4
self.record = InsertableRecord(database, TABLENAME)
self.record.remove(self.record.indexOf("time_stamp"))
def demo_queries(self):
'''
return a list of queries to populate a demo database
'''
## practitioner 1
self.record.setValue('user_id', 1)
self.record.setValue('type',"dentist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 2
self.record.setValue('user_id', 2)
self.record.setValue('type',"dentist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 3
self.record.setValue('user_id', 3)
self.record.setValue('type',"dentist")
self.record.setValue('speciality', 'Orthodontics')
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
self.record.clearValues()
## practitioner 4
self.record.setValue('user_id', 4)
self.record.setValue('type',"hygienist")
self.record.setValue('status', "active")
self.record.setValue('modified_by', "demo_installer")
yield self.record.insert_query
if __name__ == "__main__":
from lib_openmolar.admin.connect import DemoAdminConnection
sc = DemoAdminConnection()
sc.connect()
builder = DemoGenerator(sc)
print builder.demo_queries()<|fim▁end|> | |
<|file_name|>script.js<|end_file_name|><|fim▁begin|>// Scroll to the very bottom to see the stuff we wrote, the big giant blocks are:
// froogaloop
// and
// fitvid.js
;(function( $ ){
'use strict';
$.fn.fitVids = function( options ) {
var settings = {
customSelector: null,
ignore: null
};
if(!document.getElementById('fit-vids-style')) {
// appendStyles: https://github.com/toddmotto/fluidvids/blob/master/dist/fluidvids.js
var head = document.head || document.getElementsByTagName('head')[0];
var css = '.fluid-width-video-wrapper{width:100%;position:relative;padding:0;}.fluid-width-video-wrapper iframe,.fluid-width-video-wrapper object,.fluid-width-video-wrapper embed {position:absolute;top:0;left:0;width:100%;height:100%;}';
var div = document.createElement("div");
div.innerHTML = '<p>x</p><style id="fit-vids-style">' + css + '</style>';
head.appendChild(div.childNodes[1]);
}
if ( options ) {
$.extend( settings, options );
}
return this.each(function(){
var selectors = [
'iframe[src*="player.vimeo.com"]',
'iframe[src*="youtube.com"]',
'iframe[src*="youtube-nocookie.com"]',
'iframe[src*="kickstarter.com"][src*="video.html"]',
'object',
'embed'
];
if (settings.customSelector) {
selectors.push(settings.customSelector);
}
var ignoreList = '.fitvidsignore';
if(settings.ignore) {
ignoreList = ignoreList + ', ' + settings.ignore;
}
var $allVideos = $(this).find(selectors.join(','));
$allVideos = $allVideos.not('object object'); // SwfObj conflict patch
$allVideos = $allVideos.not(ignoreList); // Disable FitVids on this video.
$allVideos.each(function(count){
var $this = $(this);
if($this.parents(ignoreList).length > 0) {
return; // Disable FitVids on this video.
}
if (this.tagName.toLowerCase() === 'embed' && $this.parent('object').length || $this.parent('.fluid-width-video-wrapper').length) { return; }
if ((!$this.css('height') && !$this.css('width')) && (isNaN($this.attr('height')) || isNaN($this.attr('width'))))
{
$this.attr('height', 9);
$this.attr('width', 16);
}
var height = ( this.tagName.toLowerCase() === 'object' || ($this.attr('height') && !isNaN(parseInt($this.attr('height'), 10))) ) ? parseInt($this.attr('height'), 10) : $this.height(),
width = !isNaN(parseInt($this.attr('width'), 10)) ? parseInt($this.attr('width'), 10) : $this.width(),
aspectRatio = height / width;<|fim▁hole|> var videoID = 'fitvid' + count;
$this.attr('id', videoID);
}
$this.wrap('<div class="fluid-width-video-wrapper"></div>').parent('.fluid-width-video-wrapper').css('padding-top', (aspectRatio * 100)+'%');
$this.removeAttr('height').removeAttr('width');
});
});
};
// Works with either jQuery or Zepto
})( window.jQuery || window.Zepto );
// Init style shamelessly stolen from jQuery http://jquery.com
var Froogaloop = (function(){
// Define a local copy of Froogaloop
function Froogaloop(iframe) {
console.log('hello');
// The Froogaloop object is actually just the init constructor
return new Froogaloop.fn.init(iframe);
}
var eventCallbacks = {},
hasWindowEvent = false,
isReady = false,
slice = Array.prototype.slice,
playerDomain = '';
Froogaloop.fn = Froogaloop.prototype = {
element: null,
init: function(iframe) {
console.log('test');
if (typeof iframe === "string") {
iframe = document.getElementById(iframe);
}
this.element = iframe;
console.log(this.element);
// Register message event listeners
playerDomain = getDomainFromUrl(this.element.getAttribute('src'));
return this;
},
/*
* Calls a function to act upon the player.
*
* @param {string} method The name of the Javascript API method to call. Eg: 'play'.
* @param {Array|Function} valueOrCallback params Array of parameters to pass when calling an API method
* or callback function when the method returns a value.
*/
api: function(method, valueOrCallback) {
if (!this.element || !method) {
return false;
}
var self = this,
element = self.element,
target_id = element.id !== '' ? element.id : null,
params = !isFunction(valueOrCallback) ? valueOrCallback : null,
callback = isFunction(valueOrCallback) ? valueOrCallback : null;
// Store the callback for get functions
if (callback) {
storeCallback(method, callback, target_id);
}
postMessage(method, params, element);
return self;
},
/*
* Registers an event listener and a callback function that gets called when the event fires.
*
* @param eventName (String): Name of the event to listen for.
* @param callback (Function): Function that should be called when the event fires.
*/
addEvent: function(eventName, callback) {
if (!this.element) {
return false;
}
var self = this,
element = self.element,
target_id = element.id !== '' ? element.id : null;
storeCallback(eventName, callback, target_id);
// The ready event is not registered via postMessage. It fires regardless.
if (eventName != 'ready') {
postMessage('addEventListener', eventName, element);
}
else if (eventName == 'ready' && isReady) {
callback.call(null, target_id);
}
return self;
},
/*
* Unregisters an event listener that gets called when the event fires.
*
* @param eventName (String): Name of the event to stop listening for.
*/
removeEvent: function(eventName) {
if (!this.element) {
return false;
}
var self = this,
element = self.element,
target_id = element.id !== '' ? element.id : null,
removed = removeCallback(eventName, target_id);
// The ready event is not registered
if (eventName != 'ready' && removed) {
postMessage('removeEventListener', eventName, element);
}
}
};
/**
* Handles posting a message to the parent window.
*
* @param method (String): name of the method to call inside the player. For api calls
* this is the name of the api method (api_play or api_pause) while for events this method
* is api_addEventListener.
* @param params (Object or Array): List of parameters to submit to the method. Can be either
* a single param or an array list of parameters.
* @param target (HTMLElement): Target iframe to post the message to.
*/
function postMessage(method, params, target) {
if (!target.contentWindow.postMessage) {
return false;
}
var url = target.getAttribute('src').split('?')[0],
data = JSON.stringify({
method: method,
value: params
});
if (url.substr(0, 2) === '//') {
url = window.location.protocol + url;
}
target.contentWindow.postMessage(data, url);
}
/**
* Event that fires whenever the window receives a message from its parent
* via window.postMessage.
*/
function onMessageReceived(event) {
var data, method;
try {
data = JSON.parse(event.data);
method = data.event || data.method;
}
catch(e) {
//fail silently... like a ninja!
}
if (method == 'ready' && !isReady) {
isReady = true;
}
// Handles messages from moogaloop only
if (event.origin != playerDomain) {
return false;
}
var value = data.value,
eventData = data.data,
target_id = target_id === '' ? null : data.player_id,
callback = getCallback(method, target_id),
params = [];
if (!callback) {
return false;
}
if (value !== undefined) {
params.push(value);
}
if (eventData) {
params.push(eventData);
}
if (target_id) {
params.push(target_id);
}
return params.length > 0 ? callback.apply(null, params) : callback.call();
}
/**
* Stores submitted callbacks for each iframe being tracked and each
* event for that iframe.
*
* @param eventName (String): Name of the event. Eg. api_onPlay
* @param callback (Function): Function that should get executed when the
* event is fired.
* @param target_id (String) [Optional]: If handling more than one iframe then
* it stores the different callbacks for different iframes based on the iframe's
* id.
*/
function storeCallback(eventName, callback, target_id) {
if (target_id) {
if (!eventCallbacks[target_id]) {
eventCallbacks[target_id] = {};
}
eventCallbacks[target_id][eventName] = callback;
}
else {
eventCallbacks[eventName] = callback;
}
}
/**
* Retrieves stored callbacks.
*/
function getCallback(eventName, target_id) {
if (target_id) {
return eventCallbacks[target_id][eventName];
}
else {
return eventCallbacks[eventName];
}
}
function removeCallback(eventName, target_id) {
if (target_id && eventCallbacks[target_id]) {
if (!eventCallbacks[target_id][eventName]) {
return false;
}
eventCallbacks[target_id][eventName] = null;
}
else {
if (!eventCallbacks[eventName]) {
return false;
}
eventCallbacks[eventName] = null;
}
return true;
}
/**
* Returns a domain's root domain.
* Eg. returns http://vimeo.com when http://vimeo.com/channels is submitted
*
* @param url (String): Url to test against.
* @return url (String): Root domain of submitted url
*/
function getDomainFromUrl(url) {
if (url.substr(0, 2) === '//') {
url = window.location.protocol + url;
}
var url_pieces = url.split('/'),
domain_str = '';
for(var i = 0, length = url_pieces.length; i < length; i++) {
if(i<3) {domain_str += url_pieces[i];}
else {break;}
if(i<2) {domain_str += '/';}
}
return domain_str;
}
function isFunction(obj) {
return !!(obj && obj.constructor && obj.call && obj.apply);
}
function isArray(obj) {
return toString.call(obj) === '[object Array]';
}
// Give the init function the Froogaloop prototype for later instantiation
Froogaloop.fn.init.prototype = Froogaloop.fn;
// Listens for the message event.
// W3C
if (window.addEventListener) {
window.addEventListener('message', onMessageReceived, false);
}
// IE
else {
window.attachEvent('onmessage', onMessageReceived);
}
// Expose froogaloop to the global object
return (window.Froogaloop = window.$f = Froogaloop);
})();<|fim▁end|> | if(!$this.attr('id')){ |
<|file_name|>report.py<|end_file_name|><|fim▁begin|>"""
report.py
Functions to create various reports.
project : pf
version : 0.0.0
status : development
modifydate :
createdate :
website : https://github.com/tmthydvnprt/pf
author : tmthydvnprt
email : [email protected]
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2016, tmthydvnprt<|fim▁hole|>
"""<|fim▁end|> | credits : |
<|file_name|>LayoutBase.js<|end_file_name|><|fim▁begin|>/**
* LayoutBase
* A Mithril component Base class for Layouts, e.g. HorizontalLayout and
* CircosLayout.
*/
import {Bounds} from '../../model/Bounds';
export class LayoutBase {
// constructor() - prefer not to use in mithril components
/**
* mithril lifecycle callback
* @param vnode
*/<|fim▁hole|> }
/**
* mithril lifecycle method
* @param vnode
*/
oncreate(vnode) {
// save a reference to this component's dom element
this.el = vnode.dom;
this.bounds = new Bounds(vnode.dom.getBoundingClientRect());
}
/**
* mithril lifecycle method
* @param vnode
*/
onupdate(vnode) {
this.bounds = new Bounds(vnode.dom.getBoundingClientRect());
}
}<|fim▁end|> |
oninit(vnode) {
this.appState = vnode.attrs.appState; |
<|file_name|>Base_Logging.py<|end_file_name|><|fim▁begin|>"""<|fim▁hole|>import pytest,logging
from loguru import logger
from pytest_reportportal import RPLogger, RPLogHandler
class Base_Logging():
"A plug-n-play class for logging"
def __init__(self,log_file_name=None,level="DEBUG",format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {module} | {message}"):
"Constructor for the logging class"
self.log_file_name=log_file_name
self.log_file_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','log'))
self.level=level
self.format=format
self.log = self.set_log(self.log_file_name,self.level,self.format)
self.rp_logger = None
def set_log(self,log_file_name,level,format,test_module_name=None):
"Add an handler sending log messages to a sink"
if test_module_name is None:
test_module_name = self.get_calling_module()
if not os.path.exists(self.log_file_dir):
os.makedirs(self.log_file_dir)
if log_file_name is None:
log_file_name = self.log_file_dir + os.sep + test_module_name + '.log'
else:
log_file_name = self.log_file_dir + os.sep + log_file_name
logger.add(log_file_name,level=level,format=format,
rotation="30 days", filter=None, colorize=None, serialize=False, backtrace=True, enqueue=False, catch=True)
def get_calling_module(self):
"Get the name of the calling module"
calling_file = inspect.stack()[-1][1]
if 'runpy' in calling_file:
calling_file = inspect.stack()[4][1]
calling_filename = calling_file.split(os.sep)
#This logic bought to you by windows + cygwin + git bash
if len(calling_filename) == 1: #Needed for
calling_filename = calling_file.split('/')
self.calling_module = calling_filename[-1].split('.')[0]
return self.calling_module
def setup_rp_logging(self, rp_pytest_service):
"Setup reportportal logging"
try:
# Set up logging.
logging.setLoggerClass(RPLogger)
self.rp_logger = logging.getLogger(__name__)
self.rp_logger.setLevel(logging.INFO)
# Create handler for Report Portal.
rp_handler = RPLogHandler(rp_pytest_service)
# Set INFO level for Report Portal handler.
rp_handler.setLevel(logging.INFO)
return self.rp_logger
except Exception as e:
self.write("Exception when trying to set rplogger")
self.write(str(e))
self.exceptions.append("Error when setting up the reportportal logger")
def write(self,msg,level='info'):
"Write out a message"
#fname = inspect.stack()[2][3] #May be use a entry-exit decorator instead
all_stack_frames = inspect.stack()
for stack_frame in all_stack_frames[1:]:
if 'Base_Page' not in stack_frame[1]:
break
fname = stack_frame[3]
d = {'caller_func': fname}
if self.rp_logger:
if level.lower()== 'debug':
self.rp_logger.debug(msg=msg)
elif level.lower()== 'info':
self.rp_logger.info(msg)
elif level.lower()== 'warn' or level.lower()=='warning':
self.rp_logger.warning(msg)
elif level.lower()== 'error':
self.rp_logger.error(msg)
elif level.lower()== 'critical':
self.rp_logger.critical(msg)
else:
self.rp_logger.critical(msg)
return
if level.lower()== 'debug':
logger.debug("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'info':
logger.info("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'warn' or level.lower()=='warning':
logger.warning("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'error':
logger.error("{module} | {msg}",module=d['caller_func'],msg=msg)
elif level.lower()== 'critical':
logger.critical("{module} | {msg}",module=d['caller_func'],msg=msg)
else:
logger.critical("Unknown level passed for the msg: {}", msg)<|fim▁end|> | Qxf2 Services: A plug-n-play class for logging.
This class wraps around Python's loguru module.
"""
import os, inspect |
<|file_name|>BookListServiceImpl.java<|end_file_name|><|fim▁begin|>package com.twu.biblioteca.service.impl;
import com.twu.biblioteca.mapper.BookListMapper;
import com.twu.biblioteca.mapper.MyBatisUtil;
import com.twu.biblioteca.model.Book;
import com.twu.biblioteca.service.BookListService;
import org.apache.ibatis.session.SqlSession;
import java.util.ArrayList;
public class BookListServiceImpl implements BookListService {<|fim▁hole|>
public BookListServiceImpl() {
this.sqlSession = MyBatisUtil.getSqlSessionFactory().openSession();
this.bookListMapper = sqlSession.getMapper(BookListMapper.class);
}
public BookListServiceImpl(BookListMapper bookListMapper) {
this.bookListMapper = bookListMapper;
}
@Override
public ArrayList<Book> getBookList() {
return bookListMapper.getBookList();
}
}<|fim▁end|> |
private SqlSession sqlSession;
private BookListMapper bookListMapper; |
<|file_name|>compile.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import argparse
import os
import logging
import cdec.configobj
import cdec.sa<|fim▁hole|>import sys
MAX_PHRASE_LENGTH = 4
def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2, tight_phrases):
lcp = cdec.sa.LCP(f_sa)
stats = sorted(lcp.compute_stats(MAX_PHRASE_LENGTH), reverse=True)
precomp = cdec.sa.Precomputation(from_stats=stats,
fsarray=f_sa,
precompute_rank=rank1,
precompute_secondary_rank=rank2,
max_length=max_len,
max_nonterminals=max_nt,
train_max_initial_size=max_size,
train_min_gap_size=min_gap)
return precomp
def main():
preprocess_start_time = monitor_cpu()
sys.setrecursionlimit(sys.getrecursionlimit() * 100)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('cdec.sa.compile')
parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
parser.add_argument('--maxnt', '-n', type=int, default=2,
help='Maximum number of non-terminal symbols')
parser.add_argument('--maxlen', '-l', type=int, default=5,
help='Maximum number of terminals')
parser.add_argument('--maxsize', '-s', type=int, default=15,
help='Maximum rule span')
parser.add_argument('--mingap', '-g', type=int, default=1,
help='Minimum gap size')
parser.add_argument('--rank1', '-r1', type=int, default=100,
help='Number of pre-computed frequent patterns')
parser.add_argument('--rank2', '-r2', type=int, default=10,
help='Number of pre-computed super-frequent patterns)')
parser.add_argument('--loose', action='store_true',
help='Enable loose phrase extraction (default: tight)')
parser.add_argument('-c', '--config', default='/dev/stdout',
help='Output configuration')
parser.add_argument('-f', '--source',
help='Source language corpus')
parser.add_argument('-e', '--target',
help='Target language corpus')
parser.add_argument('-b', '--bitext',
help='Parallel text (source ||| target)')
parser.add_argument('-a', '--alignment', required=True,
help='Bitext word alignment')
parser.add_argument('-o', '--output', required=True,
help='Output path')
args = parser.parse_args()
if not ((args.source and args.target) or args.bitext):
parser.error('a parallel corpus is required\n'
'\tuse -f (source) with -e (target) or -b (bitext)')
param_names = ('max_len', 'max_nt', 'max_size', 'min_gap',
'rank1', 'rank2', 'tight_phrases')
params = (args.maxlen, args.maxnt, args.maxsize, args.mingap,
args.rank1, args.rank2, not args.loose)
if not os.path.exists(args.output):
os.mkdir(args.output)
f_sa_bin = os.path.join(args.output, 'f.sa.bin')
e_bin = os.path.join(args.output, 'e.bin')
precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
precomp_bin = os.path.join(args.output, precomp_file)
a_bin = os.path.join(args.output, 'a.bin')
lex_bin = os.path.join(args.output, 'lex.bin')
start_time = monitor_cpu()
logger.info('Compiling source suffix array')
if args.bitext:
f_sa = cdec.sa.SuffixArray(from_text=args.bitext, side='source')
else:
f_sa = cdec.sa.SuffixArray(from_text=args.source)
f_sa.write_binary(f_sa_bin)
stop_time = monitor_cpu()
logger.info('Compiling source suffix array took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling target data array')
if args.bitext:
e = cdec.sa.DataArray(from_text=args.bitext, side='target')
else:
e = cdec.sa.DataArray(from_text=args.target)
e.write_binary(e_bin)
stop_time = monitor_cpu()
logger.info('Compiling target data array took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Precomputing frequent phrases')
precompute(f_sa, *params).write_binary(precomp_bin)
stop_time = monitor_cpu()
logger.info('Compiling precomputations took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling alignment')
a = cdec.sa.Alignment(from_text=args.alignment)
a.write_binary(a_bin)
stop_time = monitor_cpu()
    logger.info('Compiling alignment took %f seconds', stop_time - start_time)
start_time = monitor_cpu()
logger.info('Compiling bilexical dictionary')
lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
lex.write_binary(lex_bin)
stop_time = monitor_cpu()
logger.info('Compiling bilexical dictionary took %f seconds', stop_time - start_time)
# Write configuration
config = cdec.configobj.ConfigObj(args.config, unrepr=True)
config['f_sa_file'] = os.path.abspath(f_sa_bin)
config['e_file'] = os.path.abspath(e_bin)
config['a_file'] = os.path.abspath(a_bin)
config['lex_file'] = os.path.abspath(lex_bin)
config['precompute_file'] = os.path.abspath(precomp_bin)
for name, value in zip(param_names, params):
config[name] = value
config.write()
preprocess_stop_time = monitor_cpu()
logger.info('Overall preprocessing step took %f seconds', preprocess_stop_time - preprocess_start_time)
if __name__ == '__main__':
main()<|fim▁end|> | from cdec.sa._sa import monitor_cpu |
<|file_name|>ArrayHashMultiMap.java<|end_file_name|><|fim▁begin|>/*
* This file is part of the Cliche project, licensed under MIT License. See LICENSE.txt file in root folder of Cliche
* sources.
*/
package net.dudehook.cliche2.util;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* @author ASG
*/
public final class ArrayHashMultiMap<K, V> implements MultiMap<K, V>
{
private Map<K, List<V>> listMap;
public ArrayHashMultiMap()
{
listMap = new HashMap<K, List<V>>();
}
public ArrayHashMultiMap(MultiMap<K, V> map)
{
this();<|fim▁hole|> {
List<V> values = listMap.get(key);
if (values == null)
{
values = new ArrayList<V>();
listMap.put(key, values);
}
values.add(value);
}
public Collection<V> get(K key)
{
List<V> result = listMap.get(key);
if (result == null)
{
result = new ArrayList<V>();
}
return result;
}
public Set<K> keySet()
{
return listMap.keySet();
}
public void remove(K key, V value)
{
List<V> values = listMap.get(key);
if (values != null)
{
values.remove(value);
if (values.isEmpty())
{
listMap.remove(key);
}
}
}
public void removeAll(K key)
{
listMap.remove(key);
}
public int size()
{
int sum = 0;
for (K key : listMap.keySet())
{
sum += listMap.get(key).size();
}
return sum;
}
public void putAll(MultiMap<K, V> map)
{
for (K key : map.keySet())
{
for (V val : map.get(key))
{
put(key, val);
}
}
}
@Override
public String toString()
{
return listMap.toString();
}
}<|fim▁end|> | putAll(map);
}
public void put(K key, V value) |
<|file_name|>5de499ab5b62_cascade_useraffiliation_deletes.py<|end_file_name|><|fim▁begin|>"""Cascade UserAffiliation deletes
Revision ID: 5de499ab5b62
Revises: 14f51f27a106
Create Date: 2016-12-13 00:21:39.842218
"""
# revision identifiers, used by Alembic.
revision = '5de499ab5b62'
down_revision = '14f51f27a106'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user_affiliation_user_fk', 'user_affiliation', type_='foreignkey')
op.drop_constraint('user_affiliation_cgac_fk', 'user_affiliation', type_='foreignkey')
op.create_foreign_key('user_affiliation_user_fk', 'user_affiliation', 'users', ['user_id'], ['user_id'], ondelete='CASCADE')<|fim▁hole|> op.create_foreign_key('user_affiliation_cgac_fk', 'user_affiliation', 'cgac', ['cgac_id'], ['cgac_id'], ondelete='CASCADE')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user_affiliation_cgac_fk', 'user_affiliation', type_='foreignkey')
op.drop_constraint('user_affiliation_user_fk', 'user_affiliation', type_='foreignkey')
op.create_foreign_key('user_affiliation_cgac_fk', 'user_affiliation', 'cgac', ['cgac_id'], ['cgac_id'])
op.create_foreign_key('user_affiliation_user_fk', 'user_affiliation', 'users', ['user_id'], ['user_id'])
### end Alembic commands ###<|fim▁end|> | |
<|file_name|>extern-call-indirect.rs<|end_file_name|><|fim▁begin|>// run-pass
// ignore-wasm32-bare no libc to test ffi with
#![feature(rustc_private)]
extern crate libc;
mod rustrt {
extern crate libc;
#[link(name = "rust_test_helpers", kind = "static")]
extern "C" {
pub fn rust_dbg_call(
cb: extern "C" fn(libc::uintptr_t) -> libc::uintptr_t,
data: libc::uintptr_t,
) -> libc::uintptr_t;
}
}<|fim▁hole|>
fn fact(n: libc::uintptr_t) -> libc::uintptr_t {
unsafe {
println!("n = {}", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
let result = fact(10);
println!("result = {}", result);
assert_eq!(result, 3628800);
}<|fim▁end|> |
extern "C" fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1 { data } else { fact(data - 1) * data }
} |
<|file_name|>SynblkClass.java<|end_file_name|><|fim▁begin|>//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2014.08.02 at 08:05:16 PM CEST
//
package net.ramso.dita.concept;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlElements;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
* <p>Java class for synblk.class complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="synblk.class">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <group ref="{}synblk.content"/>
* </sequence>
* <attGroup ref="{}synblk.attributes"/>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "synblk.class", propOrder = {
"title",
"groupseqOrGroupchoiceOrGroupcomp"
})
@XmlSeeAlso({
Synblk.class
})
public class SynblkClass {
protected Title title;
@XmlElements({
@XmlElement(name = "groupseq", type = Groupseq.class),
@XmlElement(name = "groupchoice", type = Groupchoice.class),
@XmlElement(name = "groupcomp", type = Groupcomp.class),
@XmlElement(name = "fragref", type = Fragref.class),
@XmlElement(name = "synnote", type = Synnote.class),
@XmlElement(name = "synnoteref", type = Synnoteref.class),
@XmlElement(name = "fragment", type = Fragment.class)
})
protected List<java.lang.Object> groupseqOrGroupchoiceOrGroupcomp;
@XmlAttribute(name = "outputclass")
protected String outputclass;
@XmlAttribute(name = "xtrc")
protected String xtrc;
@XmlAttribute(name = "xtrf")
protected String xtrf;
@XmlAttribute(name = "base")
protected String base;
@XmlAttribute(name = "rev")
protected String rev;
@XmlAttribute(name = "importance")
protected ImportanceAttsClass importance;
@XmlAttribute(name = "status")
protected StatusAttsClass status;
@XmlAttribute(name = "props")
protected String props;
@XmlAttribute(name = "platform")
protected String platform;
@XmlAttribute(name = "product")
protected String product;
@XmlAttribute(name = "audience")
protected String audienceMod;
@XmlAttribute(name = "otherprops")
protected String otherprops;
@XmlAttribute(name = "translate")
protected YesnoAttClass translate;
@XmlAttribute(name = "lang", namespace = "http://www.w3.org/XML/1998/namespace")
protected String lang;
@XmlAttribute(name = "dir")
protected DirAttsClass dir;
@XmlAttribute(name = "id")
@XmlJavaTypeAdapter(CollapsedStringAdapter.class)
@XmlSchemaType(name = "NMTOKEN")
protected String id;
@XmlAttribute(name = "conref")
protected String conref;
@XmlAttribute(name = "conrefend")
protected String conrefend;
@XmlAttribute(name = "conaction")
protected ConactionAttClass conaction;
@XmlAttribute(name = "conkeyref")
protected String conkeyref;
/**
* Gets the value of the title property.
*
* @return
* possible object is
* {@link Title }
*
*/
public Title getTitle() {
return title;
}
/**
* Sets the value of the title property.
*
* @param value
* allowed object is
* {@link Title }
*
*/
public void setTitle(Title value) {
this.title = value;
}
/**
* Gets the value of the groupseqOrGroupchoiceOrGroupcomp property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the groupseqOrGroupchoiceOrGroupcomp property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getGroupseqOrGroupchoiceOrGroupcomp().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link Groupseq }
* {@link Groupchoice }
* {@link Groupcomp }
* {@link Fragref }
* {@link Synnote }
* {@link Synnoteref }
* {@link Fragment }
*
*
*/
public List<java.lang.Object> getGroupseqOrGroupchoiceOrGroupcomp() {
if (groupseqOrGroupchoiceOrGroupcomp == null) {
groupseqOrGroupchoiceOrGroupcomp = new ArrayList<java.lang.Object>();
}
return this.groupseqOrGroupchoiceOrGroupcomp;
}
/**
* Gets the value of the outputclass property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOutputclass() {
return outputclass;
}
/**
* Sets the value of the outputclass property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOutputclass(String value) {
this.outputclass = value;
}
/**
* Gets the value of the xtrc property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getXtrc() {
return xtrc;
}
/**
* Sets the value of the xtrc property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setXtrc(String value) {
this.xtrc = value;
}
/**
* Gets the value of the xtrf property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getXtrf() {
return xtrf;
}
/**
* Sets the value of the xtrf property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setXtrf(String value) {
this.xtrf = value;
}
/**
* Gets the value of the base property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getBase() {
return base;
}
/**
* Sets the value of the base property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setBase(String value) {
this.base = value;
}
/**
* Gets the value of the rev property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getRev() {
return rev;
}
/**
* Sets the value of the rev property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setRev(String value) {
this.rev = value;
}
/**
* Gets the value of the importance property.
*
* @return
* possible object is
* {@link ImportanceAttsClass }
*
*/
public ImportanceAttsClass getImportance() {
return importance;
}
/**
* Sets the value of the importance property.
*
* @param value
* allowed object is
* {@link ImportanceAttsClass }
*
*/
public void setImportance(ImportanceAttsClass value) {
this.importance = value;
}
/**
* Gets the value of the status property.
*
* @return
* possible object is
* {@link StatusAttsClass }
*
*/
public StatusAttsClass getStatus() {
return status;
}
/**
* Sets the value of the status property.<|fim▁hole|> * @param value
* allowed object is
* {@link StatusAttsClass }
*
*/
public void setStatus(StatusAttsClass value) {
this.status = value;
}
/**
* Gets the value of the props property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getProps() {
return props;
}
/**
* Sets the value of the props property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setProps(String value) {
this.props = value;
}
/**
* Gets the value of the platform property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getPlatform() {
return platform;
}
/**
* Sets the value of the platform property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setPlatform(String value) {
this.platform = value;
}
/**
* Gets the value of the product property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getProduct() {
return product;
}
/**
* Sets the value of the product property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setProduct(String value) {
this.product = value;
}
/**
* Gets the value of the audienceMod property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getAudienceMod() {
return audienceMod;
}
/**
* Sets the value of the audienceMod property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setAudienceMod(String value) {
this.audienceMod = value;
}
/**
* Gets the value of the otherprops property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getOtherprops() {
return otherprops;
}
/**
* Sets the value of the otherprops property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setOtherprops(String value) {
this.otherprops = value;
}
/**
* Gets the value of the translate property.
*
* @return
* possible object is
* {@link YesnoAttClass }
*
*/
public YesnoAttClass getTranslate() {
return translate;
}
/**
* Sets the value of the translate property.
*
* @param value
* allowed object is
* {@link YesnoAttClass }
*
*/
public void setTranslate(YesnoAttClass value) {
this.translate = value;
}
/**
* Gets the value of the lang property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getLang() {
return lang;
}
/**
* Sets the value of the lang property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setLang(String value) {
this.lang = value;
}
/**
* Gets the value of the dir property.
*
* @return
* possible object is
* {@link DirAttsClass }
*
*/
public DirAttsClass getDir() {
return dir;
}
/**
* Sets the value of the dir property.
*
* @param value
* allowed object is
* {@link DirAttsClass }
*
*/
public void setDir(DirAttsClass value) {
this.dir = value;
}
/**
* Gets the value of the id property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getId() {
return id;
}
/**
* Sets the value of the id property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setId(String value) {
this.id = value;
}
/**
* Gets the value of the conref property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getConref() {
return conref;
}
/**
* Sets the value of the conref property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setConref(String value) {
this.conref = value;
}
/**
* Gets the value of the conrefend property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getConrefend() {
return conrefend;
}
/**
* Sets the value of the conrefend property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setConrefend(String value) {
this.conrefend = value;
}
/**
* Gets the value of the conaction property.
*
* @return
* possible object is
* {@link ConactionAttClass }
*
*/
public ConactionAttClass getConaction() {
return conaction;
}
/**
* Sets the value of the conaction property.
*
* @param value
* allowed object is
* {@link ConactionAttClass }
*
*/
public void setConaction(ConactionAttClass value) {
this.conaction = value;
}
/**
* Gets the value of the conkeyref property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getConkeyref() {
return conkeyref;
}
/**
* Sets the value of the conkeyref property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setConkeyref(String value) {
this.conkeyref = value;
}
}<|fim▁end|> | * |
<|file_name|>SchedulerService.java<|end_file_name|><|fim▁begin|><|fim▁hole|>package com.stech.meetat.service;
public interface SchedulerService {
}<|fim▁end|> | |
<|file_name|>util.ts<|end_file_name|><|fim▁begin|>import {provide, Provider, Component} from 'angular2/core';
import {Type, isBlank} from 'angular2/src/facade/lang';
import {BaseException} from 'angular2/src/facade/exceptions';
import {
ComponentFixture,
AsyncTestCompleter,
TestComponentBuilder,
beforeEach,
ddescribe,
xdescribe,
describe,
el,
inject,
beforeEachProviders,
it,
xit
} from 'angular2/testing_internal';
import {RootRouter} from 'angular2/src/router/router';
import {Router, ROUTER_DIRECTIVES, ROUTER_PRIMARY_COMPONENT} from 'angular2/router';
import {SpyLocation} from 'angular2/src/mock/location_mock';
import {Location} from 'angular2/platform/common';
import {RouteRegistry} from 'angular2/src/router/route_registry';
import {DOM} from 'angular2/src/platform/dom/dom_adapter';
export {ComponentFixture} from 'angular2/testing_internal';
/**
* Router test helpers and fixtures
*/
@Component({
selector: 'root-comp',
template: `<router-outlet></router-outlet>`,
directives: [ROUTER_DIRECTIVES]
})
export class RootCmp {
name: string;
activatedCmp: any;
}
export function compile(tcb: TestComponentBuilder,<|fim▁hole|> return tcb.overrideTemplate(RootCmp, ('<div>' + template + '</div>')).createAsync(RootCmp);
}
export var TEST_ROUTER_PROVIDERS = [
RouteRegistry,
provide(Location, {useClass: SpyLocation}),
provide(ROUTER_PRIMARY_COMPONENT, {useValue: RootCmp}),
provide(Router, {useClass: RootRouter})
];
export function clickOnElement(anchorEl) {
var dispatchedEvent = DOM.createMouseEvent('click');
DOM.dispatchEvent(anchorEl, dispatchedEvent);
return dispatchedEvent;
}
export function getHref(elt) {
return DOM.getAttribute(elt, 'href');
}
/**
* Router integration suite DSL
*/
var specNameBuilder = [];
// we add the specs themselves onto this map
export var specs = {};
export function describeRouter(description: string, fn: Function, exclusive = false): void {
var specName = descriptionToSpecName(description);
specNameBuilder.push(specName);
if (exclusive) {
ddescribe(description, fn);
} else {
describe(description, fn);
}
specNameBuilder.pop();
}
export function ddescribeRouter(description: string, fn: Function, exclusive = false): void {
describeRouter(description, fn, true);
}
export function describeWithAndWithout(description: string, fn: Function): void {
// the "without" case is usually simpler, so we opt to run this spec first
describeWithout(description, fn);
describeWith(description, fn);
}
export function describeWith(description: string, fn: Function): void {
var specName = 'with ' + description;
specNameBuilder.push(specName);
describe(specName, fn);
specNameBuilder.pop();
}
export function describeWithout(description: string, fn: Function): void {
var specName = 'without ' + description;
specNameBuilder.push(specName);
describe(specName, fn);
specNameBuilder.pop();
}
function descriptionToSpecName(description: string): string {
return spaceCaseToCamelCase(description);
}
// this helper looks up the suite registered from the "impl" folder in this directory
export function itShouldRoute() {
var specSuiteName = spaceCaseToCamelCase(specNameBuilder.join(' '));
var spec = specs[specSuiteName];
if (isBlank(spec)) {
throw new BaseException(`Router integration spec suite "${specSuiteName}" was not found.`);
} else {
// todo: remove spec from map, throw if there are extra left over??
spec();
}
}
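// For illustration only (hypothetical suite names): a spec registered from the "impl"
// folder is looked up by its camel-cased spec name when itShouldRoute() runs.
//
//   describeRouter('navigation', () => {
//     describeWith('sync components', () => {
//       itShouldRoute(); // resolves specs['navigationWithSyncComponents']
//     });
//   });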
function spaceCaseToCamelCase(str: string): string {
var words = str.split(' ');
var first = words.shift();
return first + words.map(title).join('');
}
function title(str: string): string {
return str[0].toUpperCase() + str.substring(1);
}<|fim▁end|> | template: string = "<router-outlet></router-outlet>") { |
<|file_name|>layout_optimizer_test.py<|end_file_name|><|fim▁begin|># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
"""Generates a weight of a given shape."""
return random_ops.truncated_normal(shape, seed=0, stddev=0.1)
def _bias(shape):
"""Generates a bias of a given shape."""
return constant_op.constant(0.1, shape=shape)
def _conv2d(x, w):
"""Returns a 2d convolution layer with full stride."""
return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
def _max_pool_2x2(x):
"""Downsamples a feature map by 2X."""
return nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
b_conv1 = _bias([32])
h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = _max_pool_2x2(h_conv1)
w_conv2 = _weight([5, 5, 32, 64])
b_conv2 = _bias([64])
h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = _max_pool_2x2(h_conv2)
return h_pool2
def _model_with_second_port():
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
scale = constant_op.constant(0.1, shape=[4])
offset = constant_op.constant(0.3, shape=[4])
y, mean, _ = nn.fused_batch_norm(x, scale, offset)
mul = math_ops.add(y, mean)
output = array_ops.identity(mul)
return output
def _model_with_branch(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
w_conv2 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
c_conv2 = _conv2d(x_image, w_conv2)
add = math_ops.add(c_conv1, c_conv2)
return add
def _model_with_vec_and_4d(x):
x_image = array_ops.reshape(x, [-1, 28, 28, 1])
w_conv1 = _weight([5, 5, 1, 32])
c_conv1 = _conv2d(x_image, w_conv1)
vector = constant_op.constant(6.4, shape=[32])
add = math_ops.add(c_conv1, vector)
return add
def _loop():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
return outputs
def _loop_with_branch():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_branch, elems, dtype=dtypes.float32)
return outputs
def _loop_with_vec_and_4d():
random_seed.set_random_seed(0)
x1 = random_ops.truncated_normal([1, 784], seed=0)
x2 = random_ops.truncated_normal([1, 784], seed=0)
x3 = random_ops.truncated_normal([1, 784], seed=0)
x4 = random_ops.truncated_normal([1, 784], seed=0)
elems = (x1, x2, x3, x4)
outputs = functional_ops.map_fn(
_model_with_vec_and_4d, elems, dtype=dtypes.float32)
return outputs
def _get_config(layout_optimizer=True):
if layout_optimizer:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
else:
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
# do not remove duplicated nodes
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
rewrite_options.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrite_options, build_cost_model=1)
config = config_pb2.ConfigProto(graph_options=graph_options)
config.graph_options.optimizer_options.opt_level = -1
return config
def _simple_metagraph(depthwise=False):
random_seed.set_random_seed(0)
x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
y = conv(x, 32, [3, 3])
z = conv(y, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
loss = math_ops.reduce_mean(z)
train_op = optimizer.minimize(loss)
graph = ops.get_default_graph()
graph.add_to_collection('train_op', train_op)
meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
return meta_graph
def _get_cluster():
named_device = device_properties_pb2.NamedDevice()
named_device.name = '/GPU:0'
named_device.properties.type = 'GPU'
named_device.properties.num_cores = 24
named_device.properties.frequency = 1000
named_device.properties.environment['architecture'] = '4'
cluster = gcluster.Cluster(devices=[named_device])
return cluster
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
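# For illustration, the conversion nodes inserted by the layout optimizer carry names
# such as 'Conv2D-0-TransposeNHWCToNCHW-LayoutOptimizer' and
# 'Tile-1-VecPermuteNHWCToNCHW-LayoutOptimizer', so a simple suffix check is enough
# to identify and count them in the cost graph.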
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
ops.reset_default_graph()
graph = ops.get_default_graph()
with session.Session(
config=_get_config(layout_optimizer), graph=graph) as sess:
batch = 2
height = 6
width = 7
input_channels = 3
shape = [batch, height, width, input_channels]
image = array_ops.placeholder(dtype='float32', shape=shape)
conv1 = conv_layers.conv2d(image, 32, [3, 3])
conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
loss = math_ops.reduce_mean(conv2)
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
if restore:
saver.restore(sess, checkpoint_path)
else:
self.evaluate(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(np.float32)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval(session=sess) for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
def testTwoConvLayers(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
output = _two_layer_model(x)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
split = array_ops.split(conv, 2, axis=dim)
scale = constant_op.constant(0.1, shape=[32])
offset = constant_op.constant(0.3, shape=[32])
bn0 = nn.fused_batch_norm(split[0], scale, offset)
bn1 = nn.fused_batch_norm(split[1], scale, offset)
add = bn0[0] + bn1[0]
output = array_ops.identity(add)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
self._assert_map_nhwc_to_nchw('split-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitVWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
sizes = constant_op.constant([50, 10, 4], shape=[3])
split = gen_array_ops.split_v(
value=conv, size_splits=sizes, axis=dim, num_split=3)
output = math_ops.reduce_sum(split[0])
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
paddings = constant_op.constant(
paddings_val, dtype='int32', name='PaddingsConst')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self.assertIn('Pad-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSum(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testCast(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
cast = math_ops.cast(conv, dtype='bool')
output = array_ops.identity(cast)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueeze(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
squeeze = array_ops.squeeze(reduce_sum)
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueezeAlongHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueezeAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongWCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
self.assertIn('concat-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
scalar = array_ops.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
if _is_permute(node.name):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
# LayoutOptimizer; they cancelled out each other in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpConditionUnknownShape(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = array_ops.placeholder(dtype='bool')
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
condition_val = np.zeros((1, 7, 7, 64))
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={condition: condition_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={condition: condition_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask1011(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 11(1011).
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask0111(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 7(0111).
s = conv[:, :, :, 1:-1]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
def testShapeNFollowedByNotConvertibleNodeReshape(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
shapen = array_ops.shape_n([conv, conv_reshape])
shape = array_ops.identity(shapen[1])
ones = array_ops.ones(shape)
output = math_ops.add_n([conv_reshape, ones])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={x: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testBinaryOpSecondPort(self):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
<|fim▁hole|> for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('FusedBatchNorm-0', nodes)
self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.run_deprecated_v1
def testGradient(self):
meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
@test_util.run_deprecated_v1
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
if __name__ == '__main__':
test.main()<|fim▁end|> | nodes = []
num_transposes = 0 |
<|file_name|>common.go<|end_file_name|><|fim▁begin|>package domain
import (
"fmt"
"regexp"
)<|fim▁hole|> NameRegex = "^[A-Za-z0-9]+$"
URLRegex = `^((ftp|http|https):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|((www\.)?)?(([a-z\x{00a1}-\x{ffff}0-9]+-?-?_?)*[a-z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-z\x{00a1}-\x{ffff}]{2,}))?)|localhost)(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
NOT_FOUND_ERROR = "not found"
MALFORMED_ERROR = "malformed"
)
type NamesRepository interface {
Get(name Name) (URL, error)
Put(name Name, url URL) error
DeleteAll() error
}
var (
nameRegex *regexp.Regexp
urlRegex *regexp.Regexp
)
func init() {
// A Regexp is safe for concurrent use by multiple goroutines.
nameRegex = regexp.MustCompile(NameRegex)
nameRegex.Longest()
urlRegex = regexp.MustCompile(URLRegex)
}
type Validator interface {
Validate() error
}
type Name string
func (t Name) Validate() error {
if len(t) == 0 {
return fmt.Errorf("malformed name")
}
if !nameRegex.MatchString(string(t)) {
return fmt.Errorf("malformed name [%s]", t)
}
return nil
}
type URL string
func (t URL) Validate() error {
if len(t) == 0 {
return fmt.Errorf("malformed url")
}
if !urlRegex.MatchString(string(t)) {
return fmt.Errorf("malformed url [%s]", t)
}
return nil
}<|fim▁end|> |
const ( |
<|file_name|>algorithm_to_midi_test.py<|end_file_name|><|fim▁begin|>from player import Player
import pandas as pd
import time
import asyncio
from midiutil import MIDIFile
PATTERNS = [[0, 0, 0, 0, 0, 0, 0, 0], #0
[1, 0, 0, 0, 0, 0, 0, 0], #1
[1, 0, 0, 0, 1, 0, 0, 0], #2
[1, 0, 1, 0, 1, 0, 0, 0], #3
[1, 0, 1, 0, 1, 0, 1, 0], #4
[1, 1, 1, 0, 1, 0, 1, 0], #5
[1, 1, 1, 0, 1, 1, 1, 0], #6
[1, 1, 1, 1, 1, 1, 1, 1]] #7
MAX_MB = [10,10,1,1] # max mb, these values directly impact the intensity of the patterns generated
def pattern_from_mb_interval(mb, max_mb):
''' maps mb count to one of the patterns depending on the value'''
pattern_index = int(mb*7/max_mb)
try: # if mb > max_mb value, use the last pattern
pattern = PATTERNS[pattern_index]
except IndexError:
pattern = PATTERNS[7]
return pattern
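# Illustrative example (added for clarity; not part of the original script):
# with max_mb = 10, a 5 MB reading gives pattern_index = int(5 * 7 / 10) == 3,
# so PATTERNS[3] == [1, 0, 1, 0, 1, 0, 0, 0] is used; a 20 MB reading gives
# index 14, which is out of range, so the IndexError fallback keeps the densest
# pattern, PATTERNS[7].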
def index_to_note(index):
'''patterns are converted into a 16 x N matrix where each column is patterns for one of the 16 notes.
given the index of the column, this returns the midi code of the note'''
dt = {0:36, 1:38, 2:41, 3:43,4:48,5:50,6:53,7:55,8:60,9:96,10:65,11:67,12:72,13:74,14:77,15:79}
return dt[index]
<|fim▁hole|>
def fix_empty_regions(arr):
''' fills empty values so there would always be 4 rows for each second'''
new_list = []
counter = 1
index = 0
while True:
try:
i = arr[index]
except:
break
if counter == int(i[3]):
new_list.append(i)
else:
new_list.append((i[0],i[1],i[2],counter, 0, 0, 0,0,0,0))
index-=1
if counter > 3:
counter = 1
else:
counter+=1
index+=1
return new_list
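# Illustrative note (added for clarity; not part of the original script): each row
# carries its region number at position 3, cycling 1..4 within a second; when a
# region's row is missing, the loop above inserts a zero-filled placeholder for it
# (reusing the timestamp fields of the next available row) before moving on.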
def group_to_regions(fixed_data):
'''groups the data into lists of 4 items'''
pats = []
for a, b, c, d in zip(*[iter(fixed_data)]*4):
pat = []
for i in [a,b,c,d]:
pat.append(i)
pats.append(pat)
return pats
def get_patterns(grouped_data):
'''convert data into patterns'''
all_patterns = []
for second in grouped_data:
for row in second:
all_patterns.append(pattern_from_mb_interval(row[6], MAX_MB[0]))
all_patterns.append(pattern_from_mb_interval(row[7], MAX_MB[1]))
all_patterns.append(pattern_from_mb_interval(row[8], MAX_MB[2]))
all_patterns.append(pattern_from_mb_interval(row[9], MAX_MB[3]))
return all_patterns
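# Illustrative note (added for clarity; not part of the original script): every
# input row contributes four drum patterns, one per traffic column (row[6]..row[9]),
# each scaled against its own ceiling in MAX_MB, so with four region rows per
# second the flat list grows by 16 patterns per second of data.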
def group_patterns(all_patterns):
'''group the patterns into 16 x N matrix'''
counter = 0
all_pat = []
new_pat = []
for pattern in all_patterns:
new_pat.append(pattern)
counter+=1
if counter > 15:
counter = 0
all_pat.append(new_pat)
new_pat = []
return all_pat
'''UTILS TO PLAY SOUNDS'''
'''
async def play_pattern(pattern, tempo, note_index):
delay = 60/tempo
pitch = index_to_note(note_index)
for note in pattern:
if note == 1:
Player.play_sound(pitch, 127, 2000)
await asyncio.sleep(delay)
else:
await asyncio.sleep(delay)
def sequencer(patterns, tempo):
'in > list of lists of patterns, plays notes using pygame for testing'
for sequences in patterns:
index = 0
ioloop = asyncio.get_event_loop()
tasks = []
for sequence in sequences:
#pitch = index_to_note(index)
tasks.append(ioloop.create_task(play_pattern(sequence, tempo, index)))
index +=1
ioloop.run_until_complete(asyncio.wait(tasks))
ioloop.close()
'''
'''MIDI UTILS'''
def pattern_to_midi(pattern, file, note, start_time, duration = 1, volume=127):
'''writes one pattern to midi'''
track = 0
channel = 0
for ping in pattern:
if ping != 0:
file.addNote(track, channel, note, start_time, duration,volume)
start_time+=1
def patterns_to_midi(patterns, file, start_time):
'''writes 16 patterns to midi, each pattern is for a different note'''
note_index = 0
for pattern in patterns:
note = index_to_note(note_index)
pattern_to_midi(pattern, file, note, start_time)
note_index +=1
def grouped_patterns_to_midi(grouped_patterns, file):
'''takes grouped patterns and writes them to midi'''
start_time = 0
counter = 0
    length = len(grouped_patterns)
for patterns in grouped_patterns:
        print('processed '+ str(counter) + ' patterns out of '+ str(length))
counter+=1
patterns_to_midi(patterns, file, start_time)
start_time += 8
data = pd.read_csv('aggregated_data_snippet.csv') # this takes data aggregated into regions from agg_csv_to_csv.py
fixed_data = fix_empty_regions(data.to_records())
grouped_data = group_to_regions(fixed_data)
patterns = get_patterns(grouped_data)
grouped_patterns = group_patterns(patterns)
midi = MIDIFile(1)
midi.addTempo(0, 0, 120)
grouped_patterns_to_midi(grouped_patterns[1000:3000], midi) # python hangs if trying to write the whole file
print('Done processing, writing to file')
output_file = open("test.mid", "wb")
midi.writeFile(output_file)
output_file.close()<|fim▁end|> |
'''DATA PROCESSING'''
|
<|file_name|>alsace20.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2019 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.<|fim▁hole|> Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib.menu_utils import item_post_treatment
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_info
import inputstreamhelper
import json
import re
import urlquick
# TODO
# Find a way for mpd inputstream not protected by DRM to be downloadable by youtube-dl
# Add date info to catch-up tv video
URL_ROOT = "https://www.alsace20.tv"
URL_LIVE = URL_ROOT + "/emb/live1"
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- ...
"""
resp = urlquick.get(URL_ROOT)
root = resp.parse("ul", attrs={"class": "menu-vod hidden-xs"})
for category_datas in root.iterfind(".//li"):
category_name = category_datas.find('.//a').text
if '#' in category_datas.find('.//a').get('href'):
category_url = URL_ROOT
else:
category_url = URL_ROOT + category_datas.find('.//a').get('href')
item = Listitem()
item.label = category_name
item.set_callback(
list_programs, item_id=item_id, category_url=category_url)
item_post_treatment(item)
yield item
@Route.register
def list_programs(plugin, item_id, category_url, **kwargs):
"""
Build programs listing
- ...
"""
resp = urlquick.get(category_url)
root = resp.parse("div", attrs={"class": "emissions hidden-xs"})
for program_datas in root.iterfind(".//a"):
if 'VOD/est' in category_url:
if 'Est' in program_datas.get('href').split('/')[2]:
program_name = program_datas.find(
".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
elif 'VOD' in category_url:
if program_datas.get('href').split('/')[2] in category_url:
program_name = program_datas.find(
".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
else:
program_name = program_datas.find(".//div[@class='title']").text
program_image = URL_ROOT + re.compile(r'url\((.*?)\)').findall(
program_datas.find(".//div[@class='bg']").get('style'))[0]
program_url = URL_ROOT + program_datas.get('href')
item = Listitem()
item.label = program_name
item.art['thumb'] = item.art['landscape'] = program_image
item.set_callback(
list_videos, item_id=item_id, program_url=program_url)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, program_url, **kwargs):
resp = urlquick.get(program_url)
root = resp.parse("ul", attrs={"class": "list-vids"})
for video_datas in root.iterfind(".//li"):
video_title = video_datas.find('.//h2').text
video_image = URL_ROOT + '/videoimages/' + video_datas.find(
".//div[@class='img']").get('data-img')
video_plot = ''
if video_datas.find(".//div[@class='resume']").text is not None:
video_plot = video_datas.find(
".//div[@class='resume']").text.strip()
video_url = URL_ROOT + video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.info['plot'] = video_plot
item.set_callback(
get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=False)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
is_helper = inputstreamhelper.Helper('mpd')
if not is_helper.check_inputstream():
return False
resp = urlquick.get(
video_url, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
root = resp.parse()
url_stream_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
"data-url") + "&mode=html"
resp2 = urlquick.get(
url_stream_datas,
headers={"User-Agent": web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(resp2.text)
item = Listitem()
item.path = json_parser["files"]["auto"]
item.property["inputstreamaddon"] = "inputstream.adaptive"
item.property["inputstream.adaptive.manifest_type"] = "mpd"
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
is_helper = inputstreamhelper.Helper('mpd')
if not is_helper.check_inputstream():
return False
resp = urlquick.get(
URL_LIVE, headers={"User-Agent": web_utils.get_random_ua()}, max_age=-1)
root = resp.parse()
url_live_datas = URL_ROOT + root.find(".//div[@class='HDR_VISIO']").get(
"data-url") + "&mode=html"
resp2 = urlquick.get(
url_live_datas,
headers={"User-Agent": web_utils.get_random_ua()},
max_age=-1)
json_parser = json.loads(resp2.text)
item = Listitem()
item.path = json_parser["files"]["auto"]
item.property["inputstreamaddon"] = "inputstream.adaptive"
item.property["inputstream.adaptive.manifest_type"] = "mpd"
item.label = get_selected_item_label()
item.art.update(get_selected_item_art())
item.info.update(get_selected_item_info())
return item<|fim▁end|> |
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation, |
<|file_name|>plots.py<|end_file_name|><|fim▁begin|># Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from src.utils import labels as utils_labels
from src.utils import load_ncbi_taxinfo
from src import binning_classes
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import numpy as np
import os, sys, inspect
import pandas as pd
from collections import OrderedDict
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def create_colors_list():
colors_list = []
for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]:
colors_list.append(tuple(color))
colors_list.append("black")
for color in plt.cm.Set2(np.linspace(0, 1, 8)):
colors_list.append(tuple(color))
for color in plt.cm.Set3(np.linspace(0, 1, 12)):
colors_list.append(tuple(color))
return colors_list
def create_legend(color_indices, available_tools, output_dir):
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
colors_iter = iter(colors_list)
circles = [Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=10, markerfacecolor=next(colors_iter)) for label in available_tools]
fig = plt.figure(figsize=(0.5, 0.5))
fig.legend(circles, available_tools, loc='center', frameon=False, ncol=5, handletextpad=0.1)
fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_precision_vs_bin_size(pd_bins, output_dir):
pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]
for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL):
fig, axs = plt.subplots(figsize=(5, 4.5))
axs.scatter(np.log(pd_tool['total_length']), pd_tool['precision_bp'], marker='o')
axs.set_xlim([None, np.log(pd_tool['total_length'].max())])
axs.set_ylim([0.0, 1.0])
axs.set_title(tool_label, fontsize=12)
plt.ylabel('Purity per bin (%)', fontsize=12)
plt.xlabel('Bin size [log(# bp)]', fontsize=12)
fig.savefig(os.path.join(output_dir, 'genome', tool_label, 'purity_vs_bin_size.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir):
colors_list = create_colors_list()
if len(available_tools) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
for i, (color, tool) in enumerate(zip(colors_list, available_tools)):
pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index'])
axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0])
window = 50
rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean()
axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i])
axs.set_ylim([-0.01, 1.01])
axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12)
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12)
axs.tick_params(axis='x', labelsize=12)
if pd_target_column == 'precision_bp':
ylabel = 'Purity per bin (%)'
file_name = 'purity_by_genome_coverage'
else:
ylabel = 'Completeness per genome (%)'
file_name = 'completeness_by_genome_coverage'
plt.ylabel(ylabel, fontsize=15)
plt.xlabel('Average genome coverage', fontsize=15)
colors_iter = iter(colors_list)
circles = []
for x in range(len(available_tools)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14)
fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def get_pd_genomes_recall(sample_id_to_queries_list):
pd_genomes_recall = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
if not isinstance(query, binning_classes.GenomeQuery):
continue
recall_df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy()
recall_df[utils_labels.TOOL] = query.label
recall_df['sample_id'] = sample_id
recall_df = recall_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_genomes_recall = pd.concat([pd_genomes_recall, recall_df])
return pd_genomes_recall
def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir):
# compute average genome coverage if coverages for multiple samples were provided
coverages_pd = coverages_pd.groupby(['GENOMEID']).mean()
coverages_pd.rename(columns={'GENOMEID': 'genome_id'})
coverages_pd = coverages_pd.sort_values(by=['COVERAGE'])
coverages_pd['rank'] = coverages_pd['COVERAGE'].rank()
pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list)
pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict())
pd_genomes_recall = pd_genomes_recall.reset_index()
pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)
pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)
def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
if log_scale:
df_confusion = df_confusion.apply(np.log10, inplace=True).replace(-np.inf, 0)
fig, axs = plt.subplots(figsize=(10, 8))
fontsize = 20
# replace columns and rows labels by numbers
d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
df_confusion = df_confusion.rename(index=str, columns=d)
df_confusion.index = range(1, len(df_confusion) + 1)
xticklabels = int(round(df_confusion.shape[1] / 10, -1))
yticklabels = int(round(df_confusion.shape[0] / 10, -1))
sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)
# sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
sns_plot.set_xlabel("Genomes", fontsize=fontsize)
sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
plt.yticks(fontsize=12, rotation=0)
plt.xticks(fontsize=12)
mappable = sns_plot.get_children()[0]
cbar_ax = fig.add_axes([.915, .11, .017, .77])
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
if log_scale:
cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
else:
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
cbar.set_label(fontsize=fontsize, label='Millions of base pairs')
cbar.ax.tick_params(labelsize=fontsize)
cbar.outline.set_edgecolor(None)
axs.set_title(label, fontsize=fontsize, pad=10)
axs.set_ylim([len(df_confusion), 0])
# plt.yticks(fontsize=14, rotation=0)
# plt.xticks(fontsize=14)
output_dir = os.path.join(output_dir, 'genome', label)
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
if not separate_bar:
return
# create separate figure for bar
fig = plt.figure(figsize=(6, 6))
mappable = sns_plot.get_children()[0]
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))
text = cbar.ax.yaxis.label
font = matplotlib.font_manager.FontProperties(size=16)
text.set_font_properties(font)
cbar.outline.set_visible(False)
cbar.ax.tick_params(labelsize=14)
# store separate bar figure<|fim▁hole|>
plt.close(fig)
def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
pd_bins = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
metric_df[utils_labels.TOOL] = query.label
metric_df['sample_id'] = sample_id
metric_df = metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_bins = pd.concat([pd_bins, metric_df])
metric_all = []
for tool in available_tools:
pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())
fig, axs = plt.subplots(figsize=(6, 5))
medianprops = dict(linewidth=2.5, color='gold')
bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
colors_iter = iter(create_colors_list())
# turn on grid
axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')
# force axes to be from 0 to 100%
axs.set_xlim([-0.01, 1.01])
# transform plot_labels to percentages
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])
# enable code to rotate labels
tick_labels = axs.get_yticklabels()
plt.setp(tick_labels, fontsize=13) ## rotation=55
for box in bplot['boxes']:
box.set(facecolor=next(colors_iter), linewidth=0.1)
plt.ylim(plt.ylim()[::-1])
if metric_name == 'precision_bp':
axs.set_xlabel('Purity per bin (%)', fontsize=13)
metric_name = 'purity_bp'
else:
axs.set_xlabel('Completeness per genome (%)', fontsize=13)
metric_name = 'completeness_bp'
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')
# remove labels but keep grid
# axs.get_yaxis().set_ticklabels([])
# for tic in axs.yaxis.get_major_ticks():
# tic.tick1line.set_visible(False)
# tic.tick2line.set_visible(False)
# tic.label1.set_visible(False)
# tic.label2.set_visible(False)
# fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '_wo_legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
available_tools = df_results[utils_labels.TOOL].unique()
tools = [tool for tool in labels if tool in available_tools]
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)
binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]
if len(df_mean) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
# force axes to be from 0 to 100%
axs.set_xlim([0.0, 1.0])
axs.set_ylim([0.0, 1.0])
if plot_type == 'e':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'f':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'w':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
if plot_type == 'x':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
elif plot_type == 'p':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)
# turn on grid
# axs.minorticks_on()
axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
# transform plot_labels to percentages
if plot_type != 'p':
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
else:
axs.tick_params(axis='x', labelsize=12)
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
if rank:
file_name = rank + '_' + file_name
plt.title(rank)
ylabel = ylabel.replace('genome', 'taxon')
plt.xlabel(xlabel, fontsize=13)
plt.ylabel(ylabel, fontsize=13)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')
colors_iter = iter(colors_list)
circles = []
for x in range(len(df_mean)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'e',
'avg_purity_completeness_bp',
'Average purity per bin (%)',
'Average completeness per genome (%)')
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'f',
'avg_purity_completeness_seq',
'Average purity per bin (%)',
'Average completeness per genome (%)')
def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'w',
'purity_recall_bp',
'Purity for sample (%)',
'Completeness for sample (%)')
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'x',
'purity_completeness_seq',
'Purity for sample (%)',
'Completeness for sample (%)')
def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'p',
'ari_vs_assigned_bps',
'Adjusted Rand index',
'Percentage of binned base pairs')
def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
dict_metric_list = []
for metric in metrics_list:
rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_metric_list.append(rank_to_metric)
dict_error_list = []
for error in errors_list:
rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_error_list.append(rank_to_metric_error)
for index, row in pd_results.iterrows():
for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
for rank_to_metric_error, error in zip(dict_error_list, errors_list):
rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]
fig, axs = plt.subplots(figsize=(6, 5))
# force axes to be from 0 to 100%
axs.set_xlim([0, 7])
axs.set_ylim([0.0, 1.0])
x_values = range(len(load_ncbi_taxinfo.RANKS))
y_values_list = []
for rank_to_metric, color in zip(dict_metric_list, colors_list):
y_values = list(rank_to_metric.values())
axs.plot(x_values, y_values, color=color)
y_values_list.append(y_values)
for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
sem = list(rank_to_metric_error.values())
plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)
plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def create_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = 1 - pd_tool_bins['precision_bp']
def create_completeness_minus_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1
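# Illustrative example (added for clarity; not part of the original module): since
# contamination is 1 - precision_bp, this column equals completeness minus
# contamination, e.g. recall_bp = 0.90 and precision_bp = 0.85 give
# 0.90 + 0.85 - 1 = 0.75.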
def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
if len(pd_bins) == 0:
return
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
create_column_function(pd_bins_copy)
colors_list = create_colors_list()
fig, axs = plt.subplots(figsize=(6, 5))
tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()
for color, tool in zip(colors_list, tools):
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)
axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)
min_value = pd_bins_copy['newcolumn'].min()
axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
axs.set_xlim(1, None)
axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')
# transform plot_labels to percentages
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel + ' [%]', fontsize=14)
lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)
plt.tight_layout()
file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def get_number_of_hq_bins(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .5) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x70 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .7) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x90 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .9) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
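# Illustrative example (added for clarity; not part of the original module): the
# three columns are cumulative counts of bins with < 10% contamination
# (precision_bp > 0.9); e.g. a bin with recall_bp = 0.8 and precision_bp = 0.95
# is counted under both '>70%' and '>50%' but not '>90%'.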
def get_number_of_hq_bins_by_score(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
x50 -= x70
x70 -= x90
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90', '>70', '>50'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
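# Illustrative example (added for clarity; not part of the original module): the
# score is completeness with a five-fold contamination penalty, e.g.
# recall_bp = 0.95 and precision_bp = 0.96 give 0.95 + 5 * (0.96 - 1) = 0.75;
# after the subtractions above such a bin is reported in the '>70' column only,
# so these columns are disjoint (unlike the cumulative ones in get_number_of_hq_bins).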
def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
pd_counts = get_bin_counts_function(tools, pd_bins)
fig, axs = plt.subplots(figsize=(11, 5))
if output_file == 'bin_counts':
fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
else:
fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()
axs.tick_params(axis='x', labelrotation=45, length=0)
axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
axs.set_xlabel(None)
# axs.yaxis.set_major_locator(MaxNLocator(integer=True))
h, l = axs.get_legend_handles_labels()
axs.set_ylabel('#genome bins', fontsize=14)
# axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
ph = [plt.plot([], marker='', ls='')[0]]
handles = ph + h
if output_file == 'bin_counts':
labels = ['Contamination < 10% Completeness '] + l
bbox_to_anchor = (0.49, 1.02)
else:
labels = ['Score '] + l
y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
for i, v in enumerate(y_values):
axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
bbox_to_anchor = (0.47, 1.02)
lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)
# plt.subplots_adjust(hspace=0.6, wspace=0.2)
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)<|fim▁end|> | plt.gca().set_visible(False)
fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight') |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for c2asm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
<|fim▁hole|>import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "c2asm.settings")
application = get_wsgi_application()<|fim▁end|> | |
<|file_name|>checksum.cpp<|end_file_name|><|fim▁begin|>#include "util/checksum.hpp"
namespace trillek {
namespace util {
namespace algorithm {
static uint32_t crc32_table[256];
static bool crc32_table_computed = false;
static void GenCRC32Table()
{
uint32_t c;
uint32_t i;
int k;
for(i = 0; i < 256; i++) {
c = i;
for(k = 0; k < 8; k++) {
if(c & 1)
c = 0xedb88320ul ^ (c >> 1);
else
c = c >> 1;
}
crc32_table[i] = c;
}
crc32_table_computed = true;
}
void Crc32::Update(char d) {
if(!crc32_table_computed)
GenCRC32Table();
ldata = crc32_table[(ldata ^ ((unsigned char)d)) & 0xff] ^ (ldata >> 8);
}
void Crc32::Update(const std::string &d) {
uint32_t c = ldata;
std::string::size_type n, l = d.length();
if(!crc32_table_computed)
GenCRC32Table();
for(n = 0; n < l; n++) {
c = crc32_table[(c ^ ((unsigned char)d[n])) & 0xff] ^ (c >> 8);
}
ldata = c;
}
void Crc32::Update(const void *dv, size_t l) {
uint32_t c = ldata;
std::string::size_type n;
char * d = (char*)dv;
if(!crc32_table_computed)
GenCRC32Table();
for(n = 0; n < l; n++) {
c = crc32_table[(c ^ ((unsigned char)d[n])) & 0xff] ^ (c >> 8);
}
ldata = c;
}
static const uint32_t ADLER_LIMIT = 5552;
static const uint32_t ADLER_BASE = 65521u;
void Adler32::Update(const std::string &d) {
Update(d.data(), d.length());
}
void Adler32::Update(const void *dv, size_t l) {
uint32_t c1 = ldata & 0xffff;
uint32_t c2 = (ldata >> 16) & 0xffff;
unsigned char * d = (unsigned char*)dv;
std::string::size_type n = 0;
while(l >= ADLER_LIMIT) {
l -= ADLER_LIMIT;<|fim▁hole|> c1 += d[n+ 4]; c2 += c1; c1 += d[n+ 5]; c2 += c1;
c1 += d[n+ 6]; c2 += c1; c1 += d[n+ 7]; c2 += c1;
c1 += d[n+ 8]; c2 += c1; c1 += d[n+ 9]; c2 += c1;
c1 += d[n+10]; c2 += c1; c1 += d[n+11]; c2 += c1;
c1 += d[n+12]; c2 += c1; c1 += d[n+13]; c2 += c1;
c1 += d[n+14]; c2 += c1; c1 += d[n+15]; c2 += c1;
n += 16;
}
c1 %= ADLER_BASE;
c2 %= ADLER_BASE;
}
for(; l; n++, l--) {
c1 += d[n];
while(c1 >= ADLER_BASE) {
c1 -= ADLER_BASE;
}
c2 += c1;
while(c2 >= ADLER_BASE) {
c2 -= ADLER_BASE;
}
}
ldata = (c2 << 16) + c1;
}
} // algorithm
} // util
} // trillek<|fim▁end|> | uint32_t limit = ADLER_LIMIT / 16;
while(limit--) {
c1 += d[n ]; c2 += c1; c1 += d[n+ 1]; c2 += c1;
c1 += d[n+ 2]; c2 += c1; c1 += d[n+ 3]; c2 += c1; |
<|file_name|>ex31.py<|end_file_name|><|fim▁begin|>print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
    print "There's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":<|fim▁hole|> print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away." %bear
elif door =="2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1. Blueberries."
print "2. Yellow jacket clothespins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity =="2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!"<|fim▁end|> | |
<|file_name|>autoderef-method-priority.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait double {
fn double(self) -> uint;
}
impl double for uint {
fn double(self) -> uint { self }
}
impl double for @uint {
fn double(self) -> uint { *self * 2u }<|fim▁hole|>pub fn main() {
let x = @3u;
assert_eq!(x.double(), 6u);
}<|fim▁end|> | }
|
<|file_name|>devtools.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::CSSStyleDeclarationBinding::CSSStyleDeclarationMethods;
use crate::dom::bindings::codegen::Bindings::DOMRectBinding::DOMRectMethods;
use crate::dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use crate::dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use crate::dom::bindings::conversions::{jsstring_to_str, ConversionResult, FromJSValConvertible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::AnimationFrameCallback;
use crate::dom::element::Element;
use crate::dom::globalscope::GlobalScope;
use crate::dom::node::{window_from_node, Node, ShadowIncluding};
use crate::dom::window::Window;
use crate::realms::enter_realm;
use crate::script_thread::Documents;
use devtools_traits::{AutoMargins, ComputedNodeLayout, TimelineMarkerType};
use devtools_traits::{EvaluateJSReply, Modification, NodeInfo, TimelineMarker};<|fim▁hole|>use msg::constellation_msg::PipelineId;
use std::ffi::CStr;
use std::str;
use uuid::Uuid;
#[allow(unsafe_code)]
pub fn handle_evaluate_js(global: &GlobalScope, eval: String, reply: IpcSender<EvaluateJSReply>) {
// global.get_cx() returns a valid `JSContext` pointer, so this is safe.
let result = unsafe {
let cx = global.get_cx();
let _ac = enter_realm(global);
rooted!(in(*cx) let mut rval = UndefinedValue());
global.evaluate_js_on_global_with_result(&eval, rval.handle_mut());
if rval.is_undefined() {
EvaluateJSReply::VoidValue
} else if rval.is_boolean() {
EvaluateJSReply::BooleanValue(rval.to_boolean())
} else if rval.is_double() || rval.is_int32() {
EvaluateJSReply::NumberValue(
match FromJSValConvertible::from_jsval(*cx, rval.handle(), ()) {
Ok(ConversionResult::Success(v)) => v,
_ => unreachable!(),
},
)
} else if rval.is_string() {
EvaluateJSReply::StringValue(String::from(jsstring_to_str(*cx, rval.to_string())))
} else if rval.is_null() {
EvaluateJSReply::NullValue
} else {
assert!(rval.is_object());
rooted!(in(*cx) let obj = rval.to_object());
let class_name = CStr::from_ptr(ObjectClassName(*cx, obj.handle()));
let class_name = str::from_utf8(class_name.to_bytes()).unwrap();
EvaluateJSReply::ActorValue {
class: class_name.to_owned(),
uuid: Uuid::new_v4().to_string(),
}
}
};
reply.send(result).unwrap();
}
pub fn handle_get_root_node(
documents: &Documents,
pipeline: PipelineId,
reply: IpcSender<Option<NodeInfo>>,
) {
let info = documents
.find_document(pipeline)
.map(|document| document.upcast::<Node>().summarize());
reply.send(info).unwrap();
}
pub fn handle_get_document_element(
documents: &Documents,
pipeline: PipelineId,
reply: IpcSender<Option<NodeInfo>>,
) {
let info = documents
.find_document(pipeline)
.and_then(|document| document.GetDocumentElement())
.map(|element| element.upcast::<Node>().summarize());
reply.send(info).unwrap();
}
fn find_node_by_unique_id(
documents: &Documents,
pipeline: PipelineId,
node_id: &str,
) -> Option<DomRoot<Node>> {
documents.find_document(pipeline).and_then(|document| {
document
.upcast::<Node>()
.traverse_preorder(ShadowIncluding::Yes)
.find(|candidate| candidate.unique_id() == node_id)
})
}
pub fn handle_get_children(
documents: &Documents,
pipeline: PipelineId,
node_id: String,
reply: IpcSender<Option<Vec<NodeInfo>>>,
) {
match find_node_by_unique_id(documents, pipeline, &*node_id) {
None => return reply.send(None).unwrap(),
Some(parent) => {
let children = parent.children().map(|child| child.summarize()).collect();
reply.send(Some(children)).unwrap();
},
};
}
pub fn handle_get_layout(
documents: &Documents,
pipeline: PipelineId,
node_id: String,
reply: IpcSender<Option<ComputedNodeLayout>>,
) {
let node = match find_node_by_unique_id(documents, pipeline, &*node_id) {
None => return reply.send(None).unwrap(),
Some(found_node) => found_node,
};
let elem = node
.downcast::<Element>()
.expect("should be getting layout of element");
let rect = elem.GetBoundingClientRect();
let width = rect.Width() as f32;
let height = rect.Height() as f32;
let window = window_from_node(&*node);
let elem = node
.downcast::<Element>()
.expect("should be getting layout of element");
let computed_style = window.GetComputedStyle(elem, None);
reply
.send(Some(ComputedNodeLayout {
display: String::from(computed_style.Display()),
position: String::from(computed_style.Position()),
zIndex: String::from(computed_style.ZIndex()),
boxSizing: String::from(computed_style.BoxSizing()),
autoMargins: determine_auto_margins(&window, &*node),
marginTop: String::from(computed_style.MarginTop()),
marginRight: String::from(computed_style.MarginRight()),
marginBottom: String::from(computed_style.MarginBottom()),
marginLeft: String::from(computed_style.MarginLeft()),
borderTopWidth: String::from(computed_style.BorderTopWidth()),
borderRightWidth: String::from(computed_style.BorderRightWidth()),
borderBottomWidth: String::from(computed_style.BorderBottomWidth()),
borderLeftWidth: String::from(computed_style.BorderLeftWidth()),
paddingTop: String::from(computed_style.PaddingTop()),
paddingRight: String::from(computed_style.PaddingRight()),
paddingBottom: String::from(computed_style.PaddingBottom()),
paddingLeft: String::from(computed_style.PaddingLeft()),
width: width,
height: height,
}))
.unwrap();
}
fn determine_auto_margins(window: &Window, node: &Node) -> AutoMargins {
let style = window.style_query(node.to_trusted_node_address()).unwrap();
let margin = style.get_margin();
AutoMargins {
top: margin.margin_top.is_auto(),
right: margin.margin_right.is_auto(),
bottom: margin.margin_bottom.is_auto(),
left: margin.margin_left.is_auto(),
}
}
pub fn handle_modify_attribute(
documents: &Documents,
pipeline: PipelineId,
node_id: String,
modifications: Vec<Modification>,
) {
let node = match find_node_by_unique_id(documents, pipeline, &*node_id) {
None => {
return warn!(
"node id {} for pipeline id {} is not found",
&node_id, &pipeline
);
},
Some(found_node) => found_node,
};
let elem = node
.downcast::<Element>()
.expect("should be getting layout of element");
for modification in modifications {
match modification.newValue {
Some(string) => {
let _ = elem.SetAttribute(
DOMString::from(modification.attributeName),
DOMString::from(string),
);
},
None => elem.RemoveAttribute(DOMString::from(modification.attributeName)),
}
}
}
pub fn handle_wants_live_notifications(global: &GlobalScope, send_notifications: bool) {
global.set_devtools_wants_updates(send_notifications);
}
pub fn handle_set_timeline_markers(
documents: &Documents,
pipeline: PipelineId,
marker_types: Vec<TimelineMarkerType>,
reply: IpcSender<Option<TimelineMarker>>,
) {
match documents.find_window(pipeline) {
None => reply.send(None).unwrap(),
Some(window) => window.set_devtools_timeline_markers(marker_types, reply),
}
}
pub fn handle_drop_timeline_markers(
documents: &Documents,
pipeline: PipelineId,
marker_types: Vec<TimelineMarkerType>,
) {
if let Some(window) = documents.find_window(pipeline) {
window.drop_devtools_timeline_markers(marker_types);
}
}
pub fn handle_request_animation_frame(documents: &Documents, id: PipelineId, actor_name: String) {
if let Some(doc) = documents.find_document(id) {
doc.request_animation_frame(AnimationFrameCallback::DevtoolsFramerateTick { actor_name });
}
}
pub fn handle_reload(documents: &Documents, id: PipelineId) {
if let Some(win) = documents.find_window(id) {
win.Location().reload_without_origin_check();
}
}<|fim▁end|> | use ipc_channel::ipc::IpcSender;
use js::jsval::UndefinedValue;
use js::rust::wrappers::ObjectClassName; |
<|file_name|>filltypespersp.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2011 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "gm.h"
#include "SkGradientShader.h"
namespace skiagm {
class FillTypePerspGM : public GM {
SkPath fPath;
public:
FillTypePerspGM() {}
void makePath() {
if (fPath.isEmpty()) {
const SkScalar radius = SkIntToScalar(45);
fPath.addCircle(SkIntToScalar(50), SkIntToScalar(50), radius);
fPath.addCircle(SkIntToScalar(100), SkIntToScalar(100), radius);
}
}
protected:
SkString onShortName() SK_OVERRIDE {
return SkString("filltypespersp");
}
SkISize onISize() SK_OVERRIDE {
return SkISize::Make(835, 840);
}
void showPath(SkCanvas* canvas, int x, int y, SkPath::FillType ft,
SkScalar scale, const SkPaint& paint) {
const SkRect r = { 0, 0, SkIntToScalar(150), SkIntToScalar(150) };
canvas->save();
canvas->translate(SkIntToScalar(x), SkIntToScalar(y));
canvas->clipRect(r);
canvas->drawColor(SK_ColorWHITE);
fPath.setFillType(ft);
canvas->translate(r.centerX(), r.centerY());
canvas->scale(scale, scale);
canvas->translate(-r.centerX(), -r.centerY());
canvas->drawPath(fPath, paint);
canvas->restore();
}
void showFour(SkCanvas* canvas, SkScalar scale, bool aa) {
SkPaint paint;
SkPoint center = SkPoint::Make(SkIntToScalar(100), SkIntToScalar(100));
SkColor colors[] = {SK_ColorBLUE, SK_ColorRED, SK_ColorGREEN};
SkScalar pos[] = {0, SK_ScalarHalf, SK_Scalar1};
SkShader* s = SkGradientShader::CreateRadial(center,
SkIntToScalar(100),
colors,
pos,
SK_ARRAY_COUNT(colors),
SkShader::kClamp_TileMode);
paint.setShader(s)->unref();
paint.setAntiAlias(aa);
showPath(canvas, 0, 0, SkPath::kWinding_FillType,
scale, paint);
showPath(canvas, 200, 0, SkPath::kEvenOdd_FillType,
scale, paint);
showPath(canvas, 00, 200, SkPath::kInverseWinding_FillType,
scale, paint);
showPath(canvas, 200, 200, SkPath::kInverseEvenOdd_FillType,
scale, paint);
}
void onDraw(SkCanvas* canvas) SK_OVERRIDE {
this->makePath();
// do perspective drawPaint as the background;
SkPaint bkgnrd;
SkPoint center = SkPoint::Make(SkIntToScalar(100),
SkIntToScalar(100));
SkColor colors[] = {SK_ColorBLACK, SK_ColorCYAN,
SK_ColorYELLOW, SK_ColorWHITE};
SkScalar pos[] = {0, SK_ScalarHalf / 2,
3 * SK_ScalarHalf / 2, SK_Scalar1};
SkShader* s = SkGradientShader::CreateRadial(center,
SkIntToScalar(1000),
colors,
pos,
SK_ARRAY_COUNT(colors),
SkShader::kClamp_TileMode);
bkgnrd.setShader(s)->unref();
canvas->save();
canvas->translate(SkIntToScalar(100), SkIntToScalar(100));
SkMatrix mat;
mat.reset();
mat.setPerspY(SK_Scalar1 / 1000);
canvas->concat(mat);
canvas->drawPaint(bkgnrd);
canvas->restore();
// draw the paths in perspective
SkMatrix persp;
persp.reset();
persp.setPerspX(-SK_Scalar1 / 1800);
persp.setPerspY(SK_Scalar1 / 500);
canvas->concat(persp);
canvas->translate(SkIntToScalar(20), SkIntToScalar(20));
const SkScalar scale = SkIntToScalar(5)/4;
showFour(canvas, SK_Scalar1, false);
canvas->translate(SkIntToScalar(450), 0);
showFour(canvas, scale, false);
canvas->translate(SkIntToScalar(-450), SkIntToScalar(450));
showFour(canvas, SK_Scalar1, true);
canvas->translate(SkIntToScalar(450), 0);
showFour(canvas, scale, true);
}
private:
typedef GM INHERITED;
};
//////////////////////////////////////////////////////////////////////////////
<|fim▁hole|>static GM* MyFactory(void*) { return new FillTypePerspGM; }
static GMRegistry reg(MyFactory);
}<|fim▁end|> | |
<|file_name|>S15.8.2.13_A13.js<|end_file_name|><|fim▁begin|>// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/**
* If x is -Infinity and y>0 and y is an odd integer, Math.pow(x,y) is -Infinity
*
* @path ch15/15.8/15.8.2/15.8.2.13/S15.8.2.13_A13.js
* @description Checking if Math.pow(x,y) equals to -Infinity, where x is -Infinity and y>0
*/
// CHECK#1
x = -Infinity;
y = new Array();<|fim▁hole|>
for (i = 0; i < ynum; i++)
{
if (Math.pow(x,y[i]) !== -Infinity)
{
$ERROR("#1: Math.pow(" + x + ", " + y[i] + ") !== -Infinity");
}
}<|fim▁end|> | y[0] = 1;
y[1] = 111;
y[2] = 111111;
ynum = 3; |
<|file_name|>DynamicGraphTestFrame.java<|end_file_name|><|fim▁begin|>package com.googlecode.blaisemath.graph.test;
/*
* #%L
* BlaiseGraphTheory
* --
* Copyright (C) 2009 - 2021 Elisha Peterson
* --
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import com.google.common.graph.Graph;
import com.google.common.graph.Graphs;
import com.googlecode.blaisemath.graph.layout.CircleLayout;
import com.googlecode.blaisemath.graph.layout.CircleLayout.CircleLayoutParameters;
import com.googlecode.blaisemath.graph.layout.RandomBoxLayout;
import com.googlecode.blaisemath.graph.layout.RandomBoxLayout.BoxLayoutParameters;
import com.googlecode.blaisemath.graph.layout.SpringLayout;
import com.googlecode.blaisemath.graph.layout.SpringLayoutParameters;
import com.googlecode.blaisemath.graph.view.GraphComponent;
import com.googlecode.blaisemath.graph.view.VisualGraph;
import com.googlecode.blaisemath.graphics.Graphic;
import com.googlecode.blaisemath.util.Instrument;
import com.googlecode.blaisemath.firestarter.editor.EditorRegistration;
import com.googlecode.blaisemath.firestarter.property.PropertySheet;
import com.googlecode.blaisemath.firestarter.swing.RollupPanel;
import javax.swing.*;
import java.awt.event.WindowAdapter;
import java.awt.event.WindowEvent;
import java.awt.geom.Rectangle2D;
@SuppressWarnings("ALL")
public class DynamicGraphTestFrame extends javax.swing.JFrame {
VisualGraph pga;
/** Flag for when el needs points updated */
boolean updateEL = true;
SpringLayout energyLayout;
final SpringLayoutParameters layoutParams;
final MyTestGraph graph = new MyTestGraph();
Graph<String> graphCopy;
public DynamicGraphTestFrame() {
EditorRegistration.registerEditors();
initComponents();
graphCopy = Graphs.copyOf(graph);
plot.setGraph(graphCopy);
plot.getAdapter().getViewGraph().setDragEnabled(true);
plot.getAdapter().getViewGraph().setPointSelectionEnabled(true);
// PANELS
energyLayout = new SpringLayout();
layoutParams = energyLayout.createParameters();
rollupPanel1.add("Energy Layout", PropertySheet.forBean(layoutParams));
for (Graphic p : plot.getGraphicRoot().getGraphics()) {
rollupPanel1.add(p.toString(), PropertySheet.forBean(p));
}
addWindowListener(new WindowAdapter(){
@Override
public void windowClosing(WindowEvent e) {
Instrument.print(System.out, 50);
}
});
}
/** This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jToolBar1 = new javax.swing.JToolBar();
randomLB = new javax.swing.JButton();
circleLB = new javax.swing.JButton();
jSeparator1 = new javax.swing.JToolBar.Separator();
jLabel1 = new javax.swing.JLabel();
energyIB = new javax.swing.JButton();
energyAB = new javax.swing.JButton();
energySB = new javax.swing.JButton();
jSeparator2 = new javax.swing.JToolBar.Separator();
jLabel2 = new javax.swing.JLabel();
addEdgesB = new javax.swing.JButton();
rewireB = new javax.swing.JButton();
addThreadedB = new javax.swing.JButton();
addNodesB = new javax.swing.JButton();
threadStopB = new javax.swing.JButton();
jScrollPane1 = new javax.swing.JScrollPane();
rollupPanel1 = new RollupPanel();
plot = new GraphComponent();
setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
setBackground(new java.awt.Color(0, 0, 0));
jToolBar1.setRollover(true);
randomLB.setText("Random Layout");
randomLB.setFocusable(false);
randomLB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
randomLB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
randomLB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
randomLBActionPerformed(evt);
}
});
jToolBar1.add(randomLB);
circleLB.setText("Circle Layout");
circleLB.setFocusable(false);
circleLB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
circleLB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
circleLB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
circleLBActionPerformed(evt);
}
});
jToolBar1.add(circleLB);
jToolBar1.add(jSeparator1);
jLabel1.setText("ENERGY:");
jToolBar1.add(jLabel1);
energyIB.setText("iterate");
energyIB.setFocusable(false);
energyIB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
energyIB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
energyIB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
energyIBActionPerformed(evt);
}
});
jToolBar1.add(energyIB);
energyAB.setText("animate");
energyAB.setFocusable(false);
energyAB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
energyAB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
energyAB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
energyABActionPerformed(evt);
}
});
jToolBar1.add(energyAB);
energySB.setText("stop");
energySB.setFocusable(false);
energySB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
energySB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
energySB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
energySBActionPerformed(evt);
}
});
jToolBar1.add(energySB);
jToolBar1.add(jSeparator2);
jLabel2.setText("ADD:");
jToolBar1.add(jLabel2);
addNodesB.setText("nodes");
addNodesB.setFocusable(false);
addNodesB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
addNodesB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
addNodesB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
addNodesBActionPerformed(evt);
}
});
jToolBar1.add(addNodesB);
addEdgesB.setText("edges");
addEdgesB.setFocusable(false);
addEdgesB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
addEdgesB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
addEdgesB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
addEdgesBActionPerformed(evt);
}
});
jToolBar1.add(addEdgesB);
rewireB.setText("rewire");
rewireB.setFocusable(false);
rewireB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
rewireB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
rewireB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
rewireBActionPerformed(evt);
}
});
jToolBar1.add(rewireB);
addThreadedB.setText("threaded");
addThreadedB.setFocusable(false);
addThreadedB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
addThreadedB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
addThreadedB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
addThreadedBActionPerformed(evt);
}
});
jToolBar1.add(addThreadedB);
threadStopB.setText("stop");
threadStopB.setFocusable(false);
threadStopB.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER);
threadStopB.setVerticalTextPosition(javax.swing.SwingConstants.BOTTOM);
threadStopB.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
threadStopBActionPerformed(evt);
}
});
jToolBar1.add(threadStopB);
getContentPane().add(jToolBar1, java.awt.BorderLayout.PAGE_START);
jScrollPane1.setViewportView(rollupPanel1);
getContentPane().add(jScrollPane1, java.awt.BorderLayout.EAST);
getContentPane().add(plot, java.awt.BorderLayout.CENTER);
pack();
}// </editor-fold>//GEN-END:initComponents
private void randomLBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_randomLBActionPerformed
updateEL = true;
plot.getLayoutManager().applyLayout(RandomBoxLayout.getInstance(), null, new BoxLayoutParameters(new Rectangle2D.Double(-500, -500, 1000, 1000)));
}//GEN-LAST:event_randomLBActionPerformed
private void circleLBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_circleLBActionPerformed
updateEL = true;
plot.getLayoutManager().applyLayout(CircleLayout.getInstance(), null, new CircleLayoutParameters(500.0));
}//GEN-LAST:event_circleLBActionPerformed
private void energyIBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_energyIBActionPerformed
if (energyLayout == null) {
energyLayout = new SpringLayout();
}
plot.getLayoutManager().setLayoutAlgorithm(energyLayout);
plot.getLayoutManager().setLayoutParameters(layoutParams);
plot.getLayoutManager().iterateLayout();
updateEL = false;
}//GEN-LAST:event_energyIBActionPerformed
private void energyABActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_energyABActionPerformed
if (energyLayout == null) {
energyLayout = new SpringLayout();
}
plot.getLayoutManager().setLayoutAlgorithm(energyLayout);
plot.getLayoutManager().setLayoutParameters(layoutParams);
plot.getLayoutManager().setLayoutTaskActive(true);
}//GEN-LAST:event_energyABActionPerformed
private void energySBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_energySBActionPerformed
plot.getLayoutManager().setLayoutTaskActive(false);
}//GEN-LAST:event_energySBActionPerformed
private synchronized void updateGraph() {
SwingUtilities.invokeLater(() -> {
graphCopy = Graphs.copyOf(graph);
plot.getLayoutManager().setGraph(graphCopy);
plot.getAdapter().getViewGraph().setEdgeSet(graphCopy.edges());
});
}
private void addNodesBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_addNodesBActionPerformed
graph.addNodes(5);
updateGraph();
}//GEN-LAST:event_addNodesBActionPerformed
private void addEdgesBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_addEdgesBActionPerformed
graph.addEdges(5);
updateGraph();
}//GEN-LAST:event_addEdgesBActionPerformed
private void rewireBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_rewireBActionPerformed
graph.rewire(50, 5);
updateGraph();
}//GEN-LAST:event_rewireBActionPerformed
final java.util.Timer t = new java.util.Timer();
java.util.TimerTask tt;
private void addThreadedBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_addThreadedBActionPerformed
if (tt != null) {
tt.cancel();
}
tt = new java.util.TimerTask() {
@Override
public void run() {
graph.removeEdges(10);
graph.addNodes(1);
graph.removeNodes(1);
graph.addEdges(2);
updateGraph();
}
};
t.schedule(tt, 100, 500);
}//GEN-LAST:event_addThreadedBActionPerformed
private void threadStopBActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_threadStopBActionPerformed
if (tt != null) {
tt.cancel();
}
}//GEN-LAST:event_threadStopBActionPerformed
/**
* @param args the command line arguments
*/
public static void main(String args[]) {
java.awt.EventQueue.invokeLater(() -> new DynamicGraphTestFrame().setVisible(true));<|fim▁hole|> private javax.swing.JButton addEdgesB;
private javax.swing.JButton addThreadedB;
private javax.swing.JButton addNodesB;
private javax.swing.JButton circleLB;
private javax.swing.JButton energyAB;
private javax.swing.JButton energyIB;
private javax.swing.JButton energySB;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel2;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JToolBar.Separator jSeparator1;
private javax.swing.JToolBar.Separator jSeparator2;
private javax.swing.JToolBar jToolBar1;
private GraphComponent plot;
private javax.swing.JButton randomLB;
private javax.swing.JButton rewireB;
private RollupPanel rollupPanel1;
private javax.swing.JButton threadStopB;
// End of variables declaration//GEN-END:variables
}<|fim▁end|> | }
// Variables declaration - do not modify//GEN-BEGIN:variables |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![allow(non_camel_case_types)]
// See https://jansson.readthedocs.io/ for API documentation.
use std::os::raw::{c_char, c_int, c_longlong, c_void};
/// The type of a JSON value.
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum json_type {
JSON_OBJECT = 0,
JSON_ARRAY = 1,
JSON_STRING = 2,
JSON_INTEGER = 3,
JSON_REAL = 4,
JSON_TRUE = 5,
JSON_FALSE = 6,
JSON_NULL = 7,
}
/// The maximum possible indentation when pretty-printing JSON.
pub const JSON_MAX_INDENT: u32 = 31;
/// A JSON value.
#[repr(C)]
#[derive(Debug)]
pub struct json_t {
pub type_: json_type,
pub refcount: usize,
}
/// An error that occurred during JSON processing.
#[repr(C)]
pub struct json_error_t {
pub line: c_int,
pub column: c_int,
pub position: c_int,
pub source: [c_char; 80usize],
pub text: [c_char; 160usize],
}
pub type json_load_callback_t = unsafe extern "C" fn(buffer: *mut c_void, buflen: usize, data: *mut c_void) -> usize;
pub type json_dump_callback_t = unsafe extern "C" fn(buffer: *const c_char, size: usize, data: *mut c_void) -> c_int;
pub type json_malloc_t = unsafe extern "C" fn(arg1: usize) -> *mut c_void;
pub type json_free_t = unsafe extern "C" fn(arg1: *mut c_void);
pub type json_int_t = c_longlong;
extern "C" {
pub fn json_object() -> *mut json_t;
pub fn json_array() -> *mut json_t;
pub fn json_string(value: *const c_char) -> *mut json_t;
pub fn json_stringn(value: *const c_char, len: usize) -> *mut json_t;
pub fn json_string_nocheck(value: *const c_char) -> *mut json_t;
pub fn json_stringn_nocheck(value: *const c_char, len: usize) -> *mut json_t;
pub fn json_integer(value: json_int_t) -> *mut json_t;
pub fn json_real(value: f64) -> *mut json_t;
pub fn json_true() -> *mut json_t;
pub fn json_false() -> *mut json_t;
pub fn json_null() -> *mut json_t;
pub fn json_delete(json: *mut json_t);
pub fn json_object_seed(seed: usize);
pub fn json_object_size(object: *const json_t) -> usize;
pub fn json_object_get(object: *const json_t, key: *const c_char) -> *mut json_t;
pub fn json_object_set_new(object: *mut json_t, key: *const c_char, value: *mut json_t) -> c_int;
pub fn json_object_set_new_nocheck(object: *mut json_t, key: *const c_char, value: *mut json_t) -> c_int;
pub fn json_object_del(object: *mut json_t, key: *const c_char) -> c_int;
pub fn json_object_clear(object: *mut json_t) -> c_int;
pub fn json_object_update(object: *mut json_t, other: *mut json_t) -> c_int;
pub fn json_object_update_existing(object: *mut json_t, other: *mut json_t) -> c_int;
pub fn json_object_update_missing(object: *mut json_t, other: *mut json_t) -> c_int;
pub fn json_object_iter(object: *mut json_t) -> *mut c_void;
pub fn json_object_iter_at(object: *mut json_t, key: *const c_char) -> *mut c_void;
pub fn json_object_key_to_iter(key: *const c_char) -> *mut c_void;
pub fn json_object_iter_next(object: *mut json_t, iter: *mut c_void) -> *mut c_void;
pub fn json_object_iter_key(iter: *mut c_void) -> *const c_char;
pub fn json_object_iter_value(iter: *mut c_void) -> *mut json_t;
pub fn json_object_iter_set_new(object: *mut json_t, iter: *mut c_void, value: *mut json_t) -> c_int;
pub fn json_array_size(array: *const json_t) -> usize;
pub fn json_array_get(array: *const json_t, index: usize) -> *mut json_t;
pub fn json_array_set_new(array: *mut json_t, index: usize, value: *mut json_t) -> c_int;
pub fn json_array_append_new(array: *mut json_t, value: *mut json_t) -> c_int;
pub fn json_array_insert_new(array: *mut json_t, index: usize, value: *mut json_t) -> c_int;
pub fn json_array_remove(array: *mut json_t, index: usize) -> c_int;
pub fn json_array_clear(array: *mut json_t) -> c_int;
pub fn json_array_extend(array: *mut json_t, other: *mut json_t) -> c_int;
pub fn json_string_value(string: *const json_t) -> *const c_char;
pub fn json_string_length(string: *const json_t) -> usize;
pub fn json_integer_value(integer: *const json_t) -> json_int_t;
pub fn json_real_value(real: *const json_t) -> f64;
pub fn json_number_value(json: *const json_t) -> f64;
pub fn json_string_set(string: *mut json_t, value: *const c_char) -> c_int;
pub fn json_string_setn(string: *mut json_t, value: *const c_char, len: usize) -> c_int;
pub fn json_string_set_nocheck(string: *mut json_t, value: *const c_char) -> c_int;
pub fn json_string_setn_nocheck(string: *mut json_t, value: *const c_char, len: usize) -> c_int;
pub fn json_integer_set(integer: *mut json_t, value: json_int_t) -> c_int;
pub fn json_real_set(real: *mut json_t, value: f64) -> c_int;
pub fn json_pack(fmt: *const c_char, ...) -> *mut json_t;
pub fn json_pack_ex(error: *mut json_error_t, flags: usize, fmt: *const c_char, ...) -> *mut json_t;
pub fn json_unpack(root: *mut json_t, fmt: *const c_char, ...) -> c_int;
pub fn json_unpack_ex(root: *mut json_t, error: *mut json_error_t, flags: usize, fmt: *const c_char, ...) -> c_int;
pub fn json_equal(value1: *mut json_t, value2: *mut json_t) -> c_int;
pub fn json_copy(value: *mut json_t) -> *mut json_t;
pub fn json_deep_copy(value: *const json_t) -> *mut json_t;
pub fn json_loads(input: *const c_char, flags: usize, error: *mut json_error_t) -> *mut json_t;
pub fn json_loadb(buffer: *const c_char, buflen: usize, flags: usize, error: *mut json_error_t) -> *mut json_t;
pub fn json_loadfd(input: c_int, flags: usize, error: *mut json_error_t) -> *mut json_t;
pub fn json_load_file(path: *const c_char, flags: usize, error: *mut json_error_t) -> *mut json_t;
pub fn json_load_callback(callback: json_load_callback_t, data: *mut c_void, flags: usize, error: *mut json_error_t) -> *mut json_t;
pub fn json_dumps(json: *const json_t, flags: usize) -> *mut c_char;
pub fn json_dumpb(json: *const json_t, buffer: *mut c_char, size: usize, flags: usize) -> usize;
pub fn json_dumpfd(json: *const json_t, output: c_int, flags: usize) -> c_int;
pub fn json_dump_file(json: *const json_t, path: *const c_char, flags: usize) -> c_int;
pub fn json_dump_callback(json: *const json_t, callback: json_dump_callback_t, data: *mut c_void, flags: usize) -> c_int;
pub fn json_set_alloc_funcs(malloc_fn: json_malloc_t, free_fn: json_free_t);
pub fn json_get_alloc_funcs(malloc_fn: *mut json_malloc_t, free_fn: *mut json_free_t);
}
pub unsafe fn json_incref(json: *mut json_t) -> *mut json_t {
if !json.is_null() && (*json).refcount != usize::max_value() {
(*json).refcount += 1;
}
json
}
pub unsafe fn json_decref(json: *mut json_t) {
if !json.is_null() && (*json).refcount != usize::max_value() {
(*json).refcount -= 1;
if (*json).refcount == 0 {
json_delete(json);
}
}
}
#[cfg(test)]
use cstr_macro::cstr;
#[cfg(test)]
mod tests {
use super::*;
use std::ffi::CStr;
use std::ptr;
#[test]
fn object_encoding() {
unsafe {
let x = json_object();
json_object_set_new(x, cstr!("a"), json_string(cstr!("alpha")));
json_object_set_new(x, cstr!("b"), json_true());<|fim▁hole|> json_object_set_new(x, cstr!("f"), json_null());
let ys = json_array();
json_array_append_new(ys, json_integer(1));
json_array_append_new(ys, json_integer(3));
json_array_insert_new(ys, 1, json_integer(2));
json_object_set_new(x, cstr!("g"), ys);
let json = r#"{"a": "alpha", "b": true, "c": false, "d": 42, "e": 1.25, "f": null, "g": [1, 2, 3]}"#;
assert_eq!(json, CStr::from_ptr(json_dumps(x, 0x80)).to_str().unwrap());
}
}
#[test]
fn object_decoding() {
unsafe {
let json = cstr!(r#"{"a": {"aa": [true, false], "ab": null}, "b": {}, "c": "charlie", "d": 8.75}"#);
let root = json_loads(json, 0, ptr::null_mut());
assert!((*root).type_ == json_type::JSON_OBJECT);
let a = json_object_get(root, cstr!("a"));
assert!((*a).type_ == json_type::JSON_OBJECT);
let aa = json_object_get(a, cstr!("aa"));
assert!((*aa).type_ == json_type::JSON_ARRAY);
assert_eq!(json_array_size(aa), 2);
assert!((*json_array_get(aa, 0)).type_ == json_type::JSON_TRUE);
assert!((*json_array_get(aa, 1)).type_ == json_type::JSON_FALSE);
let ab = json_object_get(a, cstr!("ab"));
assert!((*ab).type_ == json_type::JSON_NULL);
let b = json_object_get(root, cstr!("b"));
assert!((*b).type_ == json_type::JSON_OBJECT);
assert_eq!(json_object_size(b), 0);
let c = json_object_get(root, cstr!("c"));
assert!((*c).type_ == json_type::JSON_STRING);
assert_eq!("charlie", CStr::from_ptr(json_string_value(c)).to_str().unwrap());
let d = json_object_get(root, cstr!("d"));
assert!((*d).type_ == json_type::JSON_REAL);
assert_eq!(8.75, json_real_value(d));
}
}
}<|fim▁end|> | json_object_set_new(x, cstr!("c"), json_false());
json_object_set_new(x, cstr!("d"), json_integer(42));
json_object_set_new(x, cstr!("e"), json_real(1.25)); |
<|file_name|>ClientAuthHelper.java<|end_file_name|><|fim▁begin|>package org.orchestra.client;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.security.SignatureException;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.shiro.codec.Base64;
import org.apache.shiro.crypto.hash.Md5Hash;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.json.simple.JSONObject;
import org.json.simple.JSONValue;
import org.orchestra.auth.Constants;
import org.orchestra.rest.ServerAuthHelper;
import org.orchestra.util.CipherUtil;
import org.orchestra.util.HttpUtil;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
public class ClientAuthHelper {
private static Map<String, String> cache = new HashMap<String, String>();
private String username;
private String password;
private String filename;
private String apikeyPath;
public ClientAuthHelper(String username, String password) {
this.username = username;
this.password = password;
this.apikeyPath = Client.getProperty("apikey.dir");
this.filename = new Md5Hash(username) + ".apikey";
}
public String getApikey() {
String apikey = cache.get(username);
if(apikey != null) return apikey;
apikey = loadApikeyFromFile();
if(apikey != null) return apikey;
apikey = getApikeyFromServer();
return apikey;
}
private String getApikeyFromServer() {
HttpCommandBuilder builder = new HttpCommandBuilder(username, password);
HttpCommand command = builder.setScheme("https")
.setNeedAuthHeader(true)
.setHost(Client.getProperty("server"))
.setPort(Integer.valueOf(Client.getProperty("port")))
.setAction("update")
.setTarget("apikey")
.addPathParameter(username)
.addPathParameter(Client.getName())
.build();
HttpResponse response = command.execute();
if(200 != response.getStatusLine().getStatusCode()) {
throw new RuntimeException("Unable to get apikey from server.");
}
String apikey = saveApikeyToFile(response);
return apikey;
}
public void removeApikeyFile() {
File file = new File(apikeyPath + "/" + filename);
if(file.exists()) file.delete();
}
public String saveApikeyToFile(HttpResponse response) {
Reader in = null;
try {
in = new InputStreamReader(response.getEntity().getContent());
} catch (IllegalStateException | IOException e) {
e.printStackTrace();
}
JSONObject json = (JSONObject) JSONValue.parse(in);
String apikey = null;
if(json != null) {
apikey = (String) json.get("apikey");
String secret = (String) json.get("secret");
cache.put(username, apikey);
cache.put(apikey, secret);
String jsonString = json.toJSONString();
System.out.println(jsonString);
OutputStream out = null;
try {
String apikey_filename = apikeyPath + "/" + filename;
File file = new File(apikey_filename);
if(file.exists()) {
file.delete();
}
out = new FileOutputStream(apikey_filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
CipherUtil.encrypt(jsonString , out , password);
}
return apikey;
}
private String loadApikeyFromFile() {
File file = new File(apikeyPath + "/" + filename);
InputStream in = null;
if(!file.exists()) return null;
try {
in = new FileInputStream(file);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
String jsonString = CipherUtil.decrypt(in, password);
JSONObject json = (JSONObject) JSONValue.parse(jsonString);
if(json == null) return null;
String apikey = (String) json.get("apikey");
String secret = (String) json.get("secret");<|fim▁hole|> return apikey;
}
public String sign(HttpCommandBuilder request) throws SignatureException, UnsupportedEncodingException {
String secret = cache.get(getApikey());
String canonicalRequestHashHex = CipherUtil.toHex(CipherUtil.hash(getCanonicalRequest(request)));
String timestamp = (String) request.getParameter("timestamp");
String nonce = (String) request.getParameter("nonce");
String stringToSign =
Constants.SIGNATURE_ALGORITHM + Constants.NEW_LINE +
timestamp + Constants.NEW_LINE +
canonicalRequestHashHex;
DateTimeFormatter formatter = DateTimeFormat.forPattern(Constants.TIMESTAMP_FORMAT);
DateTime date = formatter.parseDateTime(timestamp);
String dateStamp = date .toString(Constants.DATE_FORMAT);
byte[] kDate = CipherUtil.sign(dateStamp, secret);
byte[] kSigning = CipherUtil.sign(nonce , kDate);
byte[] signature = CipherUtil.sign(stringToSign, kSigning);
String signatureHex = CipherUtil.toHex(signature);
return signatureHex;
}
public static String getCanonicalRequest(HttpCommandBuilder request) throws UnsupportedEncodingException, SignatureException {
String method = request.getMethod();
String canonicalURI = HttpUtil.canonicalURI(request.getPath());
String canonicalQueryString = canonicalizeQueryString(request);
String canonicalHeadersString = canonicalizeHeadersString(request);
String signedHeadersString = getSignedHeadersString(request);
String canonicalRequest =
method + Constants.NEW_LINE +
canonicalURI + Constants.NEW_LINE +
canonicalQueryString + Constants.NEW_LINE +
canonicalHeadersString + Constants.NEW_LINE +
signedHeadersString;
return canonicalRequest;
}
private static String canonicalizeHeadersString(HttpCommandBuilder request) {
Map<String, String> headers = request.getAllHeaders();
StringBuilder buffer = new StringBuilder();
for( Entry<String, String> header : headers.entrySet()) {
if(header.getKey().equalsIgnoreCase(Constants.SIGNED_HEADERS)) continue;
buffer.append(header.getKey().toLowerCase()).append(":");
String values = header.getValue();
buffer.append(values.trim());
buffer.append(Constants.NEW_LINE);
}
return buffer.toString();
}
private static String canonicalizeQueryString(HttpCommandBuilder request) {
String queryString = request.getQueryString();
return HttpUtil.canonicalizeQueryString(queryString);
}
public static String getSignedHeadersString(HttpCommandBuilder request) {
Map<String, String> headers = request.getAllHeaders();
StringBuilder buffer = new StringBuilder();
for(Entry<String, String> header : headers.entrySet()) {
if(header.getKey().equalsIgnoreCase(Constants.SIGNED_HEADERS)) continue;
if (buffer.length() > 0) buffer.append(";");
buffer.append(header.getKey().toLowerCase());
}
return buffer.toString();
}
private static class HeaderComparator implements Comparator<org.apache.http.Header> {
@Override
public int compare(org.apache.http.Header o1,
org.apache.http.Header o2) {
return o1.getName().compareToIgnoreCase(o2.getName());
}
}
}<|fim▁end|> | if(apikey == null || secret == null) return null;
cache.put(username, apikey);
cache.put(apikey, secret); |
<|file_name|>controlgroup-f525e8d53867db2d7a4e30c79b5d800b.js<|end_file_name|><|fim▁begin|>( function( factory ) {
if ( typeof define === "function" && define.amd ) {
// AMD. Register as an anonymous module.
define( [ "jquery" ], factory );
} else {
// Browser globals
factory( jQuery );
}
} ( function( $ ) {
$.ui = $.ui || {};
return $.ui.version = "1.12.1";
} ) );
/*!
* jQuery UI Widget 1.12.1
* http://jqueryui.com
*
* Copyright jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*/
//>>label: Widget
//>>group: Core
//>>description: Provides a factory for creating stateful widgets with a common API.
//>>docs: http://api.jqueryui.com/jQuery.widget/
//>>demos: http://jqueryui.com/widget/
( function( factory ) {
if ( typeof define === "function" && define.amd ) {
// AMD. Register as an anonymous module.
define( [ "jquery", "./version" ], factory );
} else {
// Browser globals
factory( jQuery );
}
}( function( $ ) {
var widgetUuid = 0;
var widgetSlice = Array.prototype.slice;
$.cleanData = ( function( orig ) {
return function( elems ) {
var events, elem, i;
for ( i = 0; ( elem = elems[ i ] ) != null; i++ ) {
try {
// Only trigger remove when necessary to save time
events = $._data( elem, "events" );
if ( events && events.remove ) {
$( elem ).triggerHandler( "remove" );
}
// Http://bugs.jquery.com/ticket/8235
} catch ( e ) {}
}
orig( elems );
};
} )( $.cleanData );
$.widget = function( name, base, prototype ) {
var existingConstructor, constructor, basePrototype;
// ProxiedPrototype allows the provided prototype to remain unmodified
// so that it can be used as a mixin for multiple widgets (#8876)
var proxiedPrototype = {};
var namespace = name.split( "." )[ 0 ];
name = name.split( "." )[ 1 ];
var fullName = namespace + "-" + name;
if ( !prototype ) {
prototype = base;
base = $.Widget;
}
if ( $.isArray( prototype ) ) {
prototype = $.extend.apply( null, [ {} ].concat( prototype ) );
}
// Create selector for plugin
$.expr[ ":" ][ fullName.toLowerCase() ] = function( elem ) {
return !!$.data( elem, fullName );
};
$[ namespace ] = $[ namespace ] || {};
existingConstructor = $[ namespace ][ name ];
constructor = $[ namespace ][ name ] = function( options, element ) {
// Allow instantiation without "new" keyword
if ( !this._createWidget ) {
return new constructor( options, element );
}
// Allow instantiation without initializing for simple inheritance
// must use "new" keyword (the code above always passes args)
if ( arguments.length ) {
this._createWidget( options, element );
}
};
// Extend with the existing constructor to carry over any static properties
$.extend( constructor, existingConstructor, {
version: prototype.version,
// Copy the object used to create the prototype in case we need to
// redefine the widget later
_proto: $.extend( {}, prototype ),
// Track widgets that inherit from this widget in case this widget is
// redefined after a widget inherits from it
_childConstructors: []
} );
basePrototype = new base();
// We need to make the options hash a property directly on the new instance
// otherwise we'll modify the options hash on the prototype that we're
// inheriting from
basePrototype.options = $.widget.extend( {}, basePrototype.options );
$.each( prototype, function( prop, value ) {
if ( !$.isFunction( value ) ) {
proxiedPrototype[ prop ] = value;
return;
}
proxiedPrototype[ prop ] = ( function() {
function _super() {
return base.prototype[ prop ].apply( this, arguments );
}
function _superApply( args ) {
return base.prototype[ prop ].apply( this, args );
}
return function() {
var __super = this._super;
var __superApply = this._superApply;
var returnValue;
this._super = _super;
this._superApply = _superApply;
returnValue = value.apply( this, arguments );
this._super = __super;
this._superApply = __superApply;
return returnValue;
};
} )();
} );
constructor.prototype = $.widget.extend( basePrototype, {
// TODO: remove support for widgetEventPrefix
// always use the name + a colon as the prefix, e.g., draggable:start
// don't prefix for widgets that aren't DOM-based
widgetEventPrefix: existingConstructor ? ( basePrototype.widgetEventPrefix || name ) : name
}, proxiedPrototype, {
constructor: constructor,
namespace: namespace,
widgetName: name,
widgetFullName: fullName
} );
// If this widget is being redefined then we need to find all widgets that
// are inheriting from it and redefine all of them so that they inherit from
// the new version of this widget. We're essentially trying to replace one
// level in the prototype chain.
if ( existingConstructor ) {
$.each( existingConstructor._childConstructors, function( i, child ) {
var childPrototype = child.prototype;
// Redefine the child widget using the same prototype that was
// originally used, but inherit from the new version of the base
$.widget( childPrototype.namespace + "." + childPrototype.widgetName, constructor,
child._proto );
} );
// Remove the list of existing child constructors from the old constructor
// so the old child constructors can be garbage collected
delete existingConstructor._childConstructors;
} else {
base._childConstructors.push( constructor );
}
$.widget.bridge( name, constructor );
return constructor;
};
$.widget.extend = function( target ) {
var input = widgetSlice.call( arguments, 1 );
var inputIndex = 0;
var inputLength = input.length;
var key;
var value;
for ( ; inputIndex < inputLength; inputIndex++ ) {
for ( key in input[ inputIndex ] ) {
value = input[ inputIndex ][ key ];
if ( input[ inputIndex ].hasOwnProperty( key ) && value !== undefined ) {
// Clone objects
if ( $.isPlainObject( value ) ) {
target[ key ] = $.isPlainObject( target[ key ] ) ?
$.widget.extend( {}, target[ key ], value ) :
// Don't extend strings, arrays, etc. with objects
$.widget.extend( {}, value );
// Copy everything else by reference
} else {
target[ key ] = value;
}
}
}
}
return target;
};
$.widget.bridge = function( name, object ) {
var fullName = object.prototype.widgetFullName || name;
$.fn[ name ] = function( options ) {
var isMethodCall = typeof options === "string";
var args = widgetSlice.call( arguments, 1 );
var returnValue = this;
if ( isMethodCall ) {
// If this is an empty collection, we need to have the instance method
// return undefined instead of the jQuery instance
if ( !this.length && options === "instance" ) {
returnValue = undefined;
} else {
this.each( function() {
var methodValue;
var instance = $.data( this, fullName );
if ( options === "instance" ) {
returnValue = instance;
return false;
}
if ( !instance ) {
return $.error( "cannot call methods on " + name +
" prior to initialization; " +
"attempted to call method '" + options + "'" );
}
if ( !$.isFunction( instance[ options ] ) || options.charAt( 0 ) === "_" ) {
return $.error( "no such method '" + options + "' for " + name +
" widget instance" );
}
methodValue = instance[ options ].apply( instance, args );
if ( methodValue !== instance && methodValue !== undefined ) {
returnValue = methodValue && methodValue.jquery ?
returnValue.pushStack( methodValue.get() ) :
methodValue;
return false;
}
} );
}
} else {
// Allow multiple hashes to be passed on init
if ( args.length ) {
options = $.widget.extend.apply( null, [ options ].concat( args ) );
}
this.each( function() {
var instance = $.data( this, fullName );
if ( instance ) {
instance.option( options || {} );
if ( instance._init ) {
instance._init();
}
} else {
$.data( this, fullName, new object( options, this ) );
}
} );
}
return returnValue;
};
};
$.Widget = function( /* options, element */ ) {};
$.Widget._childConstructors = [];
$.Widget.prototype = {
widgetName: "widget",
widgetEventPrefix: "",
defaultElement: "<div>",
options: {
classes: {},
disabled: false,
// Callbacks
create: null
},
_createWidget: function( options, element ) {
element = $( element || this.defaultElement || this )[ 0 ];
this.element = $( element );
this.uuid = widgetUuid++;
this.eventNamespace = "." + this.widgetName + this.uuid;
this.bindings = $();
this.hoverable = $();
this.focusable = $();
this.classesElementLookup = {};
if ( element !== this ) {
$.data( element, this.widgetFullName, this );
this._on( true, this.element, {
remove: function( event ) {
if ( event.target === element ) {
this.destroy();
}
}
} );
this.document = $( element.style ?
// Element within the document
element.ownerDocument :
// Element is window or document
element.document || element );
this.window = $( this.document[ 0 ].defaultView || this.document[ 0 ].parentWindow );
}
this.options = $.widget.extend( {},
this.options,
this._getCreateOptions(),
options );
this._create();
if ( this.options.disabled ) {
this._setOptionDisabled( this.options.disabled );
}
this._trigger( "create", null, this._getCreateEventData() );
this._init();
},
_getCreateOptions: function() {
return {};
},
_getCreateEventData: $.noop,
_create: $.noop,
_init: $.noop,
destroy: function() {
var that = this;
this._destroy();
$.each( this.classesElementLookup, function( key, value ) {
that._removeClass( value, key );
} );
// We can probably remove the unbind calls in 2.0
// all event bindings should go through this._on()
this.element
.off( this.eventNamespace )
.removeData( this.widgetFullName );
this.widget()
.off( this.eventNamespace )
.removeAttr( "aria-disabled" );
// Clean up events and states
this.bindings.off( this.eventNamespace );
},
_destroy: $.noop,
widget: function() {
return this.element;
},
option: function( key, value ) {
var options = key;
var parts;
var curOption;
var i;
if ( arguments.length === 0 ) {
// Don't return a reference to the internal hash
return $.widget.extend( {}, this.options );
}
if ( typeof key === "string" ) {
// Handle nested keys, e.g., "foo.bar" => { foo: { bar: ___ } }
options = {};
parts = key.split( "." );
key = parts.shift();
if ( parts.length ) {
curOption = options[ key ] = $.widget.extend( {}, this.options[ key ] );
for ( i = 0; i < parts.length - 1; i++ ) {
curOption[ parts[ i ] ] = curOption[ parts[ i ] ] || {};
curOption = curOption[ parts[ i ] ];
}
key = parts.pop();
if ( arguments.length === 1 ) {
return curOption[ key ] === undefined ? null : curOption[ key ];
}
curOption[ key ] = value;
} else {
if ( arguments.length === 1 ) {
return this.options[ key ] === undefined ? null : this.options[ key ];
}
options[ key ] = value;
}
}
this._setOptions( options );
return this;
},
_setOptions: function( options ) {
var key;
for ( key in options ) {
this._setOption( key, options[ key ] );
}
return this;
},
_setOption: function( key, value ) {
if ( key === "classes" ) {
this._setOptionClasses( value );
}
this.options[ key ] = value;
if ( key === "disabled" ) {
this._setOptionDisabled( value );
}
return this;
},
_setOptionClasses: function( value ) {
var classKey, elements, currentElements;
for ( classKey in value ) {
currentElements = this.classesElementLookup[ classKey ];
if ( value[ classKey ] === this.options.classes[ classKey ] ||
!currentElements ||
!currentElements.length ) {
continue;
}
// We are doing this to create a new jQuery object because the _removeClass() call
// on the next line is going to destroy the reference to the current elements being
// tracked. We need to save a copy of this collection so that we can add the new classes
// below.
elements = $( currentElements.get() );
this._removeClass( currentElements, classKey );
// We don't use _addClass() here, because that uses this.options.classes
// for generating the string of classes. We want to use the value passed in from
// _setOption(), this is the new value of the classes option which was passed to
// _setOption(). We pass this value directly to _classes().
elements.addClass( this._classes( {
element: elements,
keys: classKey,
classes: value,
add: true
} ) );
}
},
_setOptionDisabled: function( value ) {
this._toggleClass( this.widget(), this.widgetFullName + "-disabled", null, !!value );
// If the widget is becoming disabled, then nothing is interactive
if ( value ) {
this._removeClass( this.hoverable, null, "ui-state-hover" );
this._removeClass( this.focusable, null, "ui-state-focus" );
}
},
enable: function() {
return this._setOptions( { disabled: false } );
},
disable: function() {
return this._setOptions( { disabled: true } );
},
_classes: function( options ) {
var full = [];
var that = this;
options = $.extend( {
element: this.element,
classes: this.options.classes || {}
}, options );
function processClassString( classes, checkOption ) {
var current, i;
for ( i = 0; i < classes.length; i++ ) {
current = that.classesElementLookup[ classes[ i ] ] || $();
if ( options.add ) {
current = $( $.unique( current.get().concat( options.element.get() ) ) );
} else {
current = $( current.not( options.element ).get() );
}
that.classesElementLookup[ classes[ i ] ] = current;
full.push( classes[ i ] );
if ( checkOption && options.classes[ classes[ i ] ] ) {
full.push( options.classes[ classes[ i ] ] );
}
}
}
this._on( options.element, {
"remove": "_untrackClassesElement"
} );
if ( options.keys ) {
processClassString( options.keys.match( /\S+/g ) || [], true );
}
if ( options.extra ) {
processClassString( options.extra.match( /\S+/g ) || [] );
}
return full.join( " " );
},
_untrackClassesElement: function( event ) {
var that = this;
$.each( that.classesElementLookup, function( key, value ) {
if ( $.inArray( event.target, value ) !== -1 ) {
that.classesElementLookup[ key ] = $( value.not( event.target ).get() );
}
} );
},
_removeClass: function( element, keys, extra ) {
return this._toggleClass( element, keys, extra, false );
},
_addClass: function( element, keys, extra ) {
return this._toggleClass( element, keys, extra, true );
},
_toggleClass: function( element, keys, extra, add ) {
add = ( typeof add === "boolean" ) ? add : extra;
var shift = ( typeof element === "string" || element === null ),
options = {
extra: shift ? keys : extra,
keys: shift ? element : keys,
element: shift ? this.element : element,
add: add
};
options.element.toggleClass( this._classes( options ), add );
return this;
},
_on: function( suppressDisabledCheck, element, handlers ) {
var delegateElement;
var instance = this;
// No suppressDisabledCheck flag, shuffle arguments
if ( typeof suppressDisabledCheck !== "boolean" ) {
handlers = element;
element = suppressDisabledCheck;
suppressDisabledCheck = false;
}
// No element argument, shuffle and use this.element
if ( !handlers ) {
handlers = element;
element = this.element;
delegateElement = this.widget();
} else {
element = delegateElement = $( element );
this.bindings = this.bindings.add( element );
}
$.each( handlers, function( event, handler ) {
function handlerProxy() {
// Allow widgets to customize the disabled handling
// - disabled as an array instead of boolean
// - disabled class as method for disabling individual parts
if ( !suppressDisabledCheck &&
( instance.options.disabled === true ||
$( this ).hasClass( "ui-state-disabled" ) ) ) {
return;
}
return ( typeof handler === "string" ? instance[ handler ] : handler )
.apply( instance, arguments );
}
// Copy the guid so direct unbinding works
if ( typeof handler !== "string" ) {
handlerProxy.guid = handler.guid =
handler.guid || handlerProxy.guid || $.guid++;
}
var match = event.match( /^([\w:-]*)\s*(.*)$/ );
var eventName = match[ 1 ] + instance.eventNamespace;
var selector = match[ 2 ];
if ( selector ) {
delegateElement.on( eventName, selector, handlerProxy );
} else {
element.on( eventName, handlerProxy );
}
} );
},
_off: function( element, eventName ) {
eventName = ( eventName || "" ).split( " " ).join( this.eventNamespace + " " ) +
this.eventNamespace;
element.off( eventName ).off( eventName );
// Clear the stack to avoid memory leaks (#10056)
this.bindings = $( this.bindings.not( element ).get() );
this.focusable = $( this.focusable.not( element ).get() );
this.hoverable = $( this.hoverable.not( element ).get() );
},
_delay: function( handler, delay ) {
function handlerProxy() {
return ( typeof handler === "string" ? instance[ handler ] : handler )
.apply( instance, arguments );
}
var instance = this;
return setTimeout( handlerProxy, delay || 0 );
},
_hoverable: function( element ) {
this.hoverable = this.hoverable.add( element );
this._on( element, {
mouseenter: function( event ) {
this._addClass( $( event.currentTarget ), null, "ui-state-hover" );
},
mouseleave: function( event ) {
this._removeClass( $( event.currentTarget ), null, "ui-state-hover" );
}
} );
},
_focusable: function( element ) {
this.focusable = this.focusable.add( element );
this._on( element, {
focusin: function( event ) {
this._addClass( $( event.currentTarget ), null, "ui-state-focus" );
},
focusout: function( event ) {
this._removeClass( $( event.currentTarget ), null, "ui-state-focus" );
}
} );
},
_trigger: function( type, event, data ) {
var prop, orig;
var callback = this.options[ type ];
data = data || {};
event = $.Event( event );
event.type = ( type === this.widgetEventPrefix ?
type :
this.widgetEventPrefix + type ).toLowerCase();
// The original event may come from any element
// so we need to reset the target on the new event
event.target = this.element[ 0 ];
// Copy original event properties over to the new event
orig = event.originalEvent;
if ( orig ) {
for ( prop in orig ) {
if ( !( prop in event ) ) {
event[ prop ] = orig[ prop ];
}
}
}
this.element.trigger( event, data );
return !( $.isFunction( callback ) &&
callback.apply( this.element[ 0 ], [ event ].concat( data ) ) === false ||
event.isDefaultPrevented() );
}
};
$.each( { show: "fadeIn", hide: "fadeOut" }, function( method, defaultEffect ) {
$.Widget.prototype[ "_" + method ] = function( element, options, callback ) {
if ( typeof options === "string" ) {
options = { effect: options };
}
var hasOptions;
var effectName = !options ?
method :
options === true || typeof options === "number" ?
defaultEffect :
options.effect || defaultEffect;
options = options || {};
if ( typeof options === "number" ) {
options = { duration: options };
}
hasOptions = !$.isEmptyObject( options );
options.complete = callback;
if ( options.delay ) {
element.delay( options.delay );
}
if ( hasOptions && $.effects && $.effects.effect[ effectName ] ) {
element[ method ]( options );
} else if ( effectName !== method && element[ effectName ] ) {
element[ effectName ]( options.duration, options.easing, callback );
} else {
element.queue( function( next ) {
$( this )[ method ]();
if ( callback ) {
callback.call( element[ 0 ] );
}
next();
} );
}
};
} );
return $.widget;
} ) );
/*!
* jQuery UI Controlgroup 1.12.1
* http://jqueryui.com
*
* Copyright jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
*/
//>>label: Controlgroup
//>>group: Widgets
//>>description: Visually groups form control widgets
//>>docs: http://api.jqueryui.com/controlgroup/
//>>demos: http://jqueryui.com/controlgroup/
//>>css.structure: ../../themes/base/core.css
//>>css.structure: ../../themes/base/controlgroup.css
//>>css.theme: ../../themes/base/theme.css
( function( factory ) {
if ( typeof define === "function" && define.amd ) {
// AMD. Register as an anonymous module.
define( [
"jquery",
"../widget"
], factory );
} else {
// Browser globals
factory( jQuery );
}
}( function( $ ) {
var controlgroupCornerRegex = /ui-corner-([a-z]){2,6}/g;
return $.widget( "ui.controlgroup", {
version: "1.12.1",
defaultElement: "<div>",
options: {
direction: "horizontal",
disabled: null,
onlyVisible: true,
items: {
"button": "input[type=button], input[type=submit], input[type=reset], button, a",
"controlgroupLabel": ".ui-controlgroup-label",
"checkboxradio": "input[type='checkbox'], input[type='radio']",
"selectmenu": "select",
"spinner": ".ui-spinner-input"
}
},
_create: function() {
this._enhance();
},
// To support the enhanced option in jQuery Mobile, we isolate DOM manipulation
_enhance: function() {
this.element.attr( "role", "toolbar" );
this.refresh();
},
_destroy: function() {
this._callChildMethod( "destroy" );
this.childWidgets.removeData( "ui-controlgroup-data" );
this.element.removeAttr( "role" );
if ( this.options.items.controlgroupLabel ) {
this.element
.find( this.options.items.controlgroupLabel )
.find( ".ui-controlgroup-label-contents" )
.contents().unwrap();
}
},
_initWidgets: function() {
var that = this,
childWidgets = [];
// First we iterate over each of the items options
$.each( this.options.items, function( widget, selector ) {
var labels;
var options = {};
// Make sure the widget has a selector set
if ( !selector ) {
return;
}
if ( widget === "controlgroupLabel" ) {
labels = that.element.find( selector );
labels.each( function() {
var element = $( this );
if ( element.children( ".ui-controlgroup-label-contents" ).length ) {
return;
}
element.contents()
.wrapAll( "<span class='ui-controlgroup-label-contents'></span>" );
} );
that._addClass( labels, null, "ui-widget ui-widget-content ui-state-default" );
childWidgets = childWidgets.concat( labels.get() );
return;
}
// Make sure the widget actually exists
if ( !$.fn[ widget ] ) {
return;
}
// We assume everything is in the middle to start because we can't determine
// first / last elements until all enhancements are done.
if ( that[ "_" + widget + "Options" ] ) {
options = that[ "_" + widget + "Options" ]( "middle" );
} else {
options = { classes: {} };
}
// Find instances of this widget inside controlgroup and init them<|fim▁hole|> .find( selector )
.each( function() {
var element = $( this );
var instance = element[ widget ]( "instance" );
// We need to clone the default options for this type of widget to avoid
// polluting the variable options which has a wider scope than a single widget.
var instanceOptions = $.widget.extend( {}, options );
// If the button is the child of a spinner ignore it
// TODO: Find a more generic solution
if ( widget === "button" && element.parent( ".ui-spinner" ).length ) {
return;
}
// Create the widget if it doesn't exist
if ( !instance ) {
instance = element[ widget ]()[ widget ]( "instance" );
}
if ( instance ) {
instanceOptions.classes =
that._resolveClassesValues( instanceOptions.classes, instance );
}
element[ widget ]( instanceOptions );
// Store an instance of the controlgroup to be able to reference
// from the outermost element for changing options and refresh
var widgetElement = element[ widget ]( "widget" );
$.data( widgetElement[ 0 ], "ui-controlgroup-data",
instance ? instance : element[ widget ]( "instance" ) );
childWidgets.push( widgetElement[ 0 ] );
} );
} );
this.childWidgets = $( $.unique( childWidgets ) );
this._addClass( this.childWidgets, "ui-controlgroup-item" );
},
_callChildMethod: function( method ) {
this.childWidgets.each( function() {
var element = $( this ),
data = element.data( "ui-controlgroup-data" );
if ( data && data[ method ] ) {
data[ method ]();
}
} );
},
_updateCornerClass: function( element, position ) {
var remove = "ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all";
var add = this._buildSimpleOptions( position, "label" ).classes.label;
this._removeClass( element, null, remove );
this._addClass( element, null, add );
},
_buildSimpleOptions: function( position, key ) {
var direction = this.options.direction === "vertical";
var result = {
classes: {}
};
result.classes[ key ] = {
"middle": "",
"first": "ui-corner-" + ( direction ? "top" : "left" ),
"last": "ui-corner-" + ( direction ? "bottom" : "right" ),
"only": "ui-corner-all"
}[ position ];
return result;
},
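// Illustrative note (an assumption drawn from the mapping above, not part of the
// original source): for a horizontal group, _buildSimpleOptions( "first", "ui-button" )
// would yield { classes: { "ui-button": "ui-corner-left" } }.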
_spinnerOptions: function( position ) {
var options = this._buildSimpleOptions( position, "ui-spinner" );
options.classes[ "ui-spinner-up" ] = "";
options.classes[ "ui-spinner-down" ] = "";
return options;
},
_buttonOptions: function( position ) {
return this._buildSimpleOptions( position, "ui-button" );
},
_checkboxradioOptions: function( position ) {
return this._buildSimpleOptions( position, "ui-checkboxradio-label" );
},
_selectmenuOptions: function( position ) {
var direction = this.options.direction === "vertical";
return {
width: direction ? "auto" : false,
classes: {
middle: {
"ui-selectmenu-button-open": "",
"ui-selectmenu-button-closed": ""
},
first: {
"ui-selectmenu-button-open": "ui-corner-" + ( direction ? "top" : "tl" ),
"ui-selectmenu-button-closed": "ui-corner-" + ( direction ? "top" : "left" )
},
last: {
"ui-selectmenu-button-open": direction ? "" : "ui-corner-tr",
"ui-selectmenu-button-closed": "ui-corner-" + ( direction ? "bottom" : "right" )
},
only: {
"ui-selectmenu-button-open": "ui-corner-top",
"ui-selectmenu-button-closed": "ui-corner-all"
}
}[ position ]
};
},
_resolveClassesValues: function( classes, instance ) {
var result = {};
$.each( classes, function( key ) {
var current = instance.options.classes[ key ] || "";
current = $.trim( current.replace( controlgroupCornerRegex, "" ) );
result[ key ] = ( current + " " + classes[ key ] ).replace( /\s+/g, " " );
} );
return result;
},
_setOption: function( key, value ) {
if ( key === "direction" ) {
this._removeClass( "ui-controlgroup-" + this.options.direction );
}
this._super( key, value );
if ( key === "disabled" ) {
this._callChildMethod( value ? "disable" : "enable" );
return;
}
this.refresh();
},
refresh: function() {
var children,
that = this;
this._addClass( "ui-controlgroup ui-controlgroup-" + this.options.direction );
if ( this.options.direction === "horizontal" ) {
this._addClass( null, "ui-helper-clearfix" );
}
this._initWidgets();
children = this.childWidgets;
// We filter here because we need to track all childWidgets not just the visible ones
if ( this.options.onlyVisible ) {
children = children.filter( ":visible" );
}
if ( children.length ) {
// We do this last because we need to make sure all enhancement is done
// before determining first and last
$.each( [ "first", "last" ], function( index, value ) {
var instance = children[ value ]().data( "ui-controlgroup-data" );
if ( instance && that[ "_" + instance.widgetName + "Options" ] ) {
var options = that[ "_" + instance.widgetName + "Options" ](
children.length === 1 ? "only" : value
);
options.classes = that._resolveClassesValues( options.classes, instance );
instance.element[ instance.widgetName ]( options );
} else {
that._updateCornerClass( children[ value ](), value );
}
} );
// Finally call the refresh method on each of the child widgets.
this._callChildMethod( "refresh" );
}
}
} );
} ) );<|fim▁end|> | that.element |
<|file_name|>validateDni.ts<|end_file_name|><|fim▁begin|>export class ValidateDni implements ng.IDirective{
public link: (scope: angular.IScope , elem: ng.IAugmentedJQuery, attrs: angular.IAttributes, ngModel: angular.INgModelController) => void;
restrict ='A';
require = 'ngModel';
constructor(scope: angular.IScope, elem:ng.IAugmentedJQuery, attrs: angular.IAttributes, ngModel: angular.INgModelController, $log:angular.ILogService)
{
// It's important to add `link` to the prototype or you will end up with state issues.
// See http://blog.aaronholmes.net/writing-angularjs-directives-as-typescript-classes/#comment-2111298002 for more information.
ValidateDni.prototype.link = (scope: ng.IScope, element: ng.IAugmentedJQuery, attrs: ng.IAttributes, ngModel: angular.INgModelController) =>
{
if (!ngModel) {
$log.warn("empty model found");
return;
}
// Moving to 1.5 validation: http://codepen.io/transistor1/pen/pgXqNo
ngModel.$validators['validateDni'] = function(dni) {
return validateDNI(dni);
}
function validateDNI(dni)
{
var lockup = 'TRWAGMYFPDXBNJZSQVHLCKE';
var valueDni=dni.substr(0,dni.length-1);
var letra=dni.substr(dni.length-1,1).toUpperCase();
if(lockup.charAt(valueDni % 23)==letra)
return true;
return false;
}
};
}
public static Factory()
{
var directive = (scope: angular.IScope , elem, attrs: angular.IAttributes, ngModel: angular.INgModelController,$log:angular.ILogService) =>
{
return new ValidateDni(scope, elem, attrs, ngModel, $log);
};<|fim▁hole|> return directive;
}
}<|fim▁end|> |
directive['$inject'] = ['$log']; |
<|file_name|>logger.js<|end_file_name|><|fim▁begin|>"use strict";
const bunyan = require("bunyan")
, bformat = require("bunyan-format")
, config = require("config")
;
const log_level = process.env.LOG_LEVEL || (config.has('app.log_level') ? config.get('app.log_level') : "info");
const formatOut = bformat({ outputMode: "short" , })
, logger = bunyan.createLogger({
name: "pepp",
streams: [
{
level: log_level,
stream: formatOut
}/*,
{
level: 'info',
// log ERROR and above to a file
path: './output/test.log'
}*/<|fim▁hole|>module.exports = logger;<|fim▁end|> | ]
});
|
<|file_name|>todo.spec.ts<|end_file_name|><|fim▁begin|>import {it, describe, expect, inject, beforeEachProviders} from 'angular2/testing';
import {Todo} from '../../../src/scripts/todo/todo';
describe('Todo', () => {
let todo: Todo;
beforeEach(() => {
todo = new Todo();
});
it('should instantiate with no arguments', () => {
todo = new Todo();
expect(todo).toBeDefined();
});
it('should generate random `id`', () => {
todo = new Todo();
expect(todo.id).toBeGreaterThan(-1);<|fim▁hole|> it('should instantiate with `title` argument', () => {
todo = new Todo('foo');
expect(todo.title).toBe('foo');
});
});<|fim▁end|> | });
|
<|file_name|>group.rs<|end_file_name|><|fim▁begin|>#![deny(warnings)]
#![cfg_attr(feature = "cargo-clippy", allow(many_single_char_names))]
extern crate mpi;
use mpi::traits::*;
use mpi::topology::{SystemGroup, GroupRelation, Rank};
fn main() {
let universe = mpi::initialize().unwrap();
let world = universe.world();
let g = world.group();
// Group accessors and Communicator accessors agree
assert_eq!(world.size(), g.size());
assert_eq!(world.rank(), g.rank().unwrap());
// g == g
assert_eq!(GroupRelation::Identical, g.compare(&g));
let h = world.group();
// h == g
assert_eq!(GroupRelation::Identical, g.compare(&h));
let i = g.union(&h);
// g union h == g union g == g
assert_eq!(GroupRelation::Identical, g.compare(&i));
let empty = g.difference(&h);
// g difference h == g difference g = empty Group
assert_eq!(GroupRelation::Identical, SystemGroup::empty().compare(&empty));
assert_eq!(0, empty.size());
// g intersection empty == empty Group
assert_eq!(0, g.intersection(&empty).size());
let first_half: Vec<Rank> = (0..g.size() / 2).collect();
// f and s are first and second half of g
let f = g.include(&first_half[..]);
let s = g.exclude(&first_half[..]);
// f != s
assert_eq!(GroupRelation::Unequal, f.compare(&s));
// g intersection f == f
let f_ = g.intersection(&f);
assert_eq!(GroupRelation::Identical, f.compare(&f_));
// g intersection s == s
let s_ = g.intersection(&s);
assert_eq!(GroupRelation::Identical, s.compare(&s_));
// g difference s == f
let f__ = g.difference(&s);
assert_eq!(GroupRelation::Identical, f.compare(&f__));
// g difference f == s
let s__ = g.difference(&f);
assert_eq!(GroupRelation::Identical, s.compare(&s__));
// f union s == g
let fs = f.union(&s);
assert_eq!(GroupRelation::Identical, g.compare(&fs));
// f intersection s == empty Group
let fs = f.intersection(&s);
assert_eq!(GroupRelation::Identical, empty.compare(&fs));
// rank is either in f or in s
assert!((f.rank().is_some() && s.rank().is_none())
^ (f.rank().is_none() && s.rank().is_some()));
<|fim▁hole|> assert_eq!(Some(rev[g.rank().unwrap() as usize]), r.translate_rank(g.rank().unwrap(), &g));
}<|fim▁end|> | // inverting rank mappings
let rev: Vec<Rank> = (0..g.size()).rev().collect();
let r = g.include(&rev[..]); |
<|file_name|>0005_auto__del_field_billstage_stage.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
<|fim▁hole|>
def forwards(self, orm):
# Deleting field 'BillStage.stage'
db.delete_column(u'bills_billstage', 'stage')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'BillStage.stage'
raise RuntimeError("Cannot reverse this migration. 'BillStage.stage' and its values cannot be restored.")
models = {
u'bills.bill': {
'Meta': {'object_name': 'Bill'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'bills.billstage': {
'Meta': {'object_name': 'BillStage'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stages'", 'to': u"orm['bills.Bill']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'bills.ncopconcurrence': {
'Meta': {'object_name': 'NCOPConcurrence', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfinalvote': {
'Meta': {'object_name': 'ParliamentFinalVote', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentfirstreading': {
'Meta': {'object_name': 'ParliamentFirstReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentportfoliocommittee': {
'Meta': {'object_name': 'ParliamentPortfolioCommittee', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.parliamentsecondreading': {
'Meta': {'object_name': 'ParliamentSecondReading', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bills.preparliamentarystage': {
'Meta': {'object_name': 'PreparliamentaryStage', '_ormbases': [u'bills.BillStage']},
u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'}),
'comments_end': ('django.db.models.fields.DateField', [], {}),
'comments_start': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['bills']<|fim▁end|> | class Migration(SchemaMigration): |
<|file_name|>HomeRoute.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react'
import { observer } from 'mobx-react'
import styled from 'styled-components'
export interface IHomeRouteProps {
className: string
}
@observer
class HomeRoute extends React.Component<IHomeRouteProps, {}> {
render() {
const { className } = this.props
return (
<Container className={className}>
<h1>Home</h1>
<p>Navigate to some other sections</p>
</Container>
)
}<|fim▁hole|>
const Container = styled.div`
`
export default HomeRoute<|fim▁end|> | } |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>extern crate std;
use std::sync::{ Arc, Future, Mutex };
use std::rc::Rc;
use threaded_executer::CommandsThread;
use std::string::String;
#[allow(dead_code)]
mod libglfw3;
mod window;
pub struct GLContext {
window: Mutex<window::Window>
}
impl GLContext {
pub fn new(width: uint, height: uint, title: &str) -> GLContext {
let window = window::Window::new(width, height, title);
window.make_context_current();
window.exec(proc() {
::gl::load_with(|s| unsafe { std::mem::transmute(libglfw3::glfwGetProcAddress(s.to_c_str().unwrap())) });
}).get();
GLContext {
window: Mutex::new(window)
}
}
pub fn recv(&self) -> Option<super::WindowEvent> {
let mut lock = self.window.lock();
lock.recv()<|fim▁hole|> let mut lock = self.window.lock();
lock.exec(f)
}
pub fn swap_buffers(&self) {
let mut lock = self.window.lock();
lock.swap_buffers()
}
}<|fim▁end|> | }
pub fn exec<T:Send>(&self, f: proc(): Send -> T) -> Future<T> { |
<|file_name|>capsulated.js<|end_file_name|><|fim▁begin|><|fim▁hole|> var mtests = capsule.tests.modules;
var thsocket = capsule.tests.modules.transport.http.socket_srv;
// mtests.http_responder.test(capsule);
// thsocket.test({ 'url' : 'http://localhost:8810/sockethh.js'}, capsule);
var thttp = capsule.tests.modules.transport.http.server;
thttp.test({ 'url' : 'http://localhost:8810/krevetk/o'}, capsule);
}<|fim▁end|> | exports.main = function(env){
var capsule = env.capsule; |
<|file_name|>lexer.py<|end_file_name|><|fim▁begin|>"""
`GrammarLexer` is compatible with Pygments lexers and can be used to highlight
the input using a regular grammar with token annotations.
"""
from __future__ import unicode_literals
from pygments.token import Token
from prompt_toolkit.layout.lexers import Lexer
from .compiler import _CompiledGrammar
__all__ = (
'GrammarLexer',
)
class GrammarLexer(Lexer):
"""
Lexer which can be used for highlighting of tokens according to variables in the grammar.
(It does not do actual lexing of the string, but it exposes an API, compatible
with the Pygments lexer class.)
:param compiled_grammar: Grammar as returned by the `compile()` function.
:param lexers: Dictionary mapping variable names of the regular grammar to
the lexers that should be used for this part. (This can
call other lexers recursively.) If you wish a part of the
grammar to just get one token, use a
`prompt_toolkit.layout.lexers.SimpleLexer`.
"""
def __init__(self, compiled_grammar, default_token=None, lexers=None):
assert isinstance(compiled_grammar, _CompiledGrammar)
assert default_token is None or isinstance(default_token, tuple)
assert lexers is None or all(isinstance(v, Lexer) for k, v in lexers.items())
assert lexers is None or isinstance(lexers, dict)
self.compiled_grammar = compiled_grammar
self.default_token = default_token or Token
self.lexers = lexers or {}
def get_tokens(self, cli, text):
m = self.compiled_grammar.match_prefix(text)
if m:
characters = [[self.default_token, c] for c in text]
for v in m.variables():
# If we have a `Lexer` instance for this part of the input.
# Tokenize recursively and apply tokens.
lexer = self.lexers.get(v.varname)
if lexer:
lexer_tokens = lexer.get_tokens(cli, text[v.start:v.stop])
i = v.start
for t, s in lexer_tokens:
for c in s:
if characters[i][0] == self.default_token:
characters[i][0] = t
i += 1
# Highlight trailing input.
trailing_input = m.trailing_input()<|fim▁hole|> return characters
else:
return [(Token, text)]<|fim▁end|> | if trailing_input:
for i in range(trailing_input.start, trailing_input.stop):
characters[i][0] = Token.TrailingInput
|
<|file_name|>TagComponent.java<|end_file_name|><|fim▁begin|>// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ide.plugins.newui;
import com.intellij.ide.IdeBundle;
import com.intellij.ui.JBColor;
import com.intellij.util.ui.JBUI;
import org.jetbrains.annotations.Nls;
import org.jetbrains.annotations.NotNull;
import java.awt.*;
/**
* @author Alexander Lobas
*/
public class TagComponent extends LinkComponent {
private static final Color BACKGROUND = JBColor.namedColor("Plugins.tagBackground", new JBColor(0xEAEAEC, 0x4D4D4D));
private static final Color EAP_BACKGROUND = JBColor.namedColor("Plugins.eapTagBackground", new JBColor(0xF2D2CF, 0xF2D2CF));
private static final Color PAID_BACKGROUND = JBColor.namedColor("Plugins.paidTagBackground", new JBColor(0xD8EDF8, 0x3E505C));
private static final Color TRIAL_BACKGROUND = JBColor.namedColor("Plugins.trialTagBackground", new JBColor(0xDBE8DD, 0x345574E));
private static final Color FOREGROUND = JBColor.namedColor("Plugins.tagForeground", new JBColor(0x787878, 0x999999));
private Color myColor;
public TagComponent() {
setForeground(FOREGROUND);
setPaintUnderline(false);
setOpaque(false);
setBorder(JBUI.Borders.empty(1, 8));
}
public TagComponent(@NotNull @Nls String name) {
this();
setText(name);
}
@Override
public void setText(@NotNull @Nls String name) {
String tooltip = null;
myColor = BACKGROUND;
if (Tags.EAP.name().equals(name)) {
myColor = EAP_BACKGROUND;
tooltip = IdeBundle.message("tooltip.eap.plugin.version");<|fim▁hole|> else if (Tags.Trial.name().equals(name) || Tags.Purchased.name().equals(name)) {
myColor = TRIAL_BACKGROUND;
}
else if (Tags.Paid.name().equals(name) || Tags.Freemium.name().equals(name)) {
myColor = PAID_BACKGROUND;
tooltip = IdeBundle.message("tooltip.paid.plugin");
}
super.setText(name);
setToolTipText(tooltip);
}
@Override
protected void paintComponent(Graphics g) {
//noinspection UseJBColor
g.setColor(myUnderline ? new Color(myColor.getRed(), myColor.getGreen(), myColor.getBlue(), 178) : myColor);
g.fillRect(0, 0, getWidth(), getHeight());
super.paintComponent(g);
}
@Override
protected boolean isInClickableArea(Point pt) {
return true;
}
}<|fim▁end|> | } |
<|file_name|>gost4401_81.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
* Partial implementation of standard atmospheric model as described in <|fim▁hole|> * sensors.
*
 * Supports modelling of temperature and pressure over the altitude span from
* 0 up to 51km.
*
* algorithm by Oleg Kochetov <[email protected]>
"""
from math import log10
class GOST4401(object):
G = 9.80665
R = 287.05287
E = 6356766
MIN_PRESSURE = 6.69384
MAX_PRESSURE = 101325.00
MIN_GP_ALT = 0.00
MAX_GP_ALT = 51000.00
# Lookup table with averaged empirical parameters for
# lower layers of atmosphere in accordance with GOST 4401-81
LUT_RECORDS = 6
tab = {
'altitude' : 0, # Geopotentional altitude
'temperature' : 1, # degrees K
'temp gradient' : 2, # degrees K per meter
'pressure' : 3, # pascals
}
ag_table = [
[0, 288.15, -0.0065, 101325.00],
[11000, 216.65, 0.0, 22632.04],
[20000, 216.65, 0.0010, 5474.87],
[32000, 228.65, 0.0028, 868.0146],
[47000, 270.65, 0.0, 110.9056],
[51000, 270.65, -0.0028, 6.69384]
]
@staticmethod
def geopotential_to_geometric(self, altitude):
return altitude * self.E / (self.E - altitude)
@staticmethod
def geometric_to_geopotential(self, altitude):
return altitude * self.E / (self.E + altitude)
def get_altitude(self, pressure):
"""
Returns geometric altitude value for the given pressure.
:param pressure: float pressure - pressure in pascals
:return: float geometric altitude in meters
"""
# Pressure in Pascals
if (pressure <= self.MIN_PRESSURE) or (pressure > self.MAX_PRESSURE):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((pressure <= self.ag_table[idx][self.tab['pressure']]) and
(pressure > self.ag_table[idx + 1][self.tab['pressure']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
geopot_H = ((Tm * pow(Ps / pressure, Bm * self.R / self.G) - Tm) / Bm)
else:
geopot_H = log10(Ps / pressure) * (self.R * Tm) / self.G * 0.434292
return self.geopotential_to_geometric(self, Hb + geopot_H)
def get_pressure(self, altitude):
"""
Returns pressure in pascals for the given geometric altitude
:param altitude: float altitude - geometric altitude in meters
:return: float - pressure in pascals
"""
geopot_H = self.geometric_to_geopotential(self, altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
lP = log10(Ps) - (self.G / (Bm * self.R)) * log10((Tm + Bm * (geopot_H - Hb)) / Tm)
else:
lP = log10(Ps) - 0.434294 * (self.G * (geopot_H - Hb)) / (self.R * Tm)
return pow(10, lP)
def get_temperature(self, altitude):
"""
Returns temperature value in K for the given geometric altitude.
:param altitude: float altitude - geometric altitude in meters
:return: float - temperature in degrees K
"""
geopot_H = self.geometric_to_geopotential(self, altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
temp = Tm
if Bm != 0:
temp += Bm * (geopot_H - Hb)
return temp<|fim▁end|> | * GOST 4401-81 useful for processing of data from meteorological balloon |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>$(function(){
BrowserDetect.init();
$('.minifyme').on("navminified", function() {
// $('td.expand,th.expand').toggle();
});
// Activate all popovers (if NOT mobile)
if ( !BrowserDetect.isMobile() ) {
$('[data-toggle="popover"]').popover();
}
});
$.fn.pressEnter = function(fn) {
return this.each(function() {
$(this).bind('enterPress', fn);
$(this).keyup(function(e){
if(e.keyCode == 13)
{
$(this).trigger("enterPress");
}
})
});
};
function ensureHeightOfSidebar() {
$('#left-panel').css('height',$('#main').height());
}
BrowserDetect =
// From http://stackoverflow.com/questions/13478303/correct-way-to-use-modernizr-to-detect-ie
{
init: function ()
{
this.browser = this.searchString(this.dataBrowser) || "Other";
this.version = this.searchVersion(navigator.userAgent) || this.searchVersion(navigator.appVersion) || "Unknown";
},
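// Example (hypothetical values): after BrowserDetect.init(), BrowserDetect.browser
// might be "Chrome" and BrowserDetect.version 96, depending on the user agent string.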
isMobile: function ()
{
if (navigator.userAgent.search(/(Android|Touch|iPhone|iPad)/) == -1) {
return false;
} else {
return true;
}
},
searchString: function (data)
{
for (var i=0 ; i < data.length ; i++)
{
var dataString = data[i].string;
this.versionSearchString = data[i].subString;
if (dataString.indexOf(data[i].subString) != -1)
{
return data[i].identity;
}
}
},
searchVersion: function (dataString)
{
var index = dataString.indexOf(this.versionSearchString);
if (index == -1) return;
return parseFloat(dataString.substring(index+this.versionSearchString.length+1));
},
dataBrowser:
[<|fim▁hole|> { string: navigator.userAgent, subString: "Safari", identity: "Safari" },
{ string: navigator.userAgent, subString: "Opera", identity: "Opera" }
]
};<|fim▁end|> | { string: navigator.userAgent, subString: "Chrome", identity: "Chrome" },
{ string: navigator.userAgent, subString: "MSIE", identity: "Explorer" },
{ string: navigator.userAgent, subString: "Firefox", identity: "Firefox" }, |
<|file_name|>ipynb.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright © 2013-2015 Damián Avila, Chris Warrick and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on nbconvert."""
from __future__ import unicode_literals, print_function
import io
import os
import sys
try:
import IPython
from IPython.nbconvert.exporters import HTMLExporter
if IPython.version_info[0] >= 3: # API changed with 3.0.0
from IPython import nbformat
current_nbformat = nbformat.current_nbformat
from IPython.kernel import kernelspec
else:
import IPython.nbformat.current as nbformat
current_nbformat = 'json'
kernelspec = None
from IPython.config import Config
flag = True
except ImportError:
flag = None
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, get_logger, STDERR_HANDLER
class CompileIPynb(PageCompiler):
"""Compile IPynb into HTML."""
name = "ipynb"
friendly_name = "Jupyter/IPython Notebook"
demote_headers = True
default_kernel = 'python2' if sys.version_info[0] == 2 else 'python3'
def set_site(self, site):
"""Set Nikola site."""<|fim▁hole|> def compile_html_string(self, source, is_two_file=True):
"""Export notebooks as HTML strings."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
HTMLExporter.default_template = 'basic'
c = Config(self.site.config['IPYNB_CONFIG'])
exportHtml = HTMLExporter(config=c)
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
(body, resources) = exportHtml.from_notebook_node(nb_json)
return body
def compile_html(self, source, dest, is_two_file=True):
"""Compile source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf8") as out_file:
out_file.write(self.compile_html_string(source, is_two_file))
def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Read metadata directly from ipynb file.
As ipynb files support arbitrary metadata as JSON, the metadata used by Nikola
will be assumed to be in the 'nikola' subfield.
"""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
source = post.source_path
with io.open(source, "r", encoding="utf8") as in_file:
nb_json = nbformat.read(in_file, current_nbformat)
# Metadata might not exist in two-file posts or in hand-crafted
# .ipynb files.
return nb_json.get('metadata', {}).get('nikola', {})
def create_post(self, path, **kw):
"""Create a new post."""
if flag is None:
req_missing(['ipython[notebook]>=2.0.0'], 'build this site (compile ipynb)')
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
kernel = kw.pop('ipython_kernel', None)
# is_page is not needed to create the file
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if content.startswith("{"):
# imported .ipynb file, guaranteed to start with "{" because it’s JSON.
nb = nbformat.reads(content, current_nbformat)
else:
if IPython.version_info[0] >= 3:
nb = nbformat.v4.new_notebook()
nb["cells"] = [nbformat.v4.new_markdown_cell(content)]
else:
nb = nbformat.new_notebook()
nb["worksheets"] = [nbformat.new_worksheet(cells=[nbformat.new_text_cell('markdown', [content])])]
if kernelspec is not None:
if kernel is None:
kernel = self.default_kernel
self.logger.notice('No kernel specified, assuming "{0}".'.format(kernel))
IPYNB_KERNELS = {}
ksm = kernelspec.KernelSpecManager()
for k in ksm.find_kernel_specs():
IPYNB_KERNELS[k] = ksm.get_kernel_spec(k).to_dict()
IPYNB_KERNELS[k]['name'] = k
del IPYNB_KERNELS[k]['argv']
if kernel not in IPYNB_KERNELS:
self.logger.error('Unknown kernel "{0}". Maybe you misspelled it?'.format(kernel))
self.logger.info("Available kernels: {0}".format(", ".join(sorted(IPYNB_KERNELS))))
raise Exception('Unknown kernel "{0}"'.format(kernel))
nb["metadata"]["kernelspec"] = IPYNB_KERNELS[kernel]
else:
# Older IPython versions don’t need kernelspecs.
pass
if onefile:
nb["metadata"]["nikola"] = metadata
with io.open(path, "w+", encoding="utf8") as fd:
if IPython.version_info[0] >= 3:
nbformat.write(nb, fd, 4)
else:
nbformat.write(nb, fd, 'ipynb')<|fim▁end|> | self.logger = get_logger('compile_ipynb', STDERR_HANDLER)
super(CompileIPynb, self).set_site(site)
|
<|file_name|>makeAllOligos.py<|end_file_name|><|fim▁begin|>'''
Designs oligos for a pre RNA-seq selection method
'''
### imports ###
import sys
import os
import numpy as np
def readFastaFile(fastaFilePath):
'''
Given a path to a multiline fasta file, reads the file, returning two lists - one containing the sequences, the other containing the headers
inputs: path to a fasta file
outputs: a list of the sequences, a list of the sequence headers
'''
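    # Illustrative example: for a FASTA file containing
    #   >seq1
    #   acgt
    #   >seq2
    #   ttaa
    # this returns (["ACGT", "TTAA"], ["seq1", "seq2"]).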
sequences = []
headers = []
with open(fastaFilePath) as f:
data = f.readlines()
sequence = ""
for line in data:
if ">" in line:
header = line.replace(">", "").strip()
headers.append(header)
if not sequence == "":
sequences.append(sequence.upper())
sequence = ""
else:
sequence += line.strip()
sequences.append(sequence.upper())
return sequences, headers
def makeOligos(targetSequences, targetLength, outputPath):
'''
Gives all distinct k-mers of the target length that appear in the target sequences
inputs: a list of sequences, length of k-mers, path to write output files<|fim▁hole|> seenOligos = set()
for i in range(len(targetSequences)):
currentSeq = targetSequences[i]
for j in range(len(targetSequences[i]) - targetLength):
oligo = currentSeq[ j : j + targetLength ]
seenOligos.add(oligo)
# write fasta files
oligos = list(seenOligos)
for i in range(len(oligos)):
outFile = open(outputPath + "/" + oligos[i] + ".fa", "w")
for j in range(1):
outFile.write(">" + str(j) + "\n")
outFile.write(oligos[i] + "\n")
outFile.close()
if __name__ == "__main__":
targetDirectoryPath = sys.argv[1] # path to a directory containing fasta files giving the sequences we want the oligos to hybridize to
targetLength = int(sys.argv[2]) # desired length of oligos
outputPath = sys.argv[3] # path to write output files
# intialize lists
allTargetSequences = []
allTargetHeaders = []
# read in sequences
print("reading target files")
for targetFile in os.listdir(targetDirectoryPath):
print(targetFile)
targetSequences, targetHeaders = readFastaFile(targetDirectoryPath + "/" + targetFile)
allTargetSequences += targetSequences
allTargetHeaders += targetHeaders
print("writing oligo fasta files")
makeOligos(targetSequences, targetLength, outputPath)<|fim▁end|> | outputs: writes the designed oligos to a Fasta file
''' |
<|file_name|>data_analysis.py<|end_file_name|><|fim▁begin|>from pyelectro import analysis as pye_analysis<|fim▁hole|>
file_name = "100pA_1a.csv"
t, v = pye_analysis.load_csv_data(file_name)
analysis_var = {
"peak_delta": 0.1,
"baseline": 0,
"dvdt_threshold": 2,
"peak_threshold": 0,
}
analysis = pye_analysis.IClampAnalysis(
v, t, analysis_var, start_analysis=150, end_analysis=900
)
res = analysis.analyse()
print res
pyplot.plot(t, v)
pyplot.suptitle("Data read in from: %s" % file_name)
pyplot.show()<|fim▁end|> | from matplotlib import pyplot |
<|file_name|>serializers.py<|end_file_name|><|fim▁begin|>from players.models import Player
from rest_framework import serializers
from teams.models import Team
class PlayerTeamSerializer(serializers.ModelSerializer):
"""Serializer for nesting a Team object inside a Player"""
url = serializers.HyperlinkedIdentityField(view_name='team-detail')
captain = serializers.PrimaryKeyRelatedField(read_only=True)
creator = serializers.PrimaryKeyRelatedField(read_only=True)
@staticmethod
def setup_eager_loading(queryset):
queryset = queryset.select_related(
'captain',
'creator',
)
return queryset
class Meta:
model = Team
fields = (
'id',
'name',
'captain',
'creator',
'url',
)
read_only_fields = (
'id',
'name',
'captain',
'creator',
'url',
)
class BasePlayerSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='player-detail')
steamid = serializers.CharField(source='user.steamid', required=False)
# steam_friends = serializers.ListField(child=serializers.CharField(), source='user.steam_friends', required=False)
email = serializers.EmailField(source='user.email')
username = serializers.CharField(source='user.username')
avatar = serializers.CharField(source='user.avatar')
avatarfull = serializers.CharField(source='user.avatarfull')
last_login = serializers.CharField(source='user.last_login')
def update(self, instance, validated_data):
user_data = validated_data.pop('user', None)
if user_data:
if 'email' in user_data.keys():
instance.user.email = user_data.get('email')
if 'username' in user_data.keys():
instance.user.username = user_data.get('username')
instance.user.save()
return super(BasePlayerSerializer, self).update(instance, validated_data)
@staticmethod
def setup_eager_loading(queryset):
queryset = queryset.prefetch_related(
'interests',
'languages',
'positions',
'regions',
'teams',
).select_related(
'user'
)
return queryset
class Meta:
model = Player
fields = (
'id',
'url',
'steamid',
# 'steam_friends',
'username',
'bio',
'email',
'last_login',
'regions',
'positions',
'interests',
'languages',
'teams',
'avatar',
'avatarfull',
'mmr',
'mmr_estimate',
'mmr_last_updated',
)
read_only_fields = (
'id',
'url',
'steamid',
# 'steam_friends',
'username',
'last_login',
'avatar',
'avatarfull',
'mmr',
'mmr_estimate',
'mmr_last_updated',<|fim▁hole|>
class FlatPlayerSerializer(BasePlayerSerializer):
pass
class PlayerSerializer(BasePlayerSerializer):
regions = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
positions = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
interests = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
languages = serializers.PrimaryKeyRelatedField(read_only=True, many=True)
teams = PlayerTeamSerializer(read_only=True, many=True)<|fim▁end|> | )
|
<|file_name|>provider_command.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Providers sub-commands"""
import re
from airflow.cli.simple_table import AirflowConsole
from airflow.providers_manager import ProvidersManager
from airflow.utils.cli import suppress_logs_and_warning
def _remove_rst_syntax(value: str) -> str:
return re.sub("[`_<>]", "", value.strip(" \n."))
@suppress_logs_and_warning
def provider_get(args):
"""Get a provider info."""
providers = ProvidersManager().providers
if args.provider_name in providers:
provider_version = providers[args.provider_name].version
provider_info = providers[args.provider_name].provider_info
if args.full:
provider_info["description"] = _remove_rst_syntax(provider_info["description"])
AirflowConsole().print_as(
data=[provider_info],
output=args.output,
)
else:
print(f"Provider: {args.provider_name}")
print(f"Version: {provider_version}")
else:
raise SystemExit(f"No such provider installed: {args.provider_name}")
@suppress_logs_and_warning
def providers_list(args):
"""Lists all providers at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().providers.values()),
output=args.output,
mapper=lambda x: {
"package_name": x[1]["package-name"],
"description": _remove_rst_syntax(x[1]["description"]),
"version": x[0],
},
)
<|fim▁hole|>
@suppress_logs_and_warning
def hooks_list(args):
"""Lists all hooks at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().hooks.items()),
output=args.output,
mapper=lambda x: {
"connection_type": x[0],
"class": x[1].connection_class,
"conn_id_attribute_name": x[1].connection_id_attribute_name,
'package_name': x[1].package_name,
'hook_name': x[1].hook_name,
},
)
@suppress_logs_and_warning
def connection_form_widget_list(args):
"""Lists all custom connection form fields at the command line"""
AirflowConsole().print_as(
data=list(ProvidersManager().connection_form_widgets.items()),
output=args.output,
mapper=lambda x: {
"connection_parameter_name": x[0],
"class": x[1].connection_class,
'package_name': x[1].package_name,
'field_type': x[1].field.field_class.__name__,
},
)
@suppress_logs_and_warning
def connection_field_behaviours(args):
"""Lists field behaviours"""
AirflowConsole().print_as(
data=list(ProvidersManager().field_behaviours.keys()),
output=args.output,
mapper=lambda x: {
"field_behaviours": x,
},
)
@suppress_logs_and_warning
def extra_links_list(args):
"""Lists all extra links at the command line"""
AirflowConsole().print_as(
data=ProvidersManager().extra_links_class_names,
output=args.output,
mapper=lambda x: {
"extra_link_class_name": x,
},
)<|fim▁end|> | |
<|file_name|>simpleMsg.go<|end_file_name|><|fim▁begin|>package utils
import (
"encoding/binary"
)
const (
MaxMsgLen = 65536
)
// Message: a simple []byte message structure, limited to a maximum length of 65536 bytes
type SimpleMsg struct {
MsgSize uint32
MsgSender uint32
MsgReceiver uint32 // 0:broadcast
MsgBody []byte // raw data, used for forwarding
}
// Create a message
//1. []byte format: [4-byte length + 4-byte sender id + 4-byte receiver id + body], where length = 12 + body length
//2. The total length must not exceed 65536 bytes
//3. Within 3 seconds of a successful socket connection an authentication message must be sent; format: no body, the sender id is the verification id, the receiver id can be anything
//4. After successful verification, messages are dispatched by receiver id (ids start at 1; a client may send to itself); a receiver id of 0 broadcasts the message, but it is not echoed back to the sender.
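// Worked example of the wire layout (little-endian, derived from FromString below):
// FromString(1, 2, "hi") produces 14 bytes:
//   0e 00 00 00  -> length 14 (12-byte header + 2-byte body)
//   01 00 00 00  -> sender id 1
//   02 00 00 00  -> receiver id 2
//   68 69        -> body "hi"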
func MakeNewSimpleMsg() *SimpleMsg {
return &SimpleMsg{
MsgSize: 0,
MsgSender: 0,
MsgReceiver: 0,
MsgBody: []byte{},
}
}<|fim▁hole|>
//[]byte -> SimpleMsg
func (this *SimpleMsg) FromBytes(buf []byte) *SimpleMsg {
this.MsgSize = 0
this.MsgSender = 0
this.MsgReceiver = 0
this.MsgBody = []byte{}
if len(buf) < 12 {
return this
} else {
this.MsgSize = binary.LittleEndian.Uint32(buf[0:4])
if int(this.MsgSize) == len(buf) {
this.MsgSender = binary.LittleEndian.Uint32(buf[4:8])
this.MsgReceiver = binary.LittleEndian.Uint32(buf[8:12])
//copy(this.MsgBody, buf)
this.MsgBody = append(this.MsgBody, buf...)
} else {
this.MsgSize = 0
}
}
return this
}
//string -> SimpleMsg
func (this *SimpleMsg) FromString(fromId int, toId int, msg string) *SimpleMsg {
this.MsgSender = uint32(fromId)
this.MsgReceiver = uint32(toId)
dataFrom := make([]byte, 4)
dataTo := make([]byte, 4)
dataSize := make([]byte, 4)
dataBody := []byte(msg)
this.MsgSize = uint32(len(dataBody) + 12)
binary.LittleEndian.PutUint32(dataFrom, this.MsgSender)
binary.LittleEndian.PutUint32(dataTo, this.MsgReceiver)
binary.LittleEndian.PutUint32(dataSize, this.MsgSize)
data := []byte{}
data = append(data, dataSize...)
data = append(data, dataFrom...)
data = append(data, dataTo...)
data = append(data, dataBody...)
this.MsgBody = data
return this
}
//SimpleMsg -> []byte
func (this *SimpleMsg) ToData() []byte {
return this.MsgBody
}
//SimpleMsg.MsgBody -> string
func (this *SimpleMsg) ToString() string {
return string(this.MsgBody[12:])
}
// Data encryption, to be implemented later
func (this *SimpleMsg) EnCode() {
}
// Data decryption, to be implemented later
func (this *SimpleMsg) DeCode() {
}<|fim▁end|> | |
<|file_name|>test_ptp_clock_cdc_64.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import ptp
module = 'ptp_clock_cdc'<|fim▁hole|>testbench = 'test_%s_64' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
TS_WIDTH = 64
NS_WIDTH = 4
FNS_WIDTH = 16
INPUT_PERIOD_NS = 0x6
INPUT_PERIOD_FNS = 0x6666
OUTPUT_PERIOD_NS = 0x6
OUTPUT_PERIOD_FNS = 0x6666
USE_SAMPLE_CLOCK = 1
LOG_FIFO_DEPTH = 3
LOG_RATE = 3
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_clk = Signal(bool(0))
input_rst = Signal(bool(0))
output_clk = Signal(bool(0))
output_rst = Signal(bool(0))
sample_clk = Signal(bool(0))
input_ts = Signal(intbv(0)[96:])
# Outputs
output_ts = Signal(intbv(0)[96:])
output_ts_step = Signal(bool(0))
output_pps = Signal(bool(0))
# PTP clock
ptp_clock = ptp.PtpClock(period_ns=INPUT_PERIOD_NS, period_fns=INPUT_PERIOD_FNS)
ptp_logic = ptp_clock.create_logic(
input_clk,
input_rst,
ts_64=input_ts
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_clk=input_clk,
input_rst=input_rst,
output_clk=output_clk,
output_rst=output_rst,
sample_clk=sample_clk,
input_ts=input_ts,
output_ts=output_ts,
output_ts_step=output_ts_step,
output_pps=output_pps
)
@always(delay(3200))
def clkgen():
clk.next = not clk
input_clk.next = not input_clk
output_clk_hp = Signal(int(3200))
@instance
def clkgen_output():
while True:
yield delay(int(output_clk_hp))
output_clk.next = not output_clk
@always(delay(5000))
def clkgen_sample():
sample_clk.next = not sample_clk
@instance
def check():
yield delay(100000)
yield clk.posedge
rst.next = 1
input_rst.next = 1
output_rst.next = 1
yield clk.posedge
yield clk.posedge
yield clk.posedge
input_rst.next = 0
output_rst.next = 0
yield clk.posedge
yield delay(100000)
yield clk.posedge
# testbench stimulus
yield clk.posedge
print("test 1: Same clock speed")
current_test.next = 1
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 2: Slightly faster")
current_test.next = 2
output_clk_hp.next = 3100
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 3: Slightly slower")
current_test.next = 3
output_clk_hp.next = 3300
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 4: Significantly faster")
current_test.next = 4
output_clk_hp.next = 2000
yield clk.posedge
for i in range(20000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
yield clk.posedge
print("test 5: Significantly slower")
current_test.next = 5
output_clk_hp.next = 5000
yield clk.posedge
for i in range(30000):
yield clk.posedge
input_stop_ts = input_ts/2**16*1e-9
output_stop_ts = output_ts/2**16*1e-9
print(input_stop_ts-output_stop_ts)
assert abs(input_stop_ts-output_stop_ts) < 1e-8
yield delay(100000)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()<|fim▁end|> | |
<|file_name|>test_update_statement.py<|end_file_name|><|fim▁begin|># Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cqlengine.columns import Column, Set, List, Text
from cassandra.cqlengine.operators import *
from cassandra.cqlengine.statements import (UpdateStatement, WhereClause,
AssignmentClause, SetUpdateClause,
ListUpdateClause)
import six
class UpdateStatementTests(unittest.TestCase):
def test_table_rendering(self):
""" tests that fields are properly added to the select statement """
us = UpdateStatement('table')
self.assertTrue(six.text_type(us).startswith('UPDATE table SET'), six.text_type(us))
self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))
def test_rendering(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s', six.text_type(us))
us.add_where(Column(db_field='a'), NotEqualsOperator(), 'y')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(0)s, "c" = %(1)s WHERE "a" = %(2)s AND "a" != %(3)s', six.text_type(us))
def test_context(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})
def test_context_update(self):
us = UpdateStatement('table')
us.add_assignment(Column(db_field='a'), 'b')
us.add_assignment(Column(db_field='c'), 'd')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
us.update_context_id(3)
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = %(4)s, "c" = %(5)s WHERE "a" = %(3)s')<|fim▁hole|> us.add_assignment(Column(db_field='a'), 'b')
us.add_where(Column(db_field='a'), EqualsOperator(), 'x')
self.assertIn('USING TTL 60', six.text_type(us))
def test_update_set_add(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set((1,)), 'add')
self.assertEqual(six.text_type(us), 'UPDATE table SET "a" = "a" + %(0)s')
def test_update_empty_set_add_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'add')
self.assertFalse(us.assignments)
def test_update_empty_set_removal_does_not_assign(self):
us = UpdateStatement('table')
us.add_update(Set(Text, db_field='a'), set(), 'remove')
self.assertFalse(us.assignments)
def test_update_list_prepend_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'prepend')
self.assertFalse(us.assignments)
def test_update_list_append_with_empty_list(self):
us = UpdateStatement('table')
us.add_update(List(Text, db_field='a'), [], 'append')
self.assertFalse(us.assignments)<|fim▁end|> | self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})
def test_additional_rendering(self):
us = UpdateStatement('table', ttl=60) |
<|file_name|>test_query.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
__author__ = "Simon Oldfield"
import logging
from datacube.api import parse_date_min, parse_date_max, Satellite, DatasetType
from datacube.api.query import list_cells_as_list, list_tiles_as_list
from datacube.api.query import list_cells_vector_file_as_list
from datacube.api.query import MONTHS_BY_SEASON, Season
from datacube.api.query import LS7_SLC_OFF_EXCLUSION, LS7_SLC_OFF_ACQ_MIN
from datacube.api.query import LS8_PRE_WRS_2_EXCLUSION, LS8_PRE_WRS_2_ACQ_MAX
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
_log = logging.getLogger()
TEST_CELL_X = 120
TEST_CELL_Y = -25
TEST_YEAR = 2005
TEST_YEAR_STR = str(TEST_YEAR)
TEST_MONTHS = MONTHS_BY_SEASON[Season.SUMMER]
TEST_VECTOR_FILE = "Mainlands.shp"
TEST_VECTOR_LAYER = 0
TEST_VECTOR_FEATURE = 4
def test_list_cells_120_020_2005_ls578(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls7_slc(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_cells_120_020_2005_ls578_summer(config=None):
cells = list_cells_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=[DatasetType.ARG25],
months=TEST_MONTHS,
config=config)
assert(cells and len(list(cells)) > 0)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert(cell.x == TEST_CELL_X and cell.y == TEST_CELL_Y and cell.xy == (TEST_CELL_X, TEST_CELL_Y))
def test_list_tiles_120_020_2005_ls578(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR), acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types))
def test_list_tiles_120_020_2005_ls578_no_ls7_slc(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS7_SLC_OFF_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
dataset = tile.datasets[DatasetType.ARG25]
assert dataset
_log.info("Found ARG25 dataset [%s]", dataset.path)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and (dataset.satellite != Satellite.LS7 or tile.end_datetime.date() <= LS7_SLC_OFF_ACQ_MIN))
def test_list_tiles_120_020_2005_ls578_no_ls8_pre_wrs_2(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
exclude=[LS8_PRE_WRS_2_EXCLUSION],
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
dataset = tile.datasets[DatasetType.ARG25]
assert dataset
_log.info("Found ARG25 dataset [%s]", dataset.path)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and (dataset.satellite != Satellite.LS8 or tile.end_datetime.date() >= LS8_PRE_WRS_2_ACQ_MAX))
def test_list_tiles_120_020_2005_ls578_summer(config=None):
dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
tiles = list_tiles_as_list(x=[TEST_CELL_X], y=[TEST_CELL_Y],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
dataset_types=dataset_types,
months=TEST_MONTHS,
config=config)
assert(tiles and len(list(tiles)) > 0)
for tile in tiles:
_log.info("Found tile xy = %s", tile.xy)
        assert(tile.x == TEST_CELL_X and tile.y == TEST_CELL_Y and tile.xy == (TEST_CELL_X, TEST_CELL_Y)
               and tile.end_datetime_year == TEST_YEAR
               and all(ds in tile.datasets for ds in dataset_types)
               and tile.end_datetime_month in [m.value for m in TEST_MONTHS])
# AOI
def test_list_cells_act_2005_ls578(config=None):
cells = list_cells_vector_file_as_list(vector_file=TEST_VECTOR_FILE,
vector_layer=TEST_VECTOR_LAYER,
vector_feature=TEST_VECTOR_FEATURE,
satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
acq_min=parse_date_min(TEST_YEAR_STR),
acq_max=parse_date_max(TEST_YEAR_STR),
                                           dataset_types=[DatasetType.ARG25], config=config)
assert(cells and len(list(cells)) == 2)
for cell in cells:
_log.info("Found cell xy = %s", cell.xy)
assert((cell.x == 148 or cell.x == 149) and cell.y == -36)
# def test_list_tiles_act_2005_ls578(config=None):
#
# dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
#
# tiles = list_tiles_vector_file_as_list(vector_file="Mainlands.shp", vector_layer=0, vector_feature=4,
# acq_min=parse_date_min("2005"), acq_max=parse_date_max("2005"),
# satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
# dataset_types=dataset_types,
# config=config)
#
# assert(tiles and len(list(tiles)) > 0)
#
# for tile in tiles:
# _log.info("Found tile xy = %s", tile.xy)
# assert((tile.x == 148 or tile.x == 149) and tile.y == -36
# and tile.end_datetime_year == 2005
# and (ds in tile.datasets for ds in dataset_types)
# and tile.end_datetime_month in [m.value for m in MONTHS_BY_SEASON[Season.SUMMER]])<|fim▁hole|>
# def test_list_tiles_act_2005_ls578_summer(config=None):
#
# dataset_types = [DatasetType.ARG25, DatasetType.PQ25, DatasetType.FC25]
#
# tiles = list_tiles_vector_file_as_list(vector_file="Mainlands.shp", vector_layer=0, vector_feature=4,
# acq_min=parse_date_min("2005"), acq_max=parse_date_max("2005"),
# satellites=[Satellite.LS5, Satellite.LS7, Satellite.LS8],
# dataset_types=dataset_types,
# months=MONTHS_BY_SEASON[Season.SUMMER],
# config=config)
#
# assert(tiles and len(list(tiles)) > 0)
#
# for tile in tiles:
# _log.info("Found tile xy = %s", tile.xy)
# assert((tile.x == 148 or tile.x == 149) and tile.y == -36
# and tile.end_datetime_year == 2005
# and (ds in tile.datasets for ds in dataset_types)
# and tile.end_datetime_month in [m.value for m in MONTHS_BY_SEASON[Season.SUMMER]])<|fim▁end|> | |
<|file_name|>test8.cpp<|end_file_name|><|fim▁begin|>//-----------------------------------------------------------------------------
// boost-libs variant/test/test8.cpp header file
// See http://www.boost.org for updates, documentation, and revision history.
//-----------------------------------------------------------------------------
//
// Copyright (c) 2003<|fim▁hole|>//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "boost/test/minimal.hpp"
#include "boost/variant.hpp"
#include <iostream>
#include <vector>
#include <string>
using namespace std;
using namespace boost;
typedef variant<float, std::string, int, std::vector<std::string> > t_var1;
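// Visitor that accumulates the int alternative into result_ and ignores the other alternatives.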
struct int_sum : static_visitor<>
{
int_sum() : result_(0) { }
void operator()(int t)
{
result_ += t;
}
result_type operator()(float ) { }
result_type operator()(const std::string& ) { }
result_type operator()(const std::vector<std::string>& ) { }
int result_;
};
template <typename T, typename Variant>
T& check_pass(Variant& v, T value)
{
BOOST_CHECK(get<T>(&v));
try
{
T& r = get<T>(v);
BOOST_CHECK(r == value);
return r;
}
catch(boost::bad_get&)
{
throw; // must never reach
}
}
template <typename T, typename Variant>
void check_fail(Variant& v)
{
BOOST_CHECK(!relaxed_get<T>(&v));
try
{
T& r = relaxed_get<T>(v);
(void)r; // suppress warning about r not being used
BOOST_CHECK(false && &r); // should never reach
}
catch(const boost::bad_get& e)
{
        BOOST_CHECK(!!e.what()); // make sure that what() is const qualified and returns something
}
}
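// check_pass<T> asserts that both the pointer and reference forms of get<T> succeed and
// yield `value`; check_fail<T> asserts that relaxed_get<T> returns a null pointer and that
// the reference form throws boost::bad_get when the variant does not currently hold a T.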
int test_main(int , char* [])
{
int_sum acc;
t_var1 v1 = 800;
// check get on non-const variant
{
int& r1 = check_pass<int>(v1, 800);
const int& cr1 = check_pass<const int>(v1, 800);
check_fail<float>(v1);
check_fail<const float>(v1);
check_fail<short>(v1);
check_fail<const short>(v1);
apply_visitor(acc, v1);
BOOST_CHECK(acc.result_ == 800);
r1 = 920; // NOTE: modifies content of v1
apply_visitor(acc, v1);
BOOST_CHECK(cr1 == 920);
BOOST_CHECK(acc.result_ == 800 + 920);
}
// check const correctness:
{
const t_var1& c = v1;
check_pass<const int>(c, 920);
//check_fail<int>(c);
check_fail<const float>(c);
//check_fail<float>(c);
check_fail<const short>(c);
//check_fail<short>(c);
}
return boost::exit_success;
}<|fim▁end|> | // Eric Friedman, Itay Maman |
<|file_name|>PythonVM.java<|end_file_name|><|fim▁begin|>/* ====================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*<|fim▁hole|> * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ====================================================================
*/
package org.apache.jcc;
public class PythonVM {
static protected PythonVM vm;
static {
System.loadLibrary("jcc");
}
static public PythonVM start(String programName, String[] args)
{
if (vm == null)
{
vm = new PythonVM();
vm.init(programName, args);
}
return vm;
}
static public PythonVM start(String programName)
{
return start(programName, null);
}
static public PythonVM get()
{
return vm;
}
protected PythonVM()
{
}
protected native void init(String programName, String[] args);
public native Object instantiate(String moduleName, String className)
throws PythonException;
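    // Usage sketch (module and class names below are illustrative, and error
    // handling for PythonException is omitted):
    //
    //     PythonVM vm = PythonVM.start("myProgram");
    //     Object obj = vm.instantiate("some_module", "SomeClass");
    //
    // start() is idempotent: once the VM exists, later calls return the same instance.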
}<|fim▁end|> | * Unless required by applicable law or agreed to in writing, software |
<|file_name|>grid-columns.js<|end_file_name|><|fim▁begin|>describe("grid-columns", function() {
function createSuite(buffered) {
describe(buffered ? "with buffered rendering" : "without buffered rendering", function() {
var defaultColNum = 4,
totalWidth = 1000,
grid, view, colRef, store, column;
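            // spyOnEvent attaches a jasmine spy as a listener for eventName. Usage sketch
            // (event name illustrative):
            //     var spy = spyOnEvent(view, 'refresh'); ... expect(spy).toHaveBeenCalled();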
function spyOnEvent(object, eventName, fn) {
var obj = {
fn: fn || Ext.emptyFn
},
spy = spyOn(obj, "fn");
object.addListener(eventName, obj.fn);
return spy;
}
function makeGrid(numCols, gridCfg, hiddenFn, lockedFn){
var cols, col, i;
gridCfg = gridCfg || {};
colRef = [];
if (!numCols || typeof numCols === 'number') {
cols = [];
numCols = numCols || defaultColNum;
for (i = 0; i < numCols; ++i) {
col = {
itemId: 'col' + i,
text: 'Col' + i,
dataIndex: 'field' + i
};
if (hiddenFn && hiddenFn(i)) {
col.hidden = true;
}
if (lockedFn && lockedFn(i)) {
col.locked = true;
}
col = new Ext.grid.column.Column(col);
cols.push(col);
}
} else {
cols = numCols;
}
store = new Ext.data.Store({
model: spec.TestModel,
data: [{
field0: 'val1',
field1: 'val2',
field2: 'val3',
field3: 'val4',
field4: 'val5'
}]
});
grid = new Ext.grid.Panel(Ext.apply({
renderTo: Ext.getBody(),
columns: cols,
width: totalWidth,
height: 500,
border: false,
store: store,
bufferedRenderer: buffered,
viewConfig: {
mouseOverOutBuffer: 0
}
}, gridCfg));
view = grid.view;
colRef = grid.getColumnManager().getColumns();
}
function getCell(rowIdx, colIdx) {
return grid.getView().getCellInclusive({
row: rowIdx,
column: colIdx
});
}
function getCellText(rowIdx, colIdx) {
return getCellInner(rowIdx, colIdx).innerHTML;
}
function getCellInner(rowIdx, colIdx) {
var cell = getCell(rowIdx, colIdx);
return Ext.fly(cell).down(grid.getView().innerSelector).dom;
}
function hasCls(el, cls) {
return Ext.fly(el).hasCls(cls);
}
function clickHeader(col) {
// Offset so we're not on the edges to trigger a drag
jasmine.fireMouseEvent(col.titleEl, 'click', 10);
}
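            // resizeColumn(column, by) simulates dragging the column's right edge `by` pixels
            // to the right (a negative `by` shrinks the column).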
function resizeColumn(column, by) {
var colBox = column.el.getBox(),
fromMx = colBox.x + colBox.width - 2,
fromMy = colBox.y + colBox.height / 2,
dragThresh = by > 0 ? Ext.dd.DragDropManager.clickPixelThresh + 1 : -Ext.dd.DragDropManager.clickPixelThresh - 1;
// Mousedown on the header to drag
jasmine.fireMouseEvent(column.el.dom, 'mouseover', fromMx, fromMy);
jasmine.fireMouseEvent(column.el.dom, 'mousemove', fromMx, fromMy);
jasmine.fireMouseEvent(column.el.dom, 'mousedown', fromMx, fromMy);
                // The initial move which triggers the start of the drag
jasmine.fireMouseEvent(column.el.dom, 'mousemove', fromMx + dragThresh, fromMy);
// Move to resize
jasmine.fireMouseEvent(column.el.dom, 'mousemove', fromMx + by + 2, fromMy);
jasmine.fireMouseEvent(column.el.dom, 'mouseup', fromMx + by + 2, fromMy);
}
function setup() {
Ext.define('spec.TestModel', {
extend: 'Ext.data.Model',
fields: ['field0', 'field1', 'field2', 'field3', 'field4']
});
}
function tearDown() {
Ext.destroy(grid, store, column);
grid = view = store = colRef = column = null;
Ext.undefine('spec.TestModel');
Ext.data.Model.schema.clear();
}
beforeEach(setup);
afterEach(tearDown);
// https://sencha.jira.com/browse/EXTJS-19950
describe('force fit columns, shrinking width to where flexes tend to zero', function() {
it('should work', function() {
makeGrid([{
text : 'Col1',
dataIndex : 'foo',
flex : 1
}, {
text : 'Col2',
columns : [{
text : 'Col21',
dataIndex : 'foo2',
width: 140
}, {
text : 'Col22',
dataIndex : 'foo4',
width : 160
}, {
text : 'Col23',
dataIndex : 'foo4',
width : 100
}, {
                    text : 'Col24',
dataIndex : 'foo4',
width : 85
}]
}, {
text : 'Col3',
dataIndex : 'foo3',
width : 110
}, {
text : 'Col4',
columns : [ {
text : 'Col41',
dataIndex : 'foo2',
flex: 1
}, {
text : 'Col42',
dataIndex : 'foo4',
width : 120
}]
}], {
autoScroll: true,
forceFit: true,
width: 1800
});
expect(function() {
grid.setWidth(700);
}).not.toThrow();
});
});
describe('as containers', function () {
var leafCls = 'x-leaf-column-header',
col;
afterEach(function () {
col = null;
});
describe('group headers', function () {
beforeEach(function () {
makeGrid([{
itemId: 'main1',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}, {
itemId: 'child3'
}]
}]);
col = grid.down('#main1');
});
it('should be stamped as a container', function () {
expect(col.isContainer).toBe(true);
});
it('should not give the titleEl the leaf column class', function () {
expect(col.titleEl.hasCls(leafCls)).toBe(false);
});
});
describe('contains child items', function () {
beforeEach(function () {
makeGrid([{
text: 'Foo',
dataIndex: 'field0',
items: [{
xtype: 'textfield',
itemId: 'foo'
}]
}]);
col = grid.visibleColumnManager.getHeaderByDataIndex('field0');
});
it('should be stamped as a container', function () {
expect(col.isContainer).toBe(true);
});
it('should not give the titleEl the leaf column class', function () {
expect(col.titleEl.hasCls(leafCls)).toBe(false);
});
describe('focusing', function () {
// See EXTJS-15757.
it('should not throw when focusing', function () {
expect(function () {
grid.down('#foo').onFocus();
}).not.toThrow();
});
it('should return the items collection', function () {
var col = grid.visibleColumnManager.getHeaderByDataIndex('field0');
expect(col.getFocusables()).toBe(col.items.items);
});
});
});
});
describe("cell sizing", function() {
it("should size the cells to match fixed header sizes", function() {
makeGrid([{
width: 200
}, {
width: 500
}]);
expect(getCell(0, 0).getWidth()).toBe(200);
expect(getCell(0, 1).getWidth()).toBe(500);
});
it("should size the cells to match flex header sizes", function() {
makeGrid([{
flex: 8
}, {
flex: 2
}]);
expect(getCell(0, 0).getWidth()).toBe(800);
expect(getCell(0, 1).getWidth()).toBe(200);
});
it("should size the cells to match an the text size in the header", function() {
makeGrid([{
width: null,
text: '<div style="width: 25px;"></div>'
}, {
width: null,
text: '<div style="width: 75px;"></div>'
}]);
expect(getCell(0, 0).getWidth()).toBe(colRef[0].titleEl.getWidth() + colRef[0].el.getBorderWidth('lr'));
expect(getCell(0, 1).getWidth()).toBe(colRef[1].titleEl.getWidth() + colRef[1].el.getBorderWidth('lr'));
});
});
describe("initializing", function() {
describe("normal", function() {
it("should accept a column array", function() {
makeGrid([{
text: 'Foo',
dataIndex: 'field0'
}]);
expect(grid.getColumnManager().getHeaderAtIndex(0).text).toBe('Foo');
});
it("should accept a header config", function() {
makeGrid({
margin: 5,
items: [{
text: 'Foo',
dataIndex: 'field0'
}]
});
expect(grid.getColumnManager().getHeaderAtIndex(0).text).toBe('Foo');
expect(grid.headerCt.margin).toBe(5);
});
});
describe("locking", function() {
it("should accept a column array, enabling locking if a column is configured with locked: true", function() {
makeGrid([{
text: 'Foo',
dataIndex: 'field0',
locked: true
}, {
text: 'Bar',
dataIndex: 'field1'<|fim▁hole|> expect(grid.lockable).toBe(true);
});
it("should accept a header config, enabling locking if any column is configured with locked: true", function() {
makeGrid({
items: [{
text: 'Foo',
dataIndex: 'field0',
locked: true
}, {
text: 'Bar',
dataIndex: 'field1'
}]
});
expect(grid.lockable).toBe(true);
// Top level grid should return columns from both sides
expect(grid.getVisibleColumns().length).toBe(2);
expect(grid.getColumns().length).toBe(2);
});
});
});
describe("column manager", function() {
// Get all columns from the grid ref
function ga() {
return grid.getColumnManager().getColumns();
}
// Get all manager
function gam() {
return grid.getColumnManager();
}
// Get all visible columns from the grid ref
function gv() {
return grid.getVisibleColumnManager().getColumns();
}
// Get visible manager
function gvm() {
return grid.getVisibleColumnManager();
}
it("should provide a getColumnManager method", function(){
makeGrid();
expect(gam().$className).toBe('Ext.grid.ColumnManager');
});
it("should provide a getVisibleColumnManager method", function(){
makeGrid();
expect(gvm().$className).toBe('Ext.grid.ColumnManager');
});
describe("simple grid", function(){
beforeEach(function(){
makeGrid();
});
it("should return all leaf columns", function() {
expect(gv().length).toBe(defaultColNum);
});
it("should have the correct column order", function(){
var cols = gv(),
i = 0,
len = cols.length;
for (; i < len; ++i) {
expect(cols[i]).toBe(colRef[i]);
}
});
it("should update the order when moving columns", function(){
grid.headerCt.move(3, 1);
var cols = gv();
expect(cols[0]).toBe(colRef[0]);
expect(cols[1]).toBe(colRef[3]);
expect(cols[2]).toBe(colRef[1]);
expect(cols[3]).toBe(colRef[2]);
});
it("should update the columns when removing a column", function(){
grid.headerCt.remove(1);
var cols = gv();
expect(cols[0]).toBe(colRef[0]);
expect(cols[1]).toBe(colRef[2]);
expect(cols[2]).toBe(colRef[3]);
});
it("should update the columns when adding a column", function(){
grid.headerCt.add({
text: 'Col4'
});
expect(gv()[4].text).toBe('Col4');
});
describe("functions", function() {
describe("getHeaderIndex", function() {
it("should return the correct index for the header", function() {
expect(gam().getHeaderIndex(colRef[3])).toBe(3);
});
it("should return -1 if the column doesn't exist", function(){
column = new Ext.grid.column.Column();
expect(gam().getHeaderIndex(column)).toBe(-1);
});
});
describe("getHeaderAtIndex", function(){
it("should return the column reference", function(){
expect(gam().getHeaderAtIndex(2)).toBe(colRef[2]);
});
it("should return null if the index is out of bounds", function(){
expect(gam().getHeaderAtIndex(10)).toBeNull();
});
});
describe("getHeaderById", function(){
it("should return the column reference by id", function(){
expect(gam().getHeaderById('col1')).toBe(colRef[1]);
});
it("should return null if the id doesn't exist", function() {
expect(gam().getHeaderById('foo')).toBeNull();
});
});
it("should return the first item", function(){
expect(gam().getFirst()).toBe(colRef[0]);
});
it("should return the last item", function(){
expect(gam().getLast()).toBe(colRef[3]);
});
describe("getNextSibling", function(){
it("should return the next sibling", function(){
expect(gam().getNextSibling(colRef[1])).toBe(colRef[2]);
});
it("should return the null if the next sibling doesn't exist", function(){
expect(gam().getNextSibling(colRef[3])).toBeNull();
});
});
describe("getPreviousSibling", function(){
it("should return the previous sibling", function(){
expect(gam().getPreviousSibling(colRef[2])).toBe(colRef[1]);
});
it("should return the null if the previous sibling doesn't exist", function(){
expect(gam().getPreviousSibling(colRef[0])).toBeNull();
});
});
});
});
describe('getHeaderIndex', function () {
var index, headerCtItems;
beforeEach(function () {
makeGrid([{
text: 'Name',
width: 100,
dataIndex: 'name',
hidden: true
},{
text: 'Email',
width: 100,
dataIndex: 'email'
}, {
text: 'Stock Price',
columns: [{
text: 'Price',
width: 75,
dataIndex: 'price'
}, {
text: 'Phone',
width: 80,
dataIndex: 'phone',
hidden: true
}, {
text: '% Change',
width: 40,
dataIndex: 'pctChange'
}]
}, {
text: 'Foo',
columns: [{
text: 'Foo Price',
width: 75,
dataIndex: 'price',
hidden: true
}, {
text: 'Foo Phone',
width: 80,
dataIndex: 'phone'
}, {
text: 'Foo % Change',
width: 40,
dataIndex: 'pctChange'
}]
}]);
headerCtItems = grid.headerCt.items;
});
afterEach(function () {
index = headerCtItems = null;
});
describe('all columns', function () {
describe('when argument is a column', function () {
it('should return a valid index', function () {
index = gam().getHeaderIndex(headerCtItems.items[0]);
expect(index).not.toBe(-1);
expect(index).toBe(0);
});
it('should return the header regardless of visibility', function () {
var header;
header = headerCtItems.items[0];
index = gam().getHeaderIndex(header);
expect(header.hidden).toBe(true);
expect(index).toBe(0);
});
it('should return the index of the header in its owner stack - rootHeader', function () {
index = gam().getHeaderIndex(headerCtItems.items[3].items.items[0]);
expect(index).toBe(5);
});
it('should return the index of the header in its owner stack - groupHeader', function () {
// Note that this spec is using the same header as the previous spec to demonstrate the difference.
var groupHeader = headerCtItems.items[3];
index = groupHeader.columnManager.getHeaderIndex(groupHeader.items.items[0]);
expect(index).toBe(0);
});
});
describe('when argument is a group header', function () {
it('should return a valid index', function () {
index = gam().getHeaderIndex(headerCtItems.items[2]);
expect(index).not.toBe(-1);
expect(index).toBe(2);
});
it('should return an index of the first leaf of group header', function () {
var colMgrHeader;
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gam().getHeaderIndex(headerCtItems.items[2]);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gam().getHeaderAtIndex(index);
// Remember, this is the index of the root header's visible col mgr.
expect(index).toBe(2);
expect(colMgrHeader.hidden).toBe(false);
expect(colMgrHeader.dataIndex).toBe('price');
});
it("should be a reference to the first leaf header in the grouped header's columnn manager", function () {
var groupedHeader, colMgrHeader, groupHeaderFirstHeader;
groupedHeader = headerCtItems.items[2];
groupHeaderFirstHeader = groupedHeader.columnManager.getHeaderAtIndex(0);
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gam().getHeaderIndex(groupedHeader);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gam().getHeaderAtIndex(index);
expect(colMgrHeader).toBe(groupHeaderFirstHeader);
expect(colMgrHeader.hidden).toBe(groupHeaderFirstHeader.hidden);
expect(colMgrHeader.dataIndex).toBe(groupHeaderFirstHeader.dataIndex);
});
it('should return first sub-header regardless of visibility', function () {
var groupedHeader, colMgrHeader, groupHeaderFirstHeader;
groupedHeader = headerCtItems.items[3];
groupHeaderFirstHeader = groupedHeader.columnManager.getHeaderAtIndex(0);
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gam().getHeaderIndex(groupedHeader);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gam().getHeaderAtIndex(index);
expect(colMgrHeader).toBe(groupHeaderFirstHeader);
expect(colMgrHeader.hidden).toBe(true);
expect(colMgrHeader.text).toBe('Foo Price');
});
});
});
describe('visible only', function () {
describe('when argument is a column', function () {
it('should return the correct index for the header', function() {
expect(gvm().getHeaderIndex(headerCtItems.items[1])).toBe(0);
});
it("should return -1 if the column doesn't exist", function() {
column = new Ext.grid.column.Column();
expect(gvm().getHeaderIndex(column)).toBe(-1);
});
it('should not return a hidden sub-header', function () {
var header;
header = headerCtItems.items[0];
index = gvm().getHeaderIndex(header);
expect(header.hidden).toBe(true);
expect(index).toBe(-1);
});
it('should return a valid index', function () {
index = gvm().getHeaderIndex(headerCtItems.items[1]);
expect(index).not.toBe(-1);
// Will filter out the first hidden column in the stack.
expect(index).toBe(0);
});
it('should return the index of the header in its owner stack - rootHeader', function () {
index = gvm().getHeaderIndex(headerCtItems.items[3].items.items[2]);
expect(index).toBe(4);
});
it('should return the index of the header in its owner stack - groupHeader', function () {
// Note that this spec is using the same header as the previous spec to demonstrate the difference.
var groupHeader = headerCtItems.items[3];
index = groupHeader.visibleColumnManager.getHeaderIndex(groupHeader.items.items[2]);
expect(index).toBe(1);
});
});
describe('when argument is a group header', function () {
it('should return a valid index', function () {
index = gvm().getHeaderIndex(headerCtItems.items[2]);
expect(index).not.toBe(-1);
// Will filter out the second hidden column in the stack.
expect(index).toBe(1);
});
it('should return an index of the first leaf of group header', function () {
var colMgrHeader;
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gvm().getHeaderIndex(headerCtItems.items[2]);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gvm().getHeaderAtIndex(index);
// Remember, this is the index of the root header's visible col mgr.
expect(index).toBe(1);
expect(colMgrHeader.hidden).toBe(false);
expect(colMgrHeader.dataIndex).toBe('price');
});
it("should be a reference to the first leaf header in the grouped header's columnn manager", function () {
var groupedHeader, colMgrHeader, groupHeaderFirstHeader;
groupedHeader = headerCtItems.items[2];
groupHeaderFirstHeader = headerCtItems.items[2].visibleColumnManager.getHeaderAtIndex(0);
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gvm().getHeaderIndex(groupedHeader);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gvm().getHeaderAtIndex(index);
expect(colMgrHeader).toBe(groupHeaderFirstHeader);
expect(colMgrHeader.hidden).toBe(groupHeaderFirstHeader.hidden);
expect(colMgrHeader.dataIndex).toBe(groupHeaderFirstHeader.dataIndex);
});
it('should not return a hidden sub-header', function () {
var groupedHeader, colMgrHeader, groupHeaderFirstHeader;
groupedHeader = headerCtItems.items[3];
groupHeaderFirstHeader = groupedHeader.visibleColumnManager.getHeaderAtIndex(0);
// First, get the index from the column mgr. It will retrieve it from the group header's column mgr.
index = gvm().getHeaderIndex(groupedHeader);
// Next, get a reference to the actual header (top-level col mgr will have a ref to all sub-level headers).
colMgrHeader = gvm().getHeaderAtIndex(index);
expect(colMgrHeader).toBe(groupHeaderFirstHeader);
expect(colMgrHeader.hidden).toBe(false);
expect(colMgrHeader.text).toBe('Foo Phone');
});
});
});
});
describe('getHeaderAtIndex', function () {
var header, headerCtItems;
beforeEach(function () {
makeGrid([{
text: 'Name',
width: 100,
dataIndex: 'name',
hidden: true
},{
text: 'Email',
width: 100,
dataIndex: 'email'
}, {
text: 'Stock Price',
columns: [{
text: 'Price',
width: 75,
dataIndex: 'price'
}, {
text: 'Phone',
width: 80,
dataIndex: 'phone',
hidden: true
}, {
text: '% Change',
width: 40,
dataIndex: 'pctChange'
}]
}, {
text: 'Foo',
columns: [{
text: 'Foo Price',
width: 75,
dataIndex: 'price',
hidden: true
}, {
text: 'Foo Phone',
width: 80,
dataIndex: 'phone'
}, {
text: 'Foo % Change',
width: 40,
dataIndex: 'pctChange'
}]
}]);
headerCtItems = grid.headerCt.items;
});
afterEach(function () {
header = headerCtItems = null;
});
describe('all columns', function () {
it('should return a valid header', function () {
header = gam().getHeaderAtIndex(0);
expect(header).not.toBe(null);
expect(header.dataIndex).toBe('name');
});
it('should return the correct header from the index', function() {
expect(gam().getHeaderAtIndex(0).dataIndex).toBe('name');
});
it("should return null if the column doesn't exist", function() {
expect(gam().getHeaderAtIndex(50)).toBe(null);
});
it('should return the header regardless of visibility', function () {
var header2;
header = gam().getHeaderAtIndex(0);
header2 = gam().getHeaderAtIndex(1);
expect(header).not.toBe(null);
expect(header.hidden).toBe(true);
expect(header2).not.toBe(null);
expect(header2.hidden).toBe(false);
});
it('should return the header in its owner stack - rootHeader', function () {
header = gam().getHeaderAtIndex(0);
expect(header.text).toBe('Name');
});
it('should return the index of the header in its owner stack - groupHeader', function () {
                        // Note that this spec is using the same index as the previous spec to demonstrate the difference.
header = headerCtItems.items[3].columnManager.getHeaderAtIndex(0);
expect(header.text).toBe('Foo Price');
});
});
describe('visible only', function () {
it('should return the correct header from the index', function() {
expect(gvm().getHeaderAtIndex(0).dataIndex).toBe('email');
});
it("should return null if the column doesn't exist", function() {
expect(gvm().getHeaderAtIndex(50)).toBe(null);
});
it('should not return a hidden sub-header', function () {
header = gvm().getHeaderAtIndex(2);
expect(header.hidden).toBe(false);
expect(header.dataIndex).toBe('pctChange');
});
it('should return a valid header', function () {
header = gvm().getHeaderAtIndex(0);
expect(header).not.toBe(null);
expect(header.dataIndex).toBe('email');
});
it('should return the header in its owner stack - rootHeader', function () {
header = gvm().getHeaderAtIndex(0);
expect(header.text).toBe('Email');
});
it('should return the index of the header in its owner stack - groupHeader', function () {
// Note that this spec is using the same header as the previous spec to demonstrate the difference.
var groupHeader = headerCtItems.items[3];
                        header = groupHeader.visibleColumnManager.getHeaderAtIndex(0);
expect(header.text).toBe('Foo Phone');
});
});
});
describe('hidden columns', function() {
// Hidden at index 3/6
beforeEach(function(){
makeGrid(8, null, function(i){
return i > 0 && i % 3 === 0;
});
});
it("should return all columns when using getColumnManager", function(){
expect(ga().length).toBe(8);
});
it("should return only visible columns when using getVisibleColumnManager", function(){
expect(gv().length).toBe(6);
});
it("should update the collection when hiding a column", function(){
colRef[0].hide();
expect(gv().length).toBe(5);
});
it("should update the collection when showing a column", function(){
colRef[3].show();
expect(gv().length).toBe(7);
});
describe("getHeaderAtIndex", function(){
it("should return the column reference", function(){
expect(gvm().getHeaderAtIndex(3)).toBe(colRef[4]);
});
it("should return null if the index is out of bounds", function(){
expect(gvm().getHeaderAtIndex(7)).toBeNull();
});
});
describe("getHeaderById", function(){
it("should return the column reference by id", function(){
expect(gvm().getHeaderById('col1')).toBe(colRef[1]);
});
it("should return null if the id doesn't exist", function() {
expect(gvm().getHeaderById('col3')).toBeNull();
});
});
it("should return the first item", function(){
expect(gvm().getFirst()).toBe(colRef[0]);
});
it("should return the last item", function(){
expect(gvm().getLast()).toBe(colRef[7]);
});
describe("getNextSibling", function(){
it("should return the next sibling", function(){
expect(gvm().getNextSibling(colRef[2])).toBe(colRef[4]);
});
it("should return the null if the next sibling doesn't exist", function(){
expect(gvm().getNextSibling(colRef[3])).toBeNull();
});
});
describe("getPreviousSibling", function(){
it("should return the previous sibling", function(){
expect(gvm().getPreviousSibling(colRef[7])).toBe(colRef[5]);
});
it("should return the null if the previous sibling doesn't exist", function(){
expect(gvm().getPreviousSibling(colRef[6])).toBeNull();
});
});
});
describe("locking", function(){
// first 4 locked
beforeEach(function(){
makeGrid(10, null, null, function(i){
return i <= 3;
});
});
describe("global manager", function() {
it("should return both sets of columns", function(){
expect(ga().length).toBe(10);
});
it("should update the collection when adding to the locked side", function(){
grid.lockedGrid.headerCt.add({
text: 'Foo'
});
expect(ga().length).toBe(11);
});
it("should update the collection when adding to the unlocked side", function(){
grid.normalGrid.headerCt.add({
text: 'Foo'
});
expect(ga().length).toBe(11);
});
it("should update the collection when removing from the locked side", function(){
grid.lockedGrid.headerCt.remove(0);
expect(ga().length).toBe(9);
});
it("should update the collection when removing from the unlocked side", function(){
grid.normalGrid.headerCt.remove(0);
expect(ga().length).toBe(9);
});
it("should maintain the same size when locking an item", function(){
grid.lock(colRef[4]);
expect(ga().length).toBe(10);
});
it("should maintain the same size when unlocking an item", function(){
grid.unlock(colRef[0]);
expect(ga().length).toBe(10);
});
});
describe("locked side", function(){
var glm = function(){
return grid.lockedGrid.getColumnManager();
};
it("should only return the columns for this side", function(){
expect(glm().getColumns().length).toBe(4);
});
it("should update the collection when adding an item to this side", function(){
grid.lock(colRef[9]);
expect(glm().getColumns().length).toBe(5);
});
it("should update the collection when removing an item from this side", function(){
grid.unlock(colRef[0]);
expect(glm().getColumns().length).toBe(3);
});
describe("function", function(){
describe("getHeaderIndex", function() {
it("should return the correct index for the header", function() {
expect(glm().getHeaderIndex(colRef[2])).toBe(2);
});
it("should return -1 if the column doesn't exist", function(){
expect(glm().getHeaderIndex(colRef[5])).toBe(-1);
});
});
describe("getHeaderAtIndex", function(){
it("should return the column reference", function(){
expect(glm().getHeaderAtIndex(3)).toBe(colRef[3]);
});
it("should return null if the index is out of bounds", function(){
expect(glm().getHeaderAtIndex(6)).toBeNull();
});
});
describe("getHeaderById", function(){
it("should return the column reference by id", function(){
expect(glm().getHeaderById('col1')).toBe(colRef[1]);
});
it("should return null if the id doesn't exist", function() {
expect(glm().getHeaderById('col5')).toBeNull();
});
});
});
});
describe("unlocked side", function(){
var gum = function(){
return grid.normalGrid.getColumnManager();
};
it("should only return the columns for this side", function(){
expect(gum().getColumns().length).toBe(6);
});
it("should update the collection when adding an item to this side", function(){
grid.unlock(colRef[1]);
expect(gum().getColumns().length).toBe(7);
});
it("should update the collection when removing an item from this side", function(){
grid.lock(colRef[7]);
expect(gum().getColumns().length).toBe(5);
});
describe("function", function(){
var offset = 4;
describe("getHeaderIndex", function() {
it("should return the correct index for the header", function() {
expect(gum().getHeaderIndex(colRef[offset + 2])).toBe(2);
});
it("should return -1 if the column doesn't exist", function(){
expect(gum().getHeaderIndex(colRef[0])).toBe(-1);
});
});
describe("getHeaderAtIndex", function(){
it("should return the column reference", function(){
expect(gum().getHeaderAtIndex(3)).toBe(colRef[3 + offset]);
});
it("should return null if the index is out of bounds", function(){
expect(gum().getHeaderAtIndex(6)).toBeNull();
});
});
describe("getHeaderById", function(){
it("should return the column reference by id", function(){
expect(gum().getHeaderById('col6')).toBe(colRef[6]);
});
it("should return null if the id doesn't exist", function() {
expect(gum().getHeaderById('col2')).toBeNull();
});
});
});
});
});
});
describe("menu", function() {
it("should not allow menu to be shown when menuDisabled: true", function() {
makeGrid([{
dataIndex: 'field0',
width: 200,
filter: 'string',
menuDisabled: true
}], {
plugins: 'gridfilters'
});
// menuDisabled=true, shouldn't have a trigger
expect(colRef[0].triggerEl).toBeNull();
});
it("should not allow menu to be shown when grid is configured with enableColumnHide: false and sortableColumns: false", function() {
makeGrid([{
dataIndex: 'field0',
width: 200
}], {
enableColumnHide: false,
sortableColumns: false
});
expect(colRef[0].triggerEl).toBeNull();
});
it("should allow menu to be shown when requiresMenu: true (from plugin) and grid is configured with enableColumnHide: false and sortableColumns: false", function() {
makeGrid([{
dataIndex: 'field0',
width: 200,
filter: 'string'
}], {
enableColumnHide: false,
sortableColumns: false,
plugins: 'gridfilters'
});
var col = colRef[0],
menu;
col.triggerEl.show();
jasmine.fireMouseEvent(col.triggerEl.dom, 'click');
menu = col.activeMenu;
expect(menu.isVisible()).toBe(true);
expect(col.requiresMenu).toBe(true);
});
});
describe("sorting", function() {
it("should sort by dataIndex when clicking on the header with sortable: true", function() {
makeGrid([{
dataIndex: 'field0',
sortable: true
}]);
clickHeader(colRef[0]);
var sorters = store.getSorters();
expect(sorters.getCount()).toBe(1);
expect(sorters.first().getProperty()).toBe('field0');
expect(sorters.first().getDirection()).toBe('ASC');
});
it("should invert the sort order when clicking on a sorted column", function() {
makeGrid([{
dataIndex: 'field0',
sortable: true
}]);
clickHeader(colRef[0]);
var sorters = store.getSorters();
clickHeader(colRef[0]);
expect(sorters.getCount()).toBe(1);
expect(sorters.first().getProperty()).toBe('field0');
expect(sorters.first().getDirection()).toBe('DESC');
clickHeader(colRef[0]);
expect(sorters.getCount()).toBe(1);
expect(sorters.first().getProperty()).toBe('field0');
expect(sorters.first().getDirection()).toBe('ASC');
});
it("should not sort when configured with sortable false", function() {
makeGrid([{
dataIndex: 'field0',
sortable: false
}]);
clickHeader(colRef[0]);
expect(store.getSorters().getCount()).toBe(0);
});
it("should not sort when the grid is configured with sortableColumns: false", function() {
makeGrid([{
dataIndex: 'field0'
}], {
sortableColumns: false
});
clickHeader(colRef[0]);
expect(store.getSorters().getCount()).toBe(0);
});
});
describe("grouped columns", function() {
var baseCols;
function createGrid(cols, stateful) {
if (grid) {
grid.destroy();
grid = null;
}
makeGrid(cols, {
renderTo: null,
stateful: stateful,
stateId: 'foo'
});
}
function getCol(id) {
return grid.down('#' + id);
}
describe('when stateful', function () {
var col;
beforeEach(function () {
new Ext.state.Provider();
makeGrid([{
itemId: 'main1',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}, {
itemId: 'child3'
}]
}, {
itemId: 'main2',
columns: [{
itemId: 'child4'
}, {
itemId: 'child5'
}, {
itemId: 'child6'
}]
}], {
stateful: true,
stateId: 'foo'
});
});
afterEach(function () {
Ext.state.Manager.getProvider().clear();
col = null;
});
it('should work when toggling visibility on the groups', function () {
// See EXTJS-11661.
col = grid.down('#main2');
col.hide();
// Trigger the bug.
grid.saveState();
col.show();
// Now, select one of the col's children and query its hidden state.
// Really, we can check anything here, b/c if the bug wasn't fixed then
// a TypeError would be thrown in Ext.view.TableLayout#setColumnWidths.
expect(grid.down('#child6').hidden).toBe(false);
});
it('should not show a previously hidden subheader when the visibility of its group header is toggled', function () {
var subheader = grid.down('#child4');
subheader.hide();
col = grid.down('#main2');
col.hide();
col.show();
expect(subheader.hidden).toBe(true);
});
});
describe("column visibility", function() {
var cells;
afterEach(function () {
cells = null;
});
describe("hiding/show during construction", function() {
it("should be able to show a column during construction", function() {
expect(function() {
makeGrid([{
dataIndex: 'field1',
hidden: true,
listeners: {
added: function(c) {
c.show();
}
}
}]);
}).not.toThrow();
expect(grid.getVisibleColumnManager().getColumns()[0]).toBe(colRef[0]);
});
it("should be able to hide a column during construction", function() {
expect(function() {
makeGrid([{
dataIndex: 'field1',
listeners: {
added: function(c) {
c.hide();
}
}
}]);
}).not.toThrow();
expect(grid.getVisibleColumnManager().getColumns().length).toBe(0);
});
});
describe('when groupheader parent is hidden', function () {
describe('hidden at config time', function () {
beforeEach(function () {
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
hidden: true,
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}]
}]);
cells = grid.view.body.query('.x-grid-row td');
});
it('should hide child columns at config time if the parent is hidden', function () {
expect(grid.down('#child1').getInherited().hidden).toBe(true);
expect(grid.down('#child2').getInherited().hidden).toBe(true);
// Check the view.
expect(cells.length).toBe(1);
});
it('should not explicitly hide any child columns (they will be hierarchically hidden)', function () {
expect(grid.down('#child1').hidden).toBe(false);
expect(grid.down('#child2').hidden).toBe(false);
// Check the view.
expect(cells.length).toBe(1);
});
});
describe('hidden at run time', function () {
beforeEach(function () {
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}]
}]);
grid.down('#main2').hide();
cells = grid.view.body.query('.x-grid-row td');
});
it('should hide child columns at runtime if the parent is hidden', function () {
expect(grid.down('#child1').getInherited().hidden).toBe(true);
expect(grid.down('#child2').getInherited().hidden).toBe(true);
// Check the view.
expect(cells.length).toBe(1);
});
it('should not explicitly hide any child columns (they will be hierarchically hidden)', function () {
expect(grid.down('#child1').hidden).toBe(false);
expect(grid.down('#child2').hidden).toBe(false);
// Check the view.
expect(cells.length).toBe(1);
});
});
});
describe('when groupheader parent is shown', function () {
describe('shown at config time', function () {
beforeEach(function () {
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}]
}]);
cells = grid.view.body.query('.x-grid-row td');
});
it('should not hide child columns at config time if the parent is shown', function () {
expect(grid.down('#child1').getInherited().hidden).not.toBeDefined();
expect(grid.down('#child2').getInherited().hidden).not.toBeDefined();
// Check the view.
expect(cells.length).toBe(3);
});
it('should not explicitly hide any child columns (they will be hierarchically shown)', function () {
expect(grid.down('#child1').hidden).toBe(false);
expect(grid.down('#child2').hidden).toBe(false);
// Check the view.
expect(cells.length).toBe(3);
});
});
describe('shown at run time', function () {
beforeEach(function () {
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
hidden: true,
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}]
}]);
grid.down('#main2').show();
cells = grid.view.body.query('.x-grid-row td');
});
it('should show child columns at runtime if the parent is shown', function () {
expect(grid.down('#child1').getInherited().hidden).not.toBeDefined();
expect(grid.down('#child2').getInherited().hidden).not.toBeDefined();
// Check the view.
expect(cells.length).toBe(3);
});
it('should not explicitly hide any child columns (they will be hierarchically shown)', function () {
expect(grid.down('#child1').hidden).toBe(false);
expect(grid.down('#child2').hidden).toBe(false);
// Check the view.
expect(cells.length).toBe(3);
});
});
});
describe("hiding/showing children", function() {
beforeEach(function() {
baseCols = [{
itemId: 'col1',
columns: [{
itemId: 'col11'
}, {
itemId: 'col12'
}, {
itemId: 'col13'
}]
}, {
itemId: 'col2',
columns: [{
itemId: 'col21'
}, {
itemId: 'col22'
}, {
itemId: 'col23'
}]
}];
});
it('should not show a previously hidden subheader when the visibility of its group header is toggled', function () {
var subheader, col;
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}]
}]);
subheader = grid.down('#child1');
subheader.hide();
col = grid.down('#main2');
col.hide();
col.show();
expect(subheader.hidden).toBe(true);
});
it('should allow any subheader to be reshown when all subheaders are currently hidden', function () {
                        // There was a bug where a subheader could not be reshown when it and all of its sibling subheaders were currently hidden.
// See EXTJS-18515.
var subheader;
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}, {
itemId: 'child3'
}]
}]);
grid.down('#child1').hide();
grid.down('#child2').hide();
subheader = grid.down('#child3');
// Toggling would reveal the bug.
subheader.hide();
expect(subheader.hidden).toBe(true);
subheader.show();
expect(subheader.hidden).toBe(false);
});
it('should show the last hidden subheader if all subheaders are currently hidden when the group is reshown', function () {
var groupheader, subheader1, subheader2, subheader3;
makeGrid([{
itemId: 'main1'
}, {
itemId: 'main2',
columns: [{
itemId: 'child1'
}, {
itemId: 'child2'
}, {
itemId: 'child3'
}]
}]);
groupheader = grid.down('#main2');
subheader1 = grid.down('#child1').hide();
subheader3 = grid.down('#child3').hide();
                        subheader2 = grid.down('#child2');
subheader2.hide();
expect(subheader2.hidden).toBe(true);
groupheader.show();
// The last hidden subheader should now be shown.
expect(subheader2.hidden).toBe(false);
// Let's also demonstrate that the others are still hidden.
expect(subheader1.hidden).toBe(true);
expect(subheader3.hidden).toBe(true);
});
describe("initial configuration", function() {
it("should not hide the parent by default", function() {
createGrid(baseCols);
expect(getCol('col1').hidden).toBe(false);
});
it("should not hide the parent if not all children are hidden", function() {
baseCols[1].columns[2].hidden = baseCols[1].columns[0].hidden = true;
createGrid(baseCols);
expect(getCol('col2').hidden).toBe(false);
});
it("should hide the parent if all children are hidden", function() {
baseCols[1].columns[2].hidden = baseCols[1].columns[1].hidden = baseCols[1].columns[0].hidden = true;
createGrid(baseCols);
expect(getCol('col2').hidden).toBe(true);
});
});
describe("before render", function() {
it("should hide the parent when hiding all children", function() {
createGrid(baseCols);
getCol('col21').hide();
getCol('col22').hide();
getCol('col23').hide();
grid.render(Ext.getBody());
expect(getCol('col2').hidden).toBe(true);
});
it("should show the parent when showing a hidden child", function() {
baseCols[1].columns[2].hidden = baseCols[1].columns[1].hidden = baseCols[1].columns[0].hidden = true;
createGrid(baseCols);
getCol('col22').show();
grid.render(Ext.getBody());
expect(getCol('col2').hidden).toBe(false);
});
});
describe("after render", function() {
it("should hide the parent when hiding all children", function() {
createGrid(baseCols);
grid.render(Ext.getBody());
getCol('col21').hide();
getCol('col22').hide();
getCol('col23').hide();
expect(getCol('col2').hidden).toBe(true);
});
it("should show the parent when showing a hidden child", function() {
baseCols[1].columns[2].hidden = baseCols[1].columns[1].hidden = baseCols[1].columns[0].hidden = true;
createGrid(baseCols);
grid.render(Ext.getBody());
getCol('col22').show();
expect(getCol('col2').hidden).toBe(false);
});
it("should only trigger a single layout when hiding the last leaf in a group", function() {
baseCols[0].columns.splice(1, 2);
createGrid(baseCols);
grid.render(Ext.getBody());
var count = grid.componentLayoutCounter;
getCol('col11').hide();
expect(grid.componentLayoutCounter).toBe(count + 1);
});
it("should only trigger a single refresh when hiding the last leaf in a group", function() {
baseCols[0].columns.splice(1, 2);
createGrid(baseCols);
grid.render(Ext.getBody());
var view = grid.getView(),
count = view.refreshCounter;
getCol('col11').hide();
expect(view.refreshCounter).toBe(count + 1);
});
});
describe('nested stacked columns', function () {
// Test stacked group headers where the only child is the next group header in the hierarchy.
// The last (lowest in the stack) group header will contain multiple child items.
// For example:
//
                    //           +-----------------------------------+
                    //           |               col1                |
                    //           |-----------------------------------|
                    //           |               col2                |
                    //   other   |-----------------------------------|   other
                    //  headers  |               col3                |  headers
                    //           |-----------------------------------|
                    //           |               col4                |
                    //           |-----------------------------------|
                    //           | Field1 | Field2 | Field3 | Field4 |
                    //           |===================================|
                    //           |               view                |
                    //           +-----------------------------------+
//
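                    // e.g. assertHiddenState(3, true) asserts that col1, col2 and col3 are all hidden.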
function assertHiddenState(n, hiddenState) {
while (n) {
expect(getCol('col' + n).hidden).toBe(hiddenState);
--n;
}
}
describe('on hide', function () {
beforeEach(function() {
baseCols = [{
itemId: 'col1',
columns: [{
itemId: 'col2',
columns: [{
itemId: 'col3',
columns: [{
itemId: 'col4',
columns: [{
itemId: 'col41'
}, {
itemId: 'col42'
}, {
itemId: 'col43'
}, {
itemId: 'col44'
}]
}]
}]
}]
}, {
itemId: 'col5'
}]
});
it('should hide every group header above the target group header', function () {
createGrid(baseCols);
getCol('col4').hide();
assertHiddenState(4, true);
tearDown();
setup();
createGrid(baseCols);
getCol('col3').hide();
assertHiddenState(3, true);
tearDown();
setup();
createGrid(baseCols);
getCol('col2').hide();
assertHiddenState(2, true);
});
it('should reshow every group header above the target group header when toggled', function () {
createGrid(baseCols);
getCol('col4').hide();
assertHiddenState(4, true);
getCol('col4').show();
assertHiddenState(4, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col3').hide();
assertHiddenState(3, true);
getCol('col3').show();
assertHiddenState(3, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col2').hide();
assertHiddenState(2, true);
getCol('col2').show();
assertHiddenState(2, false);
});
describe('subheaders', function () {
it('should hide all ancestor group headers when hiding all subheaders in lowest group header', function () {
createGrid(baseCols);
getCol('col41').hide();
getCol('col42').hide();
getCol('col43').hide();
getCol('col44').hide();
assertHiddenState(4, true);
});
});
});
describe('on show', function () {
beforeEach(function() {
baseCols = [{
itemId: 'col1',
hidden: true,
columns: [{
itemId: 'col2',
hidden: true,
columns: [{
itemId: 'col3',
hidden: true,
columns: [{
itemId: 'col4',
hidden: true,
columns: [{
itemId: 'col41'
}, {
itemId: 'col42'
}, {
itemId: 'col43'
}, {
itemId: 'col44'
}]
}]
}]
}]
}, {
itemId: 'col5'
}]
});
it('should show every group header above the target group header', function () {
// Here we're showing that a header that is explicitly shown will have every header
// above it shown as well.
createGrid(baseCols);
getCol('col4').show();
assertHiddenState(4, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col3').show();
assertHiddenState(3, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col2').show();
assertHiddenState(2, false);
});
it('should show every group header in the chain no matter which group header is checked', function () {
// Here we're showing that a header that is explicitly shown will have every header
// in the chain shown, no matter which group header was clicked.
//
// Group headers are special in that they are auto-hidden when their subheaders are all
// hidden and auto-shown when the first subheader is reshown. They are the only headers
                            // that should ever be auto-shown or -hidden.
//
// It follows that since group headers are dictated by some automation depending upon the
                            // state of their child items that all group headers should be shown if any one in the
// hierarchy is shown since these special group headers only contain one child, which is
// the next group header in the stack.
createGrid(baseCols);
getCol('col4').show();
assertHiddenState(4, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col3').show();
assertHiddenState(4, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col2').show();
assertHiddenState(4, false);
tearDown();
setup();
createGrid(baseCols);
getCol('col1').show();
assertHiddenState(4, false);
});
it('should rehide every group header above the target group header when toggled', function () {
createGrid(baseCols);
getCol('col4').show();
assertHiddenState(4, false);
getCol('col4').hide();
assertHiddenState(4, true);
tearDown();
setup();
createGrid(baseCols);
getCol('col3').show();
assertHiddenState(3, false);
getCol('col3').hide();
assertHiddenState(3, true);
tearDown();
setup();
createGrid(baseCols);
getCol('col2').show();
assertHiddenState(2, false);
getCol('col2').hide();
assertHiddenState(2, true);
});
describe('subheaders', function () {
it('should not show any ancestor group headers when hiding all subheaders in lowest group header', function () {
createGrid(baseCols);
getCol('col41').hide();
getCol('col42').hide();
getCol('col43').hide();
getCol('col44').hide();
assertHiddenState(4, true);
});
it('should show all ancestor group headers when hiding all subheaders in lowest group header and then showing one', function () {
createGrid(baseCols);
getCol('col41').hide();
getCol('col42').hide();
getCol('col43').hide();
getCol('col44').hide();
assertHiddenState(4, true);
getCol('col42').show();
assertHiddenState(4, false);
});
it('should remember which subheader was last checked and restore its state when its group header is rechecked', function () {
var col, subheader, headerCt;
// Let's hide the 3rd menu item.
makeGrid(baseCols);
col = getCol('col4');
subheader = getCol('col43');
headerCt = grid.headerCt;
getCol('col41').hide();
getCol('col42').hide();
getCol('col44').hide();
subheader.hide();
expect(col.hidden).toBe(true);
// Get the menu item.
headerCt.getMenuItemForHeader(headerCt.menu, col).setChecked(true);
expect(subheader.hidden).toBe(false);
// Now let's hide the 2nd menu item.
tearDown();
setup();
makeGrid(baseCols);
col = getCol('col4');
subheader = getCol('col42');
headerCt = grid.headerCt;
getCol('col41').hide();
getCol('col43').hide();
getCol('col44').hide();
subheader.hide();
expect(col.hidden).toBe(true);
// Get the menu item.
headerCt.getMenuItemForHeader(headerCt.menu, col).setChecked(true);
expect(subheader.hidden).toBe(false);
});
it('should only show visible subheaders when all group headers are shown', function () {
var col;
createGrid(baseCols);
col = getCol('col4');
// All subheaders are visible.
col.show();
expect(col.visibleColumnManager.getColumns().length).toBe(4);
// Hide the group header and hide two subheaders.
col.hide();
getCol('col42').hide();
getCol('col43').hide();
// Only two subheaders should now be visible.
col.show();
expect(col.visibleColumnManager.getColumns().length).toBe(2);
});
});
});
});
});
describe("adding/removing children", function() {
beforeEach(function() {
baseCols = [{
itemId: 'col1',
columns: [{
itemId: 'col11'
}, {
itemId: 'col12'
}, {
itemId: 'col13'
}]
}, {
itemId: 'col2',
columns: [{
itemId: 'col21'
}, {
itemId: 'col22'
}, {
itemId: 'col23'
}]
}];
});
describe("before render", function() {
it("should hide the parent if removing the last hidden item", function() {
baseCols[0].columns[0].hidden = baseCols[0].columns[1].hidden = true;
createGrid(baseCols);
getCol('col13').destroy();
grid.render(Ext.getBody());
expect(getCol('col1').hidden).toBe(true);
});
it("should show the parent if adding a visible item and all items are hidden", function() {
baseCols[0].columns[0].hidden = baseCols[0].columns[1].hidden = baseCols[0].columns[2].hidden = true;
createGrid(baseCols);
getCol('col1').add({
itemId: 'col14'
});
grid.render(Ext.getBody());
expect(getCol('col1').hidden).toBe(false);
});
});
describe("after render", function() {
it("should hide the parent if removing the last hidden item", function() {
baseCols[0].columns[0].hidden = baseCols[0].columns[1].hidden = true;
createGrid(baseCols);
grid.render(Ext.getBody());
getCol('col13').destroy();
expect(getCol('col1').hidden).toBe(true);
});
it("should show the parent if adding a visible item and all items are hidden", function() {
baseCols[0].columns[0].hidden = baseCols[0].columns[1].hidden = baseCols[0].columns[2].hidden = true;
createGrid(baseCols);
grid.render(Ext.getBody());
getCol('col1').add({
itemId: 'col14'
});
expect(getCol('col1').hidden).toBe(false);
});
});
});
});
describe("removing columns from group", function() {
beforeEach(function() {
baseCols = [{
itemId: 'col1',
columns: [{
itemId: 'col11'
}, {
itemId: 'col12'
}, {
itemId: 'col13'
}]
}, {
itemId: 'col2',
columns: [{
itemId: 'col21'
}, {
itemId: 'col22'
}, {
itemId: 'col23'
}]
}];
createGrid(baseCols);
});
describe("before render", function() {
it("should destroy the group header when removing all columns", function() {
var headerCt = grid.headerCt,
col2 = getCol('col2');
expect(headerCt.items.indexOf(col2)).toBe(1);
getCol('col21').destroy();
getCol('col22').destroy();
getCol('col23').destroy();
expect(col2.destroyed).toBe(true);
expect(headerCt.items.indexOf(col2)).toBe(-1);
});
});
describe("after render", function() {
it("should destroy the group header when removing all columns", function() {
createGrid(baseCols);
grid.render(Ext.getBody());
var headerCt = grid.headerCt,
col2 = getCol('col2');
expect(headerCt.items.indexOf(col2)).toBe(1);
getCol('col21').destroy();
getCol('col22').destroy();
getCol('col23').destroy();
expect(col2.destroyed).toBe(true);
expect(headerCt.items.indexOf(col2)).toBe(-1);
});
});
});
});
describe("column operations & the view", function() {
describe('', function () {
beforeEach(function() {
makeGrid();
});
it("should update the view when adding a new header", function() {
grid.headerCt.insert(0, {
dataIndex: 'field4'
});
expect(getCellText(0, 0)).toBe('val5');
});
it("should update the view when moving an existing header", function() {
grid.headerCt.insert(0, colRef[1]);
expect(getCellText(0, 0)).toBe('val2');
});
it("should update the view when removing a header", function() {
grid.headerCt.remove(1);
expect(getCellText(0, 1)).toBe('val3');
});
it("should not refresh the view when doing a drag/drop move", function() {
var called = false,
header;
grid.getView().on('refresh', function() {
called = true;
});
// Simulate a DD here
header = colRef[0];
grid.headerCt.move(0, 3);
expect(getCellText(0, 3)).toBe('val1');
expect(called).toBe(false);
});
});
describe('toggling column visibility', function () {
var refreshCounter;
beforeEach(function () {
makeGrid();
refreshCounter = view.refreshCounter;
});
afterEach(function () {
refreshCounter = null;
});
describe('hiding', function () {
it('should update the view', function () {
colRef[0].hide();
expect(view.refreshCounter).toBe(refreshCounter + 1);
});
});
describe('showing', function () {
it('should update the view', function () {
colRef[0].hide();
refreshCounter = view.refreshCounter;
colRef[0].show();
expect(view.refreshCounter).toBe(refreshCounter + 1);
});
});
});
});
describe("locked/normal grid visibility", function() {
function expectVisible(locked, normal) {
expect(grid.lockedGrid.isVisible()).toBe(locked);
expect(grid.normalGrid.isVisible()).toBe(normal);
}
var failCount;
beforeEach(function() {
failCount = Ext.failedLayouts;
});
afterEach(function() {
expect(failCount).toBe(Ext.failedLayouts);
failCount = null;
});
describe("initial", function() {
it("should have both sides visible", function() {
makeGrid([{locked: true}, {}], {
syncTaskDelay: 0
});
expectVisible(true, true);
});
it("should have only the normal side visible if there are no locked columns", function() {
makeGrid([{}, {}], {
enableLocking: true,
syncTaskDelay: 0
});
expectVisible(false, true);
});
it("should have only the locked side visible if there are no normal columns", function() {
makeGrid([{locked: true}, {locked: true}], {
syncTaskDelay: 0
});
expectVisible(true, false);
});
});
describe("dynamic", function() {
beforeEach(function() {
makeGrid([{
locked: true,
itemId: 'col0'
}, {
locked: true,
itemId: 'col1'
}, {
itemId: 'col2'
}, {
itemId: 'col3'
}], {
syncTaskDelay: 0
});
});
describe("normal side", function() {
it("should not hide when removing a column but there are other normal columns", function() {
grid.normalGrid.headerCt.remove('col2');
expectVisible(true, true);
});
it("should hide when removing the last normal column", function() {
grid.normalGrid.headerCt.remove('col2');
grid.normalGrid.headerCt.remove('col3');
expectVisible(true, false);
});
it("should not hide when hiding a column but there are other visible normal columns", function() {
colRef[2].hide();
expectVisible(true, true);
});
it("should hide when hiding the last normal column", function() {
colRef[2].hide();
colRef[3].hide();
expectVisible(true, false);
});
});
describe("locked side", function() {
it("should not hide when removing a column but there are other locked columns", function() {
grid.lockedGrid.headerCt.remove('col0');
expectVisible(true, true);
});
it("should hide when removing the last locked column", function() {
grid.lockedGrid.headerCt.remove('col0');
grid.lockedGrid.headerCt.remove('col1');
expectVisible(false, true);
});
it("should not hide when hiding a column but there are other visible locked columns", function() {
colRef[0].hide();
expectVisible(true, true);
});
it("should hide when hiding the last locked column", function() {
colRef[0].hide();
colRef[1].hide();
expectVisible(false, true);
});
});
});
});
describe("rendering", function() {
beforeEach(function() {
makeGrid();
});
describe("first/last", function() {
it("should stamp x-grid-cell-first on the first column cell", function() {
var cls = grid.getView().firstCls;
expect(hasCls(getCell(0, 0), cls)).toBe(true);
expect(hasCls(getCell(0, 1), cls)).toBe(false);
expect(hasCls(getCell(0, 2), cls)).toBe(false);
expect(hasCls(getCell(0, 3), cls)).toBe(false);
});
it("should stamp x-grid-cell-last on the last column cell", function() {
var cls = grid.getView().lastCls;
expect(hasCls(getCell(0, 0), cls)).toBe(false);
expect(hasCls(getCell(0, 1), cls)).toBe(false);
expect(hasCls(getCell(0, 2), cls)).toBe(false);
expect(hasCls(getCell(0, 3), cls)).toBe(true);
});
it("should update the first class when moving the first column", function() {
grid.headerCt.insert(0, colRef[1]);
var cell = getCell(0, 0),
view = grid.getView(),
cls = view.firstCls;
expect(getCellText(0, 0)).toBe('val2');
expect(hasCls(cell, cls)).toBe(true);
expect(hasCls(getCell(0, 1), cls)).toBe(false);
});
it("should update the last class when moving the last column", function() {
// Suppress console warning about reusing existing id
spyOn(Ext.log, 'warn');
grid.headerCt.add(colRef[1]);
var cell = getCell(0, 3),
view = grid.getView(),
cls = view.lastCls;
expect(getCellText(0, 3)).toBe('val2');
expect(hasCls(cell, cls)).toBe(true);
expect(hasCls(getCell(0, 2), cls)).toBe(false);
});
});
describe("id", function() {
it("should stamp the id of the column in the cell", function() {
expect(hasCls(getCell(0, 0), 'x-grid-cell-col0')).toBe(true);
expect(hasCls(getCell(0, 1), 'x-grid-cell-col1')).toBe(true);
expect(hasCls(getCell(0, 2), 'x-grid-cell-col2')).toBe(true);
expect(hasCls(getCell(0, 3), 'x-grid-cell-col3')).toBe(true);
});
});
});
describe("hiddenHeaders", function() {
it("should lay out the hidden items so cells obtain correct width", function() {
makeGrid([{
width: 100
}, {
flex: 1
}, {
width: 200
}], {
hiddenHeaders: true
});
expect(getCell(0, 0).getWidth()).toBe(100);
expect(getCell(0, 1).getWidth()).toBe(totalWidth - 200 - 100);
expect(getCell(0, 2).getWidth()).toBe(200);
});
it("should lay out grouped column headers", function() {
makeGrid([{
width: 100
}, {
columns: [{
width: 200
}, {
width: 400
}, {
width: 100
}]
}, {
width: 200
}], {
hiddenHeaders: true
});
expect(getCell(0, 0).getWidth()).toBe(100);
expect(getCell(0, 1).getWidth()).toBe(200);
expect(getCell(0, 2).getWidth()).toBe(400);
expect(getCell(0, 3).getWidth()).toBe(100);
expect(getCell(0, 4).getWidth()).toBe(200);
});
});
describe("emptyCellText config", function () {
function expectEmptyText(column, rowIdx, colIdx) {
var cell = getCellInner(rowIdx, colIdx),
el = document.createElement('div');
// We're doing this because '&nbsp;' !== ' '. By letting the browser decode the entity, we
// can then do a comparison.
el.innerHTML = column.emptyCellText;
expect(cell.textContent || cell.innerText).toBe(el.textContent || el.innerText);
}
describe("rendering", function() {
beforeEach(function () {
makeGrid([{
width: 100
}, {
emptyCellText: 'derp',
width: 200
}]);
});
it("should use the default html entity for when there is no emptyCellText given", function () {
expectEmptyText(colRef[0], 0, 0);
});
it("should use the value of emptyCellText when configured", function () {
expectEmptyText(colRef[1], 0, 1);
});
});
describe("column update", function() {
describe("full row update", function() {
it("should use the empty text on update", function() {
makeGrid([{
width: 100,
dataIndex: 'field0',
renderer: function(v, meta, rec) {
return v;
}
}]);
// Renderer with >1 arg requires a full row redraw
store.getAt(0).set('field0', '');
expectEmptyText(colRef[0], 0, 0);
});
});
describe("cell update only", function() {
describe("producesHTML: true", function() {
it("should use the empty text on update", function() {
makeGrid([{
width: 100,
producesHTML: true,
dataIndex: 'field0'
}]);
store.getAt(0).set('field0', '');
expectEmptyText(colRef[0], 0, 0);
});
it("should use the empty text on update with a simple renderer", function() {
makeGrid([{
width: 100,
producesHTML: true,
dataIndex: 'field0',
renderer: Ext.identityFn
}]);
store.getAt(0).set('field0', '');
expectEmptyText(colRef[0], 0, 0);
});
});
describe("producesHTML: false", function() {
it("should use the empty text on update", function() {
makeGrid([{
width: 100,
producesHTML: false,
dataIndex: 'field0'
}]);
store.getAt(0).set('field0', '');
expectEmptyText(colRef[0], 0, 0);
});
it("should use the empty text on update with a simple renderer", function() {
makeGrid([{
width: 100,
producesHTML: false,
dataIndex: 'field0',
renderer: Ext.identityFn
}]);
store.getAt(0).set('field0', '');
expectEmptyText(colRef[0], 0, 0);
});
});
});
});
});
describe("non-column items in the header", function() {
it("should show non-columns as children", function() {
makeGrid([{
width: 100,
items: {
xtype: 'textfield',
itemId: 'foo'
}
}]);
expect(grid.down('#foo').isVisible(true)).toBe(true);
});
it("should have the hidden item as visible after showing an initially hidden column", function() {
makeGrid([{
width: 100,
items: {
xtype: 'textfield'
}
}, {
width: 100,
hidden: true,
items: {
xtype: 'textfield',
itemId: 'foo'
}
}]);
var field = grid.down('#foo');
expect(field.isVisible(true)).toBe(false);
field.ownerCt.show();
expect(field.isVisible(true)).toBe(true);
});
});
describe("reconfiguring", function() {
it("should destroy any old columns", function() {
var o = {};
makeGrid(4);
Ext.Array.forEach(colRef, function(col) {
col.on('destroy', function(c) {
o[col.getItemId()] = true;
});
});
grid.reconfigure(null, []);
expect(o).toEqual({
col0: true,
col1: true,
col2: true,
col3: true
});
});
describe("with locking", function() {
it("should resize the locked part to match the grid size", function() {
makeGrid(4, null, null, function(i) {
return i === 0;
});
var borderWidth = grid.lockedGrid.el.getBorderWidth('lr');
// Default column width
expect(grid.lockedGrid.getWidth()).toBe(100 + borderWidth);
grid.reconfigure(null, [{
locked: true,
width: 120
}, {
locked: true,
width: 170
}, {}, {}]);
expect(grid.lockedGrid.getWidth()).toBe(120 + 170 + borderWidth);
});
});
});
describe('column header borders', function() {
it('should show header borders by default, and turn them off dynamically', function() {
makeGrid();
expect(colRef[0].el.getBorderWidth('r')).toBe(1);
expect(colRef[1].el.getBorderWidth('r')).toBe(1);
expect(colRef[2].el.getBorderWidth('r')).toBe(1);
grid.setHeaderBorders(false);
expect(colRef[0].el.getBorderWidth('r')).toBe(0);
expect(colRef[1].el.getBorderWidth('r')).toBe(0);
expect(colRef[2].el.getBorderWidth('r')).toBe(0);
});
it('should have no borders if configured false, and should show them dynamically', function() {
makeGrid(null, {
headerBorders: false
});
expect(colRef[0].el.getBorderWidth('r')).toBe(0);
expect(colRef[1].el.getBorderWidth('r')).toBe(0);
expect(colRef[2].el.getBorderWidth('r')).toBe(0);
grid.setHeaderBorders(true);
expect(colRef[0].el.getBorderWidth('r')).toBe(1);
expect(colRef[1].el.getBorderWidth('r')).toBe(1);
expect(colRef[2].el.getBorderWidth('r')).toBe(1);
});
});
describe('column resize', function() {
it('should not fire drag events on headercontainer during resize', function() {
makeGrid();
var colWidth = colRef[0].getWidth(),
dragSpy = spyOnEvent(grid.headerCt.el, 'drag');
resizeColumn(colRef[0], 10);
expect(colRef[0].getWidth()).toBe(colWidth + 10);
expect(dragSpy).not.toHaveBeenCalled();
});
});
});
}
createSuite(false);
createSuite(true);
});<|fim▁end|> | }]); |
<|file_name|>ast.rs<|end_file_name|><|fim▁begin|>use std::cell::Cell;
use std::fmt;
use std::vec::Vec;
pub type Var = String;
pub type Atom = String;
pub enum TopLevel {
Fact(Term),
Query(Term)
}
#[derive(Clone, Copy)]
pub enum Level {
Shallow, Deep
}
impl fmt::Display for Level {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Level::Shallow => write!(f, "A"),
&Level::Deep => write!(f, "X")
}
}
}
#[derive(Clone, Copy)]
pub enum Reg {
ArgAndNorm(usize, usize),
Norm(usize)
}
impl Reg {
pub fn has_arg(&self) -> bool {
match self {
&Reg::ArgAndNorm(_, _) => true,
_ => false
}
}
pub fn norm(&self) -> usize {
match self {
&Reg::ArgAndNorm(_, norm) | &Reg::Norm(norm) => norm
}
}
}
pub enum Term {
Atom(Cell<usize>, Atom),
Clause(Cell<usize>, Atom, Vec<Box<Term>>),
Var(Cell<Reg>, Var)
}<|fim▁hole|> Var(Level, &'a Cell<Reg>, &'a Var)
}
#[derive(Clone)]
pub enum FactInstruction {
GetStructure(Level, Atom, usize, usize),
GetValue(usize, usize),
GetVariable(usize, usize),
Proceed,
UnifyVariable(usize),
UnifyValue(usize)
}
pub enum QueryInstruction {
Call(Atom, usize),
PutStructure(Level, Atom, usize, usize),
PutValue(usize, usize),
PutVariable(usize, usize),
SetVariable(usize),
SetValue(usize),
}
pub type CompiledFact = Vec<FactInstruction>;
pub type CompiledQuery = Vec<QueryInstruction>;
#[derive(Clone, Copy, PartialEq)]
pub enum Addr {
HeapCell(usize),
RegNum(usize)
}
#[derive(Clone)]
pub enum HeapCellValue {
NamedStr(usize, Atom),
Ref(usize),
Str(usize),
}
pub type Heap = Vec<HeapCellValue>;
pub type Registers = Vec<HeapCellValue>;
impl Term {
pub fn subterms(&self) -> usize {
match self {
&Term::Clause(_, _, ref terms) => terms.len(),
_ => 1
}
}
pub fn name(&self) -> &Atom {
match self {
&Term::Atom(_, ref atom)
| &Term::Var(_, ref atom)
| &Term::Clause(_, ref atom, _) => atom
}
}
pub fn arity(&self) -> usize {
match self {
&Term::Atom(_, _) | &Term::Var(_, _) => 0,
&Term::Clause(_, _, ref child_terms) => child_terms.len()
}
}
}<|fim▁end|> |
pub enum TermRef<'a> {
Atom(Level, &'a Cell<usize>, &'a Atom),
Clause(Level, &'a Cell<usize>, &'a Atom, &'a Vec<Box<Term>>), |
<|file_name|>RoutingResult.java<|end_file_name|><|fim▁begin|>package org.spincast.core.routing;
import java.util.List;
import org.spincast.core.exchange.RequestContext;
/**<|fim▁hole|> * The result of the router, when asked to find matches for
* a request.
*/
public interface RoutingResult<R extends RequestContext<?>> {
/**
* The handlers matching the route (a main handler + filters, if any),
* in the order in which they have to be called.
*/
public List<RouteHandlerMatch<R>> getRouteHandlerMatches();
/**
* The main route handler and its information, from the routing result.
*/
public RouteHandlerMatch<R> getMainRouteHandlerMatch();
}<|fim▁end|> | |
<|file_name|>if-without-else-result.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|> let a = if true { true };
//~^ ERROR if may be missing an else clause: expected `()`, found `bool` (expected (), found bool)
println!("{}", a);
}<|fim▁end|> | // option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() { |
<|file_name|>time-frame.controller.js<|end_file_name|><|fim▁begin|>(function() {
'use strict';
angular
.module('sentryApp')
.controller('TimeFrameController', TimeFrameController);
TimeFrameController.$inject = ['$scope', '$state', 'TimeFrame', 'ParseLinks', 'AlertService', 'paginationConstants', 'pagingParams'];
function TimeFrameController ($scope, $state, TimeFrame, ParseLinks, AlertService, paginationConstants, pagingParams) {
var vm = this;
vm.loadPage = loadPage;
vm.predicate = pagingParams.predicate;
vm.reverse = pagingParams.ascending;
vm.transition = transition;
vm.itemsPerPage = paginationConstants.itemsPerPage;
loadAll();
function loadAll () {
TimeFrame.query({
page: pagingParams.page - 1,
size: vm.itemsPerPage,
sort: sort()
}, onSuccess, onError);
function sort() {
var result = [vm.predicate + ',' + (vm.reverse ? 'asc' : 'desc')];
if (vm.predicate !== 'id') {
result.push('id');<|fim▁hole|> function onSuccess(data, headers) {
vm.links = ParseLinks.parse(headers('link'));
vm.totalItems = headers('X-Total-Count');
vm.queryCount = vm.totalItems;
vm.timeFrames = data;
vm.page = pagingParams.page;
}
function onError(error) {
AlertService.error(error.data.message);
}
}
function loadPage(page) {
vm.page = page;
vm.transition();
}
function transition() {
$state.transitionTo($state.$current, {
page: vm.page,
sort: vm.predicate + ',' + (vm.reverse ? 'asc' : 'desc'),
search: vm.currentSearch
});
}
}
})();<|fim▁end|> | }
return result;
} |
<|file_name|>CanvasD2D.cpp<|end_file_name|><|fim▁begin|>/* Copyright (C) 2013 Rainmeter Project Developers
*
* This Source Code Form is subject to the terms of the GNU General Public
* License; either version 2 of the License, or (at your option) any later
* version. If a copy of the GPL was not distributed with this file, You can
* obtain one at <https://www.gnu.org/licenses/gpl-2.0.html>. */
#include "StdAfx.h"
#include "CanvasD2D.h"
#include "TextFormatD2D.h"
#include "Util/DWriteFontCollectionLoader.h"
#include "Util/DWriteHelpers.h"
#include "Util/WICBitmapLockGDIP.h"
#include "../../Library/Util.h"
namespace {
D2D1_COLOR_F ToColorF(const Gdiplus::Color& color)
{
return D2D1::ColorF(color.GetR() / 255.0f, color.GetG() / 255.0f, color.GetB() / 255.0f, color.GetA() / 255.0f);
}
D2D1_RECT_F ToRectF(const Gdiplus::Rect& rect)
{
return D2D1::RectF((FLOAT)rect.X, (FLOAT)rect.Y, (FLOAT)(rect.X + rect.Width), (FLOAT)(rect.Y + rect.Height));
}
D2D1_RECT_F ToRectF(const Gdiplus::RectF& rect)
{
return D2D1::RectF(rect.X, rect.Y, rect.X + rect.Width, rect.Y + rect.Height);
}
} // namespace
namespace Gfx {
UINT CanvasD2D::c_Instances = 0;
Microsoft::WRL::ComPtr<ID2D1Factory1> CanvasD2D::c_D2DFactory;
Microsoft::WRL::ComPtr<IDWriteFactory1> CanvasD2D::c_DWFactory;
Microsoft::WRL::ComPtr<IDWriteGdiInterop> CanvasD2D::c_DWGDIInterop;
Microsoft::WRL::ComPtr<IWICImagingFactory> CanvasD2D::c_WICFactory;
CanvasD2D::CanvasD2D() : Canvas(),
m_Bitmap(),
m_TextAntiAliasing(false),
m_CanUseAxisAlignClip(false)
{
}
CanvasD2D::~CanvasD2D()
{
Finalize();
}
bool CanvasD2D::Initialize()
{
++c_Instances;
if (c_Instances == 1)
{
if (!IsWindows7OrGreater()) return false;
D2D1_FACTORY_OPTIONS fo = {};
#ifdef _DEBUG
fo.debugLevel = D2D1_DEBUG_LEVEL_INFORMATION;
#endif
HRESULT hr = D2D1CreateFactory(
D2D1_FACTORY_TYPE_SINGLE_THREADED,
fo,
c_D2DFactory.GetAddressOf());
if (FAILED(hr)) return false;
hr = CoCreateInstance(
CLSID_WICImagingFactory,
nullptr,
CLSCTX_INPROC_SERVER,
IID_IWICImagingFactory,
(LPVOID*)c_WICFactory.GetAddressOf());
if (FAILED(hr)) return false;
hr = DWriteCreateFactory(
DWRITE_FACTORY_TYPE_SHARED,
__uuidof(c_DWFactory),
(IUnknown**)c_DWFactory.GetAddressOf());
if (FAILED(hr)) return false;
hr = c_DWFactory->GetGdiInterop(c_DWGDIInterop.GetAddressOf());
if (FAILED(hr)) return false;
hr = c_DWFactory->RegisterFontCollectionLoader(Util::DWriteFontCollectionLoader::GetInstance());
if (FAILED(hr)) return false;
}
return true;
}
void CanvasD2D::Finalize()
{
--c_Instances;
if (c_Instances == 0)
{
c_D2DFactory.Reset();
c_WICFactory.Reset();
c_DWGDIInterop.Reset();
if (c_DWFactory)
{
c_DWFactory->UnregisterFontCollectionLoader(Util::DWriteFontCollectionLoader::GetInstance());
c_DWFactory.Reset();
}
}
}
void CanvasD2D::Resize(int w, int h)
{
__super::Resize(w, h);
m_Target.Reset();
m_Bitmap.Resize(w, h);
m_GdipBitmap.reset(new Gdiplus::Bitmap(w, h, w * 4, PixelFormat32bppPARGB, m_Bitmap.GetData()));
m_GdipGraphics.reset(new Gdiplus::Graphics(m_GdipBitmap.get()));
}
bool CanvasD2D::BeginDraw()
{
return true;
}
void CanvasD2D::EndDraw()
{
EndTargetDraw();
}
bool CanvasD2D::BeginTargetDraw()
{
if (m_Target) return true;
const D2D1_PIXEL_FORMAT format = D2D1::PixelFormat(
DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_PREMULTIPLIED);
const D2D1_RENDER_TARGET_PROPERTIES properties = D2D1::RenderTargetProperties(
D2D1_RENDER_TARGET_TYPE_DEFAULT,
format,
0.0f, // Default DPI
0.0f, // Default DPI
D2D1_RENDER_TARGET_USAGE_GDI_COMPATIBLE);
// A new Direct2D render target must be created for each sequence of Direct2D draw operations
// since we use GDI+ to render to the same pixel data. Without creating a new render target
// each time, it has been found that Direct2D may overwrite the draws by GDI+ since it is
// unaware of the changes made by GDI+. By creating a new render target and then releasing it
// before the next GDI+ draw operations, we ensure that the pixel data result is as expected.
// Once GDI+ drawing is no longer needed, we can change this to recreate the render target only
// when the bitmap size is changed.
HRESULT hr = c_D2DFactory->CreateWicBitmapRenderTarget(&m_Bitmap, properties, &m_Target);
if (SUCCEEDED(hr))
{
SetTextAntiAliasing(m_TextAntiAliasing);
m_Target->BeginDraw();
// Apply any transforms that occurred before creation of |m_Target|.
UpdateTargetTransform();
return true;
}
return false;
}
void CanvasD2D::EndTargetDraw()
{
if (m_Target)
{
m_Target->EndDraw();
m_Target.Reset();
}
}
Gdiplus::Graphics& CanvasD2D::BeginGdiplusContext()
{
EndTargetDraw();
return *m_GdipGraphics;
}
void CanvasD2D::EndGdiplusContext()
{
}
HDC CanvasD2D::GetDC()
{
EndTargetDraw();
HDC dcMemory = CreateCompatibleDC(nullptr);
SelectObject(dcMemory, m_Bitmap.GetHandle());
return dcMemory;
}
void CanvasD2D::ReleaseDC(HDC dc)
{
DeleteDC(dc);
}
bool CanvasD2D::IsTransparentPixel(int x, int y)
{
if (!(x >= 0 && y >= 0 && x < m_W && y < m_H)) return false;
bool transparent = true;
DWORD* data = (DWORD*)m_Bitmap.GetData();
if (data)
{
DWORD pixel = data[y * m_W + x]; // Top-down DIB.
transparent = (pixel & 0xFF000000) != 0;
}
return transparent;
}
void CanvasD2D::UpdateTargetTransform()
{
Gdiplus::Matrix gdipMatrix;
m_GdipGraphics->GetTransform(&gdipMatrix);
D2D1_MATRIX_3X2_F d2dMatrix;
gdipMatrix.GetElements((Gdiplus::REAL*)&d2dMatrix);
m_Target->SetTransform(d2dMatrix);
m_CanUseAxisAlignClip =
d2dMatrix._12 == 0.0f && d2dMatrix._21 == 0.0f &&
d2dMatrix._31 == 0.0f && d2dMatrix._32 == 0.0f;
}
void CanvasD2D::SetTransform(const Gdiplus::Matrix& matrix)
{
m_GdipGraphics->SetTransform(&matrix);
if (m_Target)
{
UpdateTargetTransform();
}
}
void CanvasD2D::ResetTransform()
{
m_GdipGraphics->ResetTransform();
if (m_Target)
{
m_Target->SetTransform(D2D1::Matrix3x2F::Identity());
}
}
void CanvasD2D::RotateTransform(float angle, float x, float y, float dx, float dy)
{
m_GdipGraphics->TranslateTransform(x, y);
m_GdipGraphics->RotateTransform(angle);
m_GdipGraphics->TranslateTransform(dx, dy);
if (m_Target)
{
UpdateTargetTransform();
}
}
void CanvasD2D::SetAntiAliasing(bool enable)
{
// TODO: Set m_Target aliasing?
m_GdipGraphics->SetSmoothingMode(
enable ? Gdiplus::SmoothingModeHighQuality : Gdiplus::SmoothingModeNone);
m_GdipGraphics->SetPixelOffsetMode(
enable ? Gdiplus::PixelOffsetModeHighQuality : Gdiplus::PixelOffsetModeDefault);
}
void CanvasD2D::SetTextAntiAliasing(bool enable)
{
// TODO: Add support for D2D1_TEXT_ANTIALIAS_MODE_CLEARTYPE?
m_TextAntiAliasing = enable;
if (m_Target)
{
m_Target->SetTextAntialiasMode(
m_TextAntiAliasing ? D2D1_TEXT_ANTIALIAS_MODE_GRAYSCALE : D2D1_TEXT_ANTIALIAS_MODE_ALIASED);
}
}
void CanvasD2D::Clear(const Gdiplus::Color& color)
{
if (!m_Target) // Use GDI+ if D2D render target has not been created.
{
m_GdipGraphics->Clear(color);
return;
}
m_Target->Clear(ToColorF(color));
}
void CanvasD2D::DrawTextW(const WCHAR* str, UINT strLen, const TextFormat& format, Gdiplus::RectF& rect,
const Gdiplus::SolidBrush& brush, bool applyInlineFormatting)
{
if (!BeginTargetDraw()) return;
Gdiplus::Color color;
brush.GetColor(&color);
Microsoft::WRL::ComPtr<ID2D1SolidColorBrush> solidBrush;
HRESULT hr = m_Target->CreateSolidColorBrush(ToColorF(color), solidBrush.GetAddressOf());
if (FAILED(hr)) return;
TextFormatD2D& formatD2D = (TextFormatD2D&)format;
if (!formatD2D.CreateLayout(
m_Target.Get(), str, strLen, rect.Width, rect.Height, !m_AccurateText && m_TextAntiAliasing)) return;
D2D1_POINT_2F drawPosition;
drawPosition.x = [&]()
{
if (!m_AccurateText)
{
const float xOffset = formatD2D.m_TextFormat->GetFontSize() / 6.0f;
switch (formatD2D.GetHorizontalAlignment())
{
case HorizontalAlignment::Left: return rect.X + xOffset;
case HorizontalAlignment::Right: return rect.X - xOffset;
}
}
return rect.X;
} ();
drawPosition.y = [&]()
{
// GDI+ compatibility.
float yPos = rect.Y - formatD2D.m_LineGap;
switch (formatD2D.GetVerticalAlignment())
{
case VerticalAlignment::Bottom: yPos -= formatD2D.m_ExtraHeight; break;
case VerticalAlignment::Center: yPos -= formatD2D.m_ExtraHeight / 2; break;
}
return yPos;
} ();
if (formatD2D.m_Trimming)
{
D2D1_RECT_F clipRect = ToRectF(rect);
if (m_CanUseAxisAlignClip)
{
m_Target->PushAxisAlignedClip(clipRect, D2D1_ANTIALIAS_MODE_ALIASED);
}
else
{
const D2D1_LAYER_PARAMETERS layerParams =
D2D1::LayerParameters(clipRect, nullptr, D2D1_ANTIALIAS_MODE_ALIASED);
m_Target->PushLayer(layerParams, nullptr);
}
}
// When different "effects" are used with inline coloring options, we need to
// remove the previous inline coloring, then reapply them (if needed) - instead
// of destroying/recreating the text layout.
formatD2D.ResetInlineColoring(solidBrush.Get(), strLen);
if (applyInlineFormatting)
{
formatD2D.ApplyInlineColoring(m_Target.Get(), &drawPosition);
}
m_Target->DrawTextLayout(drawPosition, formatD2D.m_TextLayout.Get(), solidBrush.Get());
if (applyInlineFormatting)
{
// Inline gradients require the drawing position, so in case that position
// changes, we need a way to reset it after drawing time so on the next
// iteration it will know the correct position.
formatD2D.ResetGradientPosition(&drawPosition);
}
if (formatD2D.m_Trimming)
{
if (m_CanUseAxisAlignClip)
{
m_Target->PopAxisAlignedClip();
}
else
{
m_Target->PopLayer();
}
}
}
bool CanvasD2D::MeasureTextW(const WCHAR* str, UINT strLen, const TextFormat& format, Gdiplus::RectF& rect)
{
TextFormatD2D& formatD2D = (TextFormatD2D&)format;
const DWRITE_TEXT_METRICS metrics = formatD2D.GetMetrics(str, strLen, !m_AccurateText);
rect.Width = metrics.width;
rect.Height = metrics.height;
return true;
}
bool CanvasD2D::MeasureTextLinesW(const WCHAR* str, UINT strLen, const TextFormat& format, Gdiplus::RectF& rect, UINT& lines)
{
TextFormatD2D& formatD2D = (TextFormatD2D&)format;
formatD2D.m_TextFormat->SetWordWrapping(DWRITE_WORD_WRAPPING_WRAP);
const DWRITE_TEXT_METRICS metrics = formatD2D.GetMetrics(str, strLen, !m_AccurateText, rect.Width);
rect.Width = metrics.width;
rect.Height = metrics.height;
lines = metrics.lineCount;
if (rect.Height > 0.0f)
{
// GDI+ draws multi-line text even though the last line may be clipped slightly at the
// bottom. This is a workaround to emulate that behaviour.
rect.Height += 1.0f;
}
else
{
// GDI+ compatibility: Zero height text has no visible lines.
lines = 0;
}
return true;
}
void CanvasD2D::DrawBitmap(Gdiplus::Bitmap* bitmap, const Gdiplus::Rect& dstRect, const Gdiplus::Rect& srcRect)
{
if (srcRect.Width != dstRect.Width || srcRect.Height != dstRect.Height)
{
// If the bitmap needs to be scaled, get rid of the D2D target and use the GDI+ code path
// to draw the bitmap. This is due to antialiasing differences between GDI+ and D2D on
// scaled bitmaps.
EndTargetDraw();
}
if (!m_Target) // Use GDI+ if D2D render target has not been created.
{
m_GdipGraphics->DrawImage(
bitmap, dstRect, srcRect.X, srcRect.Y, srcRect.Width, srcRect.Height, Gdiplus::UnitPixel);
return;
}
// The D2D DrawBitmap seems to perform exactly like Gdiplus::Graphics::DrawImage since we are
// not using a hardware accelerated render target. Nevertheless, we will use it to avoid
// the EndDraw() call needed for GDI+ drawing.
Util::WICBitmapLockGDIP* bitmapLock = new Util::WICBitmapLockGDIP();
Gdiplus::Rect lockRect(0, 0, bitmap->GetWidth(), bitmap->GetHeight());
Gdiplus::Status status = bitmap->LockBits(
&lockRect, Gdiplus::ImageLockModeRead, PixelFormat32bppPARGB, bitmapLock->GetBitmapData());
if (status == Gdiplus::Ok)
{
D2D1_BITMAP_PROPERTIES props = D2D1::BitmapProperties(
D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED));
Microsoft::WRL::ComPtr<ID2D1Bitmap> d2dBitmap;
HRESULT hr = m_Target->CreateSharedBitmap(
__uuidof(IWICBitmapLock), bitmapLock, &props, d2dBitmap.GetAddressOf());
if (SUCCEEDED(hr))
{
auto rDst = ToRectF(dstRect);
auto rSrc = ToRectF(srcRect);
m_Target->DrawBitmap(d2dBitmap.Get(), rDst, 1.0F, D2D1_BITMAP_INTERPOLATION_MODE_LINEAR, rSrc);
}
// D2D will still use the pixel data after this call (at the next Flush() or EndDraw()).
bitmap->UnlockBits(bitmapLock->GetBitmapData());
}
bitmapLock->Release();
}
void CanvasD2D::DrawMaskedBitmap(Gdiplus::Bitmap* bitmap, Gdiplus::Bitmap* maskBitmap, const Gdiplus::Rect& dstRect,
const Gdiplus::Rect& srcRect, const Gdiplus::Rect& srcRect2)
{
if (!BeginTargetDraw()) return;
auto rDst = ToRectF(dstRect);
auto rSrc = ToRectF(srcRect);
Util::WICBitmapLockGDIP* bitmapLock = new Util::WICBitmapLockGDIP();
Gdiplus::Rect lockRect(srcRect2);
Gdiplus::Status status = bitmap->LockBits(
&lockRect, Gdiplus::ImageLockModeRead, PixelFormat32bppPARGB, bitmapLock->GetBitmapData());
if (status == Gdiplus::Ok)
{
D2D1_BITMAP_PROPERTIES props = D2D1::BitmapProperties(
D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED));
Microsoft::WRL::ComPtr<ID2D1Bitmap> d2dBitmap;
HRESULT hr = m_Target->CreateSharedBitmap(
__uuidof(IWICBitmapLock), bitmapLock, &props, d2dBitmap.GetAddressOf());
if (SUCCEEDED(hr))
{
// Create bitmap brush from original |bitmap|.
Microsoft::WRL::ComPtr<ID2D1BitmapBrush> brush;
D2D1_BITMAP_BRUSH_PROPERTIES propertiesXClampYClamp = D2D1::BitmapBrushProperties(
D2D1_EXTEND_MODE_CLAMP,
D2D1_EXTEND_MODE_CLAMP,
D2D1_BITMAP_INTERPOLATION_MODE_LINEAR);
// "Move" and "scale" the |bitmap| to match the destination.
D2D1_MATRIX_3X2_F translate = D2D1::Matrix3x2F::Translation(rDst.left, rDst.top);
D2D1_MATRIX_3X2_F scale = D2D1::Matrix3x2F::Scale(
D2D1::SizeF((rDst.right - rDst.left) / (float)srcRect2.Width, (rDst.bottom - rDst.top) / (float)srcRect2.Height));
D2D1_BRUSH_PROPERTIES brushProps = D2D1::BrushProperties(1.0F, scale * translate);
hr = m_Target->CreateBitmapBrush(
d2dBitmap.Get(),
propertiesXClampYClamp,
brushProps,
brush.GetAddressOf());
// Load the |maskBitmap| and use the bitmap brush to "fill" its contents.
// Note: The image must be aliased when applying the opacity mask.
if (SUCCEEDED(hr))
{
Util::WICBitmapLockGDIP* maskBitmapLock = new Util::WICBitmapLockGDIP();
Gdiplus::Rect maskLockRect(0, 0, maskBitmap->GetWidth(), maskBitmap->GetHeight());
status = maskBitmap->LockBits(
&maskLockRect, Gdiplus::ImageLockModeRead, PixelFormat32bppPARGB, maskBitmapLock->GetBitmapData());
if (status == Gdiplus::Ok)
{
D2D1_BITMAP_PROPERTIES maskProps = D2D1::BitmapProperties(
D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED));
Microsoft::WRL::ComPtr<ID2D1Bitmap> d2dMaskBitmap;
hr = m_Target->CreateSharedBitmap(
__uuidof(IWICBitmapLock), maskBitmapLock, &props, d2dMaskBitmap.GetAddressOf());
if (SUCCEEDED(hr))
{
m_Target->SetAntialiasMode(D2D1_ANTIALIAS_MODE_ALIASED); // required
m_Target->FillOpacityMask(
d2dMaskBitmap.Get(),
brush.Get(),
D2D1_OPACITY_MASK_CONTENT_GRAPHICS,
&rDst,
<|fim▁hole|> m_Target->SetAntialiasMode(D2D1_ANTIALIAS_MODE_PER_PRIMITIVE);
}
maskBitmap->UnlockBits(bitmapLock->GetBitmapData());
}
maskBitmapLock->Release();
}
}
bitmap->UnlockBits(bitmapLock->GetBitmapData());
}
bitmapLock->Release();
}
void CanvasD2D::FillRectangle(Gdiplus::Rect& rect, const Gdiplus::SolidBrush& brush)
{
if (!m_Target) // Use GDI+ if D2D render target has not been created.
{
m_GdipGraphics->FillRectangle(&brush, rect);
return;
}
Gdiplus::Color color;
brush.GetColor(&color);
Microsoft::WRL::ComPtr<ID2D1SolidColorBrush> solidBrush;
HRESULT hr = m_Target->CreateSolidColorBrush(ToColorF(color), solidBrush.GetAddressOf());
if (SUCCEEDED(hr))
{
m_Target->FillRectangle(ToRectF(rect), solidBrush.Get());
}
}
} // namespace Gfx<|fim▁end|> | &rSrc);
|
<|file_name|>wflow_w3ra_new.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
Definition of the wflow_W3RA model.
---------------------------------------
The model is modified from the Australian Water Resources Assessment
Landscape (AWRA-L) model version 0.5
W3RA is documented in
van Dijk et al. (2013), Water Resour. Res., 49, 2729-2746, doi:10.1002/wrcr.20251
URL: http://onlinelibrary.wiley.com/doi/10.1002/wrcr.20251/abstract
More comprehensive documentation of AWRA-L version 0.5 can be found in:
Van Dijk, A.I.J.M. (2010) The Australian water resources assessment system
(version 0.5), 3.0.5. Technical description of the landscape hydrology model
(AWRA-L). WIRADA Technical Report, CSIRO Water for a Healthy Country
Flagship, Canberra.
URL: http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf
The section references below refer to the sections in the AWRA-L report.
Changes compared to that code are indicated, e.g. by commenting out
redundant code.
For further questions please contact [email protected]
Port to Python/PCRaster: Deltares
Usage:
wflow_W3RA -C case -R Runid -c inifile
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-c name of the config file (in the case directory)
$Author: schelle $
$Id: wflow_sceleton.py 898 2014-01-09 14:47:06Z schelle $
$Rev: 898 $
"""
import math
import os.path
import pcraster.framework
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
# TODO: Make the script HRU independent (loop over the nr of HRU's)
# TODO:
def usage(*args):
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
class WflowModel(pcraster.framework.DynamicModel):
"""
The user defined model class.
"""
def __init__(self, cloneMap, Dir, RunDir, configfile):
"""
*Required*
The init function **must** contain what is shown below. Other functionality
may be added by you if needed.
"""
pcraster.framework.DynamicModel.__init__(self)
pcr.setclone(Dir + "/staticmaps/" + cloneMap)
self.runId = RunDir
self.caseName = Dir
self.Dir = Dir
self.configfile = configfile
self.SaveDir = self.Dir + "/" + self.runId + "/"
def stateVariables(self):
"""
*Required*
Returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present. This is
where you specify the state variables of your model. If your model is stateless
this function must return an empty array (states = [])
"""
states = [
"S01",
"Ss1",
"Sd1",
"Mleaf1",
"FreeWater1",
"DrySnow1",
"LAI1",
"EVI1",
"Sg",
"Sr",
"S02",
"Ss2",
"Sd2",
"Mleaf2",
"FreeWater2",
"DrySnow2",
"LAI2",
"EVI2",
]
return states
def suspend(self):
"""
*Required*
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them.
This function is required.
"""
self.logger.info("Saving initial conditions...")
#: It is advised to use the wf_suspend() function
#: here which will suspend the variables that are given by stateVariables
#: function.
self.wf_suspend(self.SaveDir + "/outstate/")
def initial(self):
"""
*Required*
Initial part of the model, executed only once. It reads all static model
information (parameters) and sets up the variables used in modelling.
This function is required. The contents are free. However, in order to
easily connect to other models it is advised to adhere to the directory
structure used in the other models.
"""
#: pcraster option to calculate with units or cells. Not really an issue
#: in this model but always good to keep in mind.
pcr.setglobaloption("unittrue")
pcr.setglobaloption(
"radians"
) # Needed as W3RA was originally written in matlab
# SET GLOBAL PARAMETER VALUES (however not used in original script)
# Nhru=2
# K_gw_scale=0.0146
# K_gw_shape=0.0709
# K_rout_scale=0.1943
# K_rout_int=0.0589
# FdrainFC_scale=0.2909
# FdrainFC_shape=0.5154
# Sgref_scale=3.2220
# Sgref_shape=3.2860
# fday=0.5000
self.timestepsecs = int(
configget(self.config, "model", "timestepsecs", "86400")
)
self.UseETPdata = int(
configget(self.config, "model", "UseETPdata", "1")
) # 1: Use ETP data, 0: Compute ETP from meteorological variables
self.logger.debug("use DATA: " + str(self.UseETPdata))
self.basetimestep = 86400
self.SaveMapDir = self.Dir + "/" + self.runId + "/outmaps"
# Define here the W3RA mapstacks (best to read these via netcdf)
self.TMAX_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "TMAX", "/inmaps/TMAX"
)
self.TMIN_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "TMIN", "/inmaps/TMIN"
)
self.TDAY_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "TDAY", "/inmaps/TDAY"
)
self.EPOT_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "EPOT", "/inmaps/EPOT"
)
self.PRECIP_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "PRECIP", "/inmaps/PRECIP"
)
self.RAD_mapstack = self.Dir + configget(
self.config, "inputmapstacks", "RAD", "/inmaps/RAD"
)
self.WINDSPEED_mapstack = self.Dir + configget(
self.config,
"inputmapstacks",
"WINDSPEED",
"/inmaps/ClimatologyMapFiles/WINDS/WNDSPEED",
)
self.AIRPRESS_mapstack = self.Dir + configget(
self.config,
"inputmapstacks",
"AIRPRESS",
"/inmaps/ClimatologyMapFiles/AIRPRESS/AIRPRESS",
)
self.ALBEDO_mapstack = self.Dir + configget(
self.config,
"inputmapstacks",
"ALBEDO",
"/inmaps/ClimatologyMapFiles/ALBEDO/ALBEDO",
)
# self.WINDSPEED_mapstack=self.Dir + configget(self.config,"inputmapstacks","WINDSPEED","/inmaps/WIND")
# self.AIRPRESS_mapstack=self.Dir + configget(self.config,"inputmapstacks","AIRPRESS","/inmaps/PRES")
self.Altitude = pcr.readmap(self.Dir + "/staticmaps/wflow_dem")
self.latitude = pcr.ycoordinate(pcr.boolean(self.Altitude))
# Add reading of parameters here
self.K_gw = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/K_gw.map"), 0.0, fail=True
)
self.K_rout = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/K_rout.map"), 0.0, fail=True
)
self.Sgref = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Sgref.map"), 0.0, fail=True
)
self.alb_dry1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/alb_dry.map"), 0.0, fail=True
)
self.alb_wet1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/alb_wet.map"), 0.0, fail=True
)
self.beta1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/beta.map"), 0.0, fail=True
)
self.cGsmax1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/cGsmax.map"), 0.0, fail=True
)
self.ER_frac_ref1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/ER_frac_ref.map"), 0.0, fail=True
)
self.FdrainFC1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FdrainFC.map"), 0.0, fail=True
)
self.Fgw_conn1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Fgw_conn.map"), 0.0, fail=True
)
self.Fhru1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Fhru.map"), 0.0, fail=True
)
self.SLA1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SLA.map"), 0.0, fail=True
)
self.LAIref1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/LAIref.map"), 0.0, fail=True
)
self.FsoilEmax1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FsoilEmax.map"), 0.0, fail=True
)
self.fvegref_G1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/fvegref_G.map"), 0.0, fail=True
)
self.FwaterE1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FwaterE.map"), 0.0, fail=True
)
self.Gfrac_max1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Gfrac_max.map"), 0.0, fail=True
)
self.hveg1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/hveg.map"), 0.0, fail=True
)
self.InitLoss1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/InitLoss.map"), 0.0, fail=True
)
self.LAImax1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/LAImax.map"), 0.0, fail=True
)
self.PrefR1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/PrefR.map"), 0.0, fail=True
)
self.S_sls1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/S_sls.map"), 0.0, fail=True
)
self.S0FC1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/S0FC.map"), 0.0, fail=True
)
self.SsFC1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SsFC.map"), 0.0, fail=True
)
self.SdFC1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SdFC.map"), 0.0, fail=True
)
self.Vc1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Vc.map"), 0.0, fail=True
)
self.w0ref_alb1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/w0ref_alb.map"), 0.0, fail=True
)
self.Us01 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Us0.map"), 0.0, fail=True
)
self.Ud01 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Ud0.map"), 0.0, fail=True
)
self.wslimU1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/wslimU.map"), 0.0, fail=True
)
self.wdlimU1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/wdlimU.map"), 0.0, fail=True
)
self.w0limE1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/w0limE.map"), 0.0, fail=True
)
self.Tgrow1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Tgrow.map"), 0.0, fail=True
)
self.Tsenc1 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Tsenc.map"), 0.0, fail=True
)
self.alb_dry2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/alb_dry2.map"), 0.0, fail=True
)
self.alb_wet2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/alb_wet2.map"), 0.0, fail=True
)
self.beta2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/beta2.map"), 0.0, fail=True
)
self.cGsmax2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/cGsmax2.map"), 0.0, fail=True
)
self.ER_frac_ref2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/ER_frac_ref2.map"), 0.0, fail=True
)
self.FdrainFC2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FdrainFC2.map"), 0.0, fail=True
)
self.Fgw_conn2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Fgw_conn2.map"), 0.0, fail=True
)
self.Fhru2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Fhru2.map"), 0.0, fail=True
)
self.SLA2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SLA2.map"), 0.0, fail=True
)
self.LAIref2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/LAIref2.map"), 0.0, fail=True
)
self.FsoilEmax2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FsoilEmax2.map"), 0.0, fail=True
)
self.fvegref_G2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/fvegref_G2.map"), 0.0, fail=True
)
self.FwaterE2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/FwaterE2.map"), 0.0, fail=True
)
self.Gfrac_max2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Gfrac_max2.map"), 0.0, fail=True
)
self.hveg2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/hveg2.map"), 0.0, fail=True
)
self.InitLoss2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/InitLoss2.map"), 0.0, fail=True
)
self.LAImax2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/LAImax2.map"), 0.0, fail=True
)
self.PrefR2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/PrefR2.map"), 0.0, fail=True
)
self.S_sls2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/S_sls2.map"), 0.0, fail=True
)
self.S0FC2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/S0FC2.map"), 0.0, fail=True
)
self.SsFC2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SsFC2.map"), 0.0, fail=True
)
self.SdFC2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/SdFC2.map"), 0.0, fail=True
)
self.Vc2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Vc2.map"), 0.0, fail=True
)
self.w0ref_alb2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/w0ref_alb2.map"), 0.0, fail=True
)
self.Us02 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Us02.map"), 0.0, fail=True
)
self.Ud02 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Ud02.map"), 0.0, fail=True
)
self.wslimU2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/wslimU2.map"), 0.0, fail=True
)
self.wdlimU2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/wdlimU2.map"), 0.0, fail=True
)
self.w0limE2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/w0limE2.map"), 0.0, fail=True
)
self.Tgrow2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Tgrow2.map"), 0.0, fail=True
)
self.Tsenc2 = self.wf_readmap(
os.path.join(self.Dir, "staticmaps/Tsenc2.map"), 0.0, fail=True
)
self.wf_multparameters()
# Static, for the computation of Aerodynamic conductance (3.7)
self.fh1 = pcr.ln(813.0 / self.hveg1 - 5.45)
self.fh2 = pcr.ln(813.0 / self.hveg2 - 5.45)
self.ku2_1 = 0.305 / (self.fh1 * (self.fh1 + 2.3))
self.ku2_2 = 0.305 / (self.fh2 * (self.fh2 + 2.3))
self.logger.info("Starting Dynamic run...")
def resume(self):
"""
*Required*
This function is required. Read initial state maps (they are output of a
previous call to suspend()). The implementation shown here is the most basic
setup needed.
"""
self.logger.info("Reading initial conditions...")
#: It is advised to use the wf_resume() function
#: here which picks up the variables saved by a call to wf_suspend()
try:
self.wf_resume(self.Dir + "/instate/")
except:
self.logger.warning("Cannot load initial states, setting to default")
for s in self.stateVariables():
exec("self." + s + " = pcr.cover(1.0)")
def default_summarymaps(self):
"""
*Optional*
Return a default list of variables to report as summary maps in the outsum dir.
"""
return []
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
If you use this, make sure to call wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
modelparameters = []
# Static model parameters e.g.
# modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# 3: Input time series ###################################################
# self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
# "/inmaps/P") # timeseries for rainfall
# self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
# "/inmaps/PET") # timeseries for rainfall"/inmaps/PET" # potential evapotranspiration
# self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
# "/inmaps/TEMP") # timeseries for rainfall "/inmaps/TEMP" # global radiation
# self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
# "/inmaps/IF") # timeseries for rainfall "/inmaps/IF" # in/outflow locations (abstractions)
# Meteo and other forcing
# modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
# modelparameters.append(self.ParamType(name="PotenEvap",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
# modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
# modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
def dynamic(self):
"""
*Required*
This is where all the time dependent functions are executed. Time dependent
output should also be saved here.
"""
# print 'useETPdata' , self.UseETPdata
# Put the W3RA here. Stuff from W3RA_timestep_model.m
# read meteo from file
self.logger.debug("Running for: " + str(self.currentdatetime))
self.PRECIP = pcr.cover(
self.wf_readmap(self.PRECIP_mapstack, 0.0), pcr.scalar(0.0)
) # mm
if self.UseETPdata == 1:
self.TDAY = pcr.cover(
self.wf_readmap(self.TDAY_mapstack, 10.0), pcr.scalar(10.0)
) # T in degC
self.EPOT = pcr.cover(
self.wf_readmap(self.EPOT_mapstack, 0.0), pcr.scalar(0.0)
) # mm
# self.WINDSPEED=pcr.cover(self.wf_readmapClimatology(self.WINDSPEED_mapstack, default=1.0), pcr.scalar(1.0))
# self.AIRPRESS=pcr.cover(self.wf_readmapClimatology(self.AIRPRESS_mapstack, default=980.0), pcr.scalar(980.0))
# print "Using climatology for wind, air pressure and albedo."
elif self.UseETPdata == 0:
self.TMIN = pcr.cover(
self.wf_readmap(self.TMIN_mapstack, 10.0), pcr.scalar(10.0)
) # T in degC
self.TMAX = pcr.cover(
self.wf_readmap(self.TMAX_mapstack, 10.0), pcr.scalar(10.0)
) # T in degC
self.RAD = pcr.cover(
self.wf_readmap(self.RAD_mapstack, 10.0), pcr.scalar(10.0)
) # W m-2 s-1
self.WINDSPEED = pcr.cover(
self.wf_readmap(self.WINDSPEED_mapstack, 10.0), pcr.scalar(10.0)
) # ms-1
self.AIRPRESS = pcr.cover(
self.wf_readmap(self.AIRPRESS_mapstack, 10.0), pcr.scalar(10.0)
) # Pa
self.ALBEDO = pcr.cover(
self.wf_readmapClimatology(self.ALBEDO_mapstack, default=0.1),
pcr.scalar(0.1),
)
self.wf_multparameters()
doy = self.currentdatetime.timetuple().tm_yday
# conversion daylength
pcr.setglobaloption("radians")
m = pcr.scalar(1) - math.tan(
(self.latitude * pcr.scalar(math.pi) / pcr.scalar(180))
) * math.tan(
(
(pcr.scalar(23.439) * pcr.scalar(math.pi) / pcr.scalar(180))
* cos(
pcr.scalar(2)
* pcr.scalar(math.pi)
* (doy + pcr.scalar(9))
/ pcr.scalar(365.25)
)
)
)
self.fday = pcr.min(
pcr.max(
pcr.scalar(0.02),
pcr.scalar(
pcr.acos(
pcr.scalar(1)
- pcr.min(pcr.max(pcr.scalar(0), m), pcr.scalar(2))
)
)
/ pcr.scalar(math.pi),
),
pcr.scalar(1),
) # fraction daylength
# Assign forcing and estimate effective meteorological variables
Pg = self.PRECIP # mm
if self.UseETPdata == 1:
Ta = self.TDAY # T in degC
T24 = self.TDAY # T in degC
elif self.UseETPdata == 0:
Rg = pcr.max(
self.RAD, pcr.scalar(0.0001)<|fim▁hole|> T24 = self.TMIN + pcr.scalar(0.5) * (self.TMAX - self.TMIN) # T in degC
pex = pcr.min(
pcr.scalar(17.27) * (self.TMIN) / (pcr.scalar(237.3) + self.TMIN),
pcr.scalar(10),
) # T in degC
pe = pcr.min(
pcr.scalar(610.8) * (pcr.exp(pex)), pcr.scalar(10000.0)
) # Mean actual vapour pressure, from dewpoint temperature
# rescale factor because windspeed climatology is at 2m
WindFactor = 1.0
u2 = (
pcr.scalar(WindFactor)
* self.WINDSPEED
* (pcr.scalar(1) - (pcr.scalar(1) - self.fday) * pcr.scalar(0.25))
/ self.fday
)
self.u2 = (
pcr.scalar(WindFactor)
* self.WINDSPEED
* (pcr.scalar(1) - (pcr.scalar(1) - self.fday) * pcr.scalar(0.25))
/ self.fday
)
pair = self.AIRPRESS # already in Pa
# diagnostic equations
self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
self.LAI2 = self.SLA2 * self.Mleaf2 # (5.3)
fveg1 = pcr.max(1 - pcr.exp(-self.LAI1 / self.LAIref1), 0.000001) # (5.3)
fveg2 = pcr.max(1 - pcr.exp(-self.LAI2 / self.LAIref2), 0.000001)
# Vc = pcr.max(0,EVI-0.07)/fveg
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
w01 = self.S01 / self.S0FC1 # (2.1)
w02 = self.S02 / self.S0FC2
ws1 = self.Ss1 / self.SsFC1 # (2.1)
ws2 = self.Ss2 / self.SsFC2
wd1 = self.Sd1 / self.SdFC1 # (2.1)
wd2 = self.Sd2 / self.SdFC2 # (2.1)
TotSnow1 = self.FreeWater1 + self.DrySnow1
TotSnow2 = self.FreeWater2 + self.DrySnow2
wSnow1 = self.FreeWater1 / (TotSnow1 + 1e-5)
wSnow2 = self.FreeWater2 / (TotSnow2 + 1e-5)
# Spatialise catchment fractions
Sgfree = pcr.max(self.Sg, 0.0)
# JS: Not sure if this is translated properly....
# for i=1:par.Nhru
fwater1 = pcr.min(0.005, (0.007 * self.Sr ** 0.75))
fwater2 = pcr.min(0.005, (0.007 * self.Sr ** 0.75))
fsat1 = pcr.min(
1.0, pcr.max(pcr.min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref)
)
fsat2 = pcr.min(
1.0, pcr.max(pcr.min(0.005, 0.007 * self.Sr ** 0.75), Sgfree / self.Sgref)
)
Sghru1 = self.Sg
Sghru2 = self.Sg
# CALCULATION OF PET
# Conversions and coefficients (3.1)
pesx = pcr.min(
(pcr.scalar(17.27) * Ta / (pcr.scalar(237.3) + Ta)), pcr.scalar(10)
)
pes = pcr.min(
pcr.scalar((pcr.scalar(610.8)) * pcr.exp(pesx)), pcr.scalar(10000)
) # saturated vapour pressure
# fRH = pe/pes # relative air humidity -------------- check
cRE = 0.03449 + 4.27e-5 * Ta
# Caero = self.fday*0.176*(1+Ta/209.1)*(pair-0.417*pe)*(1-fRH) -------------- check
# keps = 1.4e-3*((Ta/187)**2+Ta/107+1)*(6.36*pair+pe)/pes
# Aerodynamic conductance (3.7)
ga1 = self.ku2_1 * u2
ga2 = self.ku2_2 * u2
if self.UseETPdata == 1:
self.E01 = pcr.max(self.EPOT, 0)
self.E02 = pcr.max(self.EPOT, 0)
keps = (
0.655e-3 * pair / pes
) # See Appendix A3 (http://www.clw.csiro.au/publications/waterforahealthycountry/2010/wfhc-aus-water-resources-assessment-system.pdf) -------------------------------- check!
elif self.UseETPdata == 0:
ns_alb = self.ALBEDO
Rgeff = Rg / self.fday
# shortwave radiation balance (3.2)
# alb_veg = 0.452*Vc
# alb_soil = alb_wet+(alb_dry-alb_wet)*exp(-w0/w0ref_alb)
# new equations for snow albedo
alb_snow1 = 0.65 - 0.2 * wSnow1 # assumed; ideally some lit research needed
alb_snow2 = 0.65 - 0.2 * wSnow2
fsnow1 = pcr.min(
1.0, 0.05 * TotSnow1
) # assumed; ideally some lit research needed
fsnow2 = pcr.min(1.0, 0.05 * TotSnow2)
# alb = fveg*alb_veg+(fsoil-fsnow)*alb_soil +fsnow*alb_snow
# alb = albedo
alb1 = (1 - fsnow1) * ns_alb + fsnow1 * alb_snow1
alb2 = (1 - fsnow2) * ns_alb + fsnow2 * alb_snow2
RSn1 = (1 - alb1) * Rgeff
RSn2 = (1 - alb2) * Rgeff
# long wave radiation balance (3.3 to 3.5)
StefBolz = 5.67e-8
Tkelv = Ta + 273.16
self.RLin = (0.65 * (pe / Tkelv) ** 0.14) * StefBolz * Tkelv ** 4 # (3.3)
RLout = StefBolz * Tkelv ** 4.0 # (3.4)
self.RLn = self.RLin - RLout
self.fGR1 = self.Gfrac_max1 * (1 - pcr.exp(-fsoil1 / self.fvegref_G1))
self.fGR2 = self.Gfrac_max2 * (
1 - pcr.exp(-fsoil2 / self.fvegref_G2)
) # (3.5)
self.Rneff1 = (RSn1 + self.RLn) * (1 - self.fGR1)
self.Rneff2 = (RSn2 + self.RLn) * (1 - self.fGR2)
fRH = pe / pes # relative air humidity
Caero = (
self.fday * 0.176 * (1 + Ta / 209.1) * (pair - 0.417 * pe) * (1 - fRH)
) # -------------- check
keps = 1.4e-3 * ((Ta / 187) ** 2 + Ta / 107 + 1) * (6.36 * pair + pe) / pes
# Potential evaporation
kalpha1 = 1 + Caero * ga1 / self.Rneff1
kalpha2 = 1 + Caero * ga2 / self.Rneff2
self.E01 = cRE * (1 / (1 + keps)) * kalpha1 * self.Rneff1 * self.fday
self.E02 = cRE * (1 / (1 + keps)) * kalpha2 * self.Rneff2 * self.fday
self.E01 = pcr.max(self.E01, 0)
self.E02 = pcr.max(self.E02, 0)
# CALCULATION OF ET FLUXES AND ROOT WATER UPTAKE
# Root water uptake constraint (4.4)
Usmax1 = pcr.max(
0, self.Us01 * pcr.min(1, ws1 / self.wslimU1)
) ## zero values because ws1 contains zero values (see line 116)
Usmax2 = pcr.max(
0, self.Us02 * pcr.min(1, ws2 / self.wslimU2)
) ## zero values because ws2 contains zero values (see line 117)
Udmax1 = pcr.max(
0, self.Ud01 * pcr.min(1, wd1 / self.wdlimU1)
) ## zero values because wd1 contains zero values (see line 118)
Udmax2 = pcr.max(
0, self.Ud02 * pcr.min(1, wd2 / self.wdlimU2)
) ## zero values because wd2 contains zero values (see line 119)
# U0max = pcr.max(0, Us0*min(1,w0/wslimU))
U0max1 = pcr.scalar(0)
U0max2 = pcr.scalar(0)
Utot1 = pcr.max(Usmax1, pcr.max(Udmax1, U0max1))
Utot2 = pcr.max(Usmax2, pcr.max(Udmax2, U0max2))
# Maximum transpiration (4.3)
Gsmax1 = self.cGsmax1 * self.Vc1
gs1 = fveg1 * Gsmax1
ft1 = 1 / (1 + (keps / (1 + keps)) * ga1 / gs1)
Etmax1 = ft1 * self.E01
Gsmax2 = self.cGsmax2 * self.Vc2
gs2 = fveg2 * Gsmax2
ft2 = 1 / (1 + (keps / (1 + keps)) * ga2 / gs2)
Etmax2 = ft2 * self.E02
# Actual transpiration (4.1)
Et1 = pcr.min(Utot1, Etmax1)
Et2 = pcr.min(Utot2, Etmax2)
# # Root water uptake distribution (2.3)
U01 = pcr.max(
pcr.min((U0max1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.S01 - 1e-2), 0
)
Us1 = pcr.max(
pcr.min((Usmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Ss1 - 1e-2), 0
)
Ud1 = pcr.max(
pcr.min((Udmax1 / (U0max1 + Usmax1 + Udmax1)) * Et1, self.Sd1 - 1e-2), 0
)
Et1 = U01 + Us1 + Ud1 # to ensure mass balance
U02 = pcr.max(
pcr.min((U0max2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.S02 - 1e-2), 0
)
Us2 = pcr.max(
pcr.min((Usmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Ss2 - 1e-2), 0
)
Ud2 = pcr.max(
pcr.min((Udmax2 / (U0max2 + Usmax2 + Udmax2)) * Et2, self.Sd2 - 1e-2), 0
)
Et2 = U02 + Us2 + Ud2
# Soil evaporation (4.5)
self.S01 = pcr.max(0, self.S01 - U01)
self.S02 = pcr.max(0, self.S02 - U02)
w01 = self.S01 / self.S0FC1 # (2.1)
w02 = self.S02 / self.S0FC2 # (2.1)
fsoilE1 = self.FsoilEmax1 * pcr.min(1, w01 / self.w0limE1)
fsoilE2 = self.FsoilEmax2 * pcr.min(1, w02 / self.w0limE2)
Es1 = pcr.max(
0, pcr.min(((1 - fsat1) * fsoilE1 * (self.E01 - Et1)), self.S01 - 1e-2)
)
Es2 = pcr.max(
0, pcr.min(((1 - fsat2) * fsoilE2 * (self.E02 - Et2)), self.S02 - 1e-2)
)
# Groundwater evaporation (4.6)
Eg1 = pcr.min((fsat1 - fwater1) * self.FsoilEmax1 * (self.E01 - Et1), Sghru1)
Eg2 = pcr.min((fsat2 - fwater2) * self.FsoilEmax2 * (self.E02 - Et2), Sghru2)
# Open water evaporation (4.7)
Er1 = pcr.min(fwater1 * self.FwaterE1 * pcr.max(0, self.E01 - Et1), self.Sr)
Er2 = pcr.min(fwater2 * self.FwaterE2 * pcr.max(0, self.E02 - Et2), self.Sr)
# Rainfall interception evaporation (4.2)
Sveg1 = self.S_sls1 * self.LAI1
fER1 = self.ER_frac_ref1 * fveg1
Pwet1 = -pcr.ln(1 - fER1 / fveg1) * Sveg1 / fER1
Ei1 = pcr.scalar(Pg < Pwet1) * fveg1 * Pg + pcr.scalar(Pg >= Pwet1) * (
fveg1 * Pwet1 + fER1 * (Pg - Pwet1)
)
Sveg2 = self.S_sls2 * self.LAI2
fER2 = self.ER_frac_ref2 * fveg2
Pwet2 = -pcr.ln(1 - fER2 / fveg2) * Sveg2 / fER2
Ei2 = pcr.scalar(Pg < Pwet2) * fveg2 * Pg + pcr.scalar(Pg >= Pwet2) * (
fveg2 * Pwet2 + fER2 * (Pg - Pwet2)
)
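        # Note (added, illustrative reading of the two branches above): below the
        # wetting threshold Pwet the whole vegetated share of rainfall (fveg * Pg)
        # evaporates as interception; above it only an extra fER fraction of the
        # excess (Pg - Pwet) is added.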
# HBV snow routine
# Matlab: function [FreeWater,DrySnow,InSoil]=snow_submodel(Precipitation,Temperature,FreeWater,DrySnow)
# derived from HBV-96 shared by Jaap Schellekens (Deltares) in May 2011
# original in PCraster, adapted to Matlab by Albert van Dijk
# HBV snow routine
Pn1 = Pg - Ei1
Pn2 = Pg - Ei2
Precipitation1 = Pn1
Precipitation2 = Pn2
# Snow routine parameters
# parameters
# TODO: Check this, not sure if this works.......
x = pcr.scalar(Pg)
Cfmax1 = 0.6 * 3.75653 * pcr.scalar(x >= 0)
Cfmax2 = 3.75653 * pcr.scalar(x >= 0)
TT1 = -1.41934 * pcr.scalar(
x >= 0
) # critical temperature for snowmelt and refreezing
TT2 = -1.41934 * pcr.scalar(x >= 0)
TTI1 = 1.00000 * pcr.scalar(
x >= 0
) # defines interval in which precipitation falls as rainfall and snowfall
TTI2 = 1.00000 * pcr.scalar(x >= 0)
CFR1 = 0.05000 * pcr.scalar(
x >= 0
) # refreezing efficiency constant in refreezing of freewater in snow
CFR2 = 0.05000 * pcr.scalar(x >= 0)
WHC1 = 0.10000 * pcr.scalar(x >= 0)
WHC2 = 0.10000 * pcr.scalar(x >= 0)
# Partitioning into fractions rain and snow
        Temperature = T24 # Dimmie, note: temporary line!!
RainFrac1 = pcr.max(0, pcr.min((Temperature - (TT1 - TTI1 / 2)) / TTI1, 1))
RainFrac2 = pcr.max(0, pcr.min((Temperature - (TT2 - TTI2 / 2)) / TTI2, 1))
SnowFrac1 = 1 - RainFrac1 # fraction of precipitation which falls as snow
SnowFrac2 = 1 - RainFrac2
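        # Note (added, illustrative): with TT = -1.42 degC and TTI = 1.0 degC,
        # precipitation falls entirely as snow below about -1.92 degC, entirely as
        # rain above about -0.92 degC, and is split linearly in between.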
# Snowfall/melt calculations
SnowFall1 = SnowFrac1 * Precipitation1 # snowfall depth
SnowFall2 = SnowFrac2 * Precipitation2
RainFall1 = RainFrac1 * Precipitation1 # rainfall depth
RainFall2 = RainFrac2 * Precipitation2
PotSnowMelt1 = Cfmax1 * pcr.max(
0, Temperature - TT1
) # Potential snow melt, based on temperature
PotSnowMelt2 = Cfmax2 * pcr.max(0, Temperature - TT2)
PotRefreezing1 = (
Cfmax1 * CFR1 * pcr.max(TT1 - Temperature, 0)
) # Potential refreezing, based on temperature
PotRefreezing2 = Cfmax2 * CFR2 * pcr.max(TT2 - Temperature, 0)
Refreezing1 = pcr.min(PotRefreezing1, self.FreeWater1) # actual refreezing
Refreezing2 = pcr.min(PotRefreezing2, self.FreeWater2)
SnowMelt1 = pcr.min(PotSnowMelt1, self.DrySnow1) # actual snow melt
SnowMelt2 = pcr.min(PotSnowMelt2, self.DrySnow2)
self.DrySnow1 = (
self.DrySnow1 + SnowFall1 + Refreezing1 - SnowMelt1
) # dry snow content
self.DrySnow2 = self.DrySnow2 + SnowFall2 + Refreezing2 - SnowMelt2
self.FreeWater1 = self.FreeWater1 - Refreezing1 # free water content in snow
self.FreeWater2 = self.FreeWater2 - Refreezing2
MaxFreeWater1 = self.DrySnow1 * WHC1
MaxFreeWater2 = self.DrySnow2 * WHC2
self.FreeWater1 = self.FreeWater1 + SnowMelt1 + RainFall1
self.FreeWater2 = self.FreeWater2 + SnowMelt2 + RainFall2
InSoil1 = pcr.max(
self.FreeWater1 - MaxFreeWater1, 0
) # abundant water in snow pack which goes into soil
InSoil2 = pcr.max(self.FreeWater2 - MaxFreeWater2, 0)
self.FreeWater1 = self.FreeWater1 - InSoil1
self.FreeWater2 = self.FreeWater2 - InSoil2
# End of Snow Module
# CALCULATION OF WATER BALANCES
# surface water fluxes (2.2)
NetInSoil1 = pcr.max(0, (InSoil1 - self.InitLoss1))
NetInSoil2 = pcr.max(0, (InSoil2 - self.InitLoss2))
Rhof1 = (1 - fsat1) * (NetInSoil1 / (NetInSoil1 + self.PrefR1)) * NetInSoil1
Rhof2 = (1 - fsat2) * (NetInSoil2 / (NetInSoil2 + self.PrefR2)) * NetInSoil2
Rsof1 = fsat1 * NetInSoil1
Rsof2 = fsat2 * NetInSoil2
QR1 = Rhof1 + Rsof1
QR2 = Rhof2 + Rsof2
I1 = InSoil1 - QR1
I2 = InSoil2 - QR2
# SOIL WATER BALANCES (2.1 & 2.4)
# Topsoil water balance (S0)
self.S01 = self.S01 + I1 - Es1 - U01
self.S02 = self.S02 + I2 - Es2 - U02
SzFC1 = self.S0FC1
SzFC2 = self.S0FC2
Sz1 = self.S01
Sz2 = self.S02
wz1 = pcr.max(1e-2, Sz1) / SzFC1
wz2 = pcr.max(1e-2, Sz2) / SzFC2
self.TMP = SzFC1
# TODO: Check if this works
fD1 = pcr.scalar(wz1 > 1) * pcr.max(self.FdrainFC1, 1 - 1 / wz1) + pcr.scalar(
wz1 <= 1
) * self.FdrainFC1 * pcr.exp(self.beta1 * pcr.scalar(wz1 - 1))
fD2 = pcr.scalar(wz2 > 1) * pcr.max(self.FdrainFC2, 1 - 1 / wz2) + pcr.scalar(
wz2 <= 1
) * self.FdrainFC2 * pcr.exp(self.beta2 * pcr.scalar(wz2 - 1))
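        # Note (added): above field capacity (wz > 1) the drainage fraction is at
        # least FdrainFC, or enough to drain the excess above field capacity
        # (1 - 1/wz); below field capacity it decays as FdrainFC * exp(beta * (wz - 1)).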
Dz1 = pcr.max(0, pcr.min(fD1 * Sz1, Sz1 - 1e-2))
Dz2 = pcr.max(0, pcr.min(fD2 * Sz2, Sz2 - 1e-2))
D01 = Dz1
D02 = Dz2
self.S01 = self.S01 - D01
self.S02 = self.S02 - D02
# Shallow root zone water balance (Ss)
self.Ss1 = self.Ss1 + D01 - Us1
self.Ss2 = self.Ss2 + D02 - Us2
SzFC1 = self.SsFC1
SzFC2 = self.SsFC2
Sz1 = self.Ss1
Sz2 = self.Ss2
wz1 = pcr.max(1e-2, Sz1) / SzFC1
wz2 = pcr.max(1e-2, Sz2) / SzFC2
fD1 = pcr.scalar(wz1 > 1) * pcr.max(self.FdrainFC1, 1 - 1 / wz1) + pcr.scalar(
wz1 <= 1
) * self.FdrainFC1 * pcr.exp(self.beta1 * pcr.scalar(wz1 - 1))
fD2 = pcr.scalar(wz2 > 1) * pcr.max(self.FdrainFC2, 1 - 1 / wz2) + pcr.scalar(
wz2 <= 1
) * self.FdrainFC2 * pcr.exp(self.beta2 * pcr.scalar(wz2 - 1))
Dz1 = pcr.max(0, pcr.min(fD1 * Sz1, Sz1 - 1e-2))
Dz2 = pcr.max(0, pcr.min(fD2 * Sz2, Sz2 - 1e-2))
Ds1 = Dz1
Ds2 = Dz2
self.Ss1 = self.Ss1 - Ds1
self.Ss2 = self.Ss2 - Ds2
# Deep root zone water balance (Sd) (2.6)
self.Sd1 = self.Sd1 + Ds1 - Ud1
self.Sd2 = self.Sd2 + Ds2 - Ud2
SzFC1 = self.SdFC1
SzFC2 = self.SdFC2
Sz1 = self.Sd1
Sz2 = self.Sd2
wz1 = pcr.max(1e-2, Sz1) / SzFC1
wz2 = pcr.max(1e-2, Sz2) / SzFC2
fD1 = pcr.scalar(wz1 > 1) * pcr.max(self.FdrainFC1, 1 - 1 / wz1) + pcr.scalar(
wz1 <= 1
) * self.FdrainFC1 * pcr.exp(self.beta1 * pcr.scalar(wz1 - 1))
fD2 = pcr.scalar(wz2 > 1) * pcr.max(self.FdrainFC2, 1 - 1 / wz2) + pcr.scalar(
wz2 <= 1
) * self.FdrainFC2 * pcr.exp(self.beta2 * pcr.scalar(wz2 - 1))
Dz1 = pcr.max(0, pcr.min(fD1 * Sz1, Sz1 - 1e-2))
Dz2 = pcr.max(0, pcr.min(fD2 * Sz2, Sz2 - 1e-2))
Dd1 = Dz1
Dd2 = Dz2
self.Sd1 = self.Sd1 - Dd1
self.Sd2 = self.Sd2 - Dd2
Y1 = pcr.min(
self.Fgw_conn1 * pcr.max(0, self.wdlimU1 * self.SdFC1 - self.Sd1),
Sghru1 - Eg1,
)
Y2 = pcr.min(
self.Fgw_conn2 * pcr.max(0, self.wdlimU2 * self.SdFC2 - self.Sd2),
Sghru2 - Eg2,
)
        # Y = Fgw_conn.*max(0,wdlimU.*SdFC-Sd); # still matlab script
self.Sd1 = self.Sd1 + Y1
self.Sd2 = self.Sd2 + Y2
# CATCHMENT WATER BALANCE
# Groundwater store water balance (Sg) (2.5)
NetGf = (self.Fhru1 * (Dd1 - Eg1 - Y1)) + (self.Fhru2 * (Dd2 - Eg2 - Y2))
self.Sg = self.Sg + NetGf
Sgfree = pcr.max(self.Sg, 0)
Qg = pcr.min(Sgfree, (1 - pcr.exp(-self.K_gw)) * Sgfree)
self.Sg = self.Sg - Qg
# Surface water store water balance (Sr) (2.7)
self.Sr = self.Sr + (self.Fhru1 * (QR1 - Er1)) + (self.Fhru2 * (QR2 - Er2)) + Qg
Qtot = pcr.min(self.Sr, (1 - pcr.exp(-self.K_rout)) * self.Sr)
self.Sr = self.Sr - Qtot
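        # Note (added): both the groundwater and surface water stores act as linear
        # reservoirs; a fraction (1 - exp(-K)) of the storage is released per
        # timestep, capped by the available storage itself.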
# VEGETATION ADJUSTMENT (5)
fveq1 = (
(1 / pcr.max((self.E01 / Utot1) - 1, 1e-3))
* (keps / (1 + keps))
* (ga1 / Gsmax1)
)
fveq2 = (
(1 / pcr.max((self.E02 / Utot2) - 1, 1e-3))
* (keps / (1 + keps))
* (ga2 / Gsmax2)
)
fvmax1 = 1 - pcr.exp(-self.LAImax1 / self.LAIref1)
fvmax2 = 1 - pcr.exp(-self.LAImax2 / self.LAIref2)
fveq1 = pcr.min(fveq1, fvmax1)
fveq2 = pcr.min(fveq2, fvmax2)
dMleaf1 = -pcr.ln(1 - fveq1) * self.LAIref1 / self.SLA1 - self.Mleaf1
dMleaf2 = -pcr.ln(1 - fveq2) * self.LAIref2 / self.SLA2 - self.Mleaf2
# Mleafnet1 = dMleaf1 * (dMleaf1/self.Tgrow1) + dMleaf1 * dMleaf1/self.Tsenc1
# Mleafnet2 = dMleaf2 * (dMleaf1/self.Tgrow2) + dMleaf2 * dMleaf2/self.Tsenc2
Mleafnet1 = (
pcr.scalar(dMleaf1 > 0) * (dMleaf1 / self.Tgrow1)
+ pcr.scalar(dMleaf1 < 0) * dMleaf1 / self.Tsenc1
)
Mleafnet2 = (
pcr.scalar(dMleaf2 > 0) * (dMleaf2 / self.Tgrow2)
+ pcr.scalar(dMleaf2 < 0) * dMleaf2 / self.Tsenc2
)
self.Mleaf1 = self.Mleaf1 + Mleafnet1
self.Mleaf2 = self.Mleaf2 + Mleafnet2
self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
self.LAI2 = self.SLA2 * self.Mleaf2
# Updating diagnostics
self.LAI1 = self.SLA1 * self.Mleaf1 # (5.3)
self.LAI2 = self.SLA2 * self.Mleaf2
fveg1 = 1 - pcr.exp(-self.LAI1 / self.LAIref1) # (5.3)
fveg2 = 1 - pcr.exp(-self.LAI2 / self.LAIref2)
fsoil1 = 1 - fveg1
fsoil2 = 1 - fveg2
w01 = self.S01 / self.S0FC1 # (2.1)
w02 = self.S02 / self.S0FC2
ws1 = self.Ss1 / self.SsFC1 # (2.1)
ws2 = self.Ss2 / self.SsFC2
wd1 = self.Sd1 / self.SdFC1 # (2.1)
wd2 = self.Sd2 / self.SdFC2
# The main function is used to run the program from the command line
def main(argv=None):
"""
*Optional*
Perform command line execution of the model. This example uses the getopt
module to parse the command line options.
The user can set the caseName, the runDir, the timestep and the configfile.
"""
global multpars
caseName = (
"../openstreams_w3ra"
) # "D:/trambaue/_Projects/GLOFFIS/201501/GLOFFIS_SA/Modules/openstreams_w3ra/"
runId = "run_default"
configfile = "wflow_W3RA.ini"
_lastTimeStep = 0
_firstTimeStep = 0
timestepsecs = 86400
wflow_cloneMap = "wflow_subcatch.map"
runinfoFile = "runinfo.xml"
_NoOverWrite = False
loglevel = logging.DEBUG
LogFileName = "wflow.log"
# This allows us to use the model both on the command line and to call
    # the model using the main function from another python script.
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
    opts, args = getopt.getopt(argv, "C:S:T:c:s:R:P:p:XIi:")
for o, a in opts:
if o == "-C":
caseName = a
if o == "-R":
runId = a
if o == "-c":
configfile = a
if o == "-s":
timestepsecs = int(a)
if o == "-T":
_lastTimeStep = int(a)
if o == "-S":
_firstTimeStep = int(a)
if len(opts) <= 1:
usage()
starttime = dt.datetime(1990, 1, 1)
if _lastTimeStep < _firstTimeStep:
        print(
            "The last timestep ("
            + str(_lastTimeStep)
            + ") is smaller than the first timestep ("
            + str(_firstTimeStep)
            + ")"
        )
usage()
myModel = WflowModel(wflow_cloneMap, caseName, runId, configfile)
dynModelFw = wf_DynamicFramework(
myModel, _lastTimeStep, firstTimestep=_firstTimeStep, datetimestart=starttime
)
dynModelFw.createRunId(
NoOverWrite=_NoOverWrite,
level=loglevel,
logfname=LogFileName,
model="wflow_W3RA",
doSetupFramework=False,
)
for o, a in opts:
if o == "-P":
left = a.split("=")[0]
right = a.split("=")[1]
configset(
myModel.config, "variable_change_once", left, right, overwrite=True
)
if o == "-p":
left = a.split("=")[0]
right = a.split("=")[1]
configset(
myModel.config, "variable_change_timestep", left, right, overwrite=True
)
if o == "-X":
configset(myModel.config, "model", "OverWriteInit", "1", overwrite=True)
if o == "-I":
configset(myModel.config, "model", "reinit", "1", overwrite=True)
if o == "-i":
configset(myModel.config, "model", "intbl", a, overwrite=True)
if o == "-s":
configset(myModel.config, "model", "timestepsecs", a, overwrite=True)
dynModelFw.setupFramework()
dynModelFw._runInitial()
dynModelFw._runResume()
# dynModelFw._runDynamic(0,0)
dynModelFw._runDynamic(_firstTimeStep, _lastTimeStep)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
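# Example invocation (editor's illustration only; the script name, paths and
# timesteps are made up):
#   python wflow_W3RA.py -C ../openstreams_w3ra -R run_default -S 1 -T 365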
if __name__ == "__main__":
main()<|fim▁end|> | ) # already in W m-2 s-1; set minimum of 0.01 to avoid numerical problems
Ta = self.TMIN + pcr.scalar(0.75) * (self.TMAX - self.TMIN) # T in degC |
<|file_name|>test_slurm_queues_getter_with_props.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#!/usr/bin/env python
import pytest
from pyxenon_snippets import slurm_queues_getter_with_props
def test_slurm_queues_getter_with_props():
slurm_queues_getter_with_props.run_example()<|fim▁end|> | |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
SETTING_NAME = (
('conf_space', 'Confluence Space Key'),
('conf_page', 'Confluence Page'),
('jira_project', 'JIRA Project Code Name'),
('github_project', 'GitHub Project'),
)<|fim▁hole|> primary_key=True,
choices=SETTING_NAME)
content = models.CharField(max_length=255)
class Meta:
verbose_name_plural = "settings"<|fim▁end|> |
class AppSettings(models.Model):
name = models.CharField(max_length=50, |
<|file_name|>createWhiteningValues.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2
import collections
import os
from loranode import RN2483Controller
# from ../_examplify.py import Examplify
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
from _examplify import Examplify
import lora, pmt, osmosdr
from gnuradio import gr, blocks
class ReceiveWhitening:
def __init__(self, sf = 7, output_file = './test_out.csv'):
self.target_freq = 868.1e6
self.sf = sf
self.samp_rate = 1e6
self.capture_freq = 868.0e6
self.offset = -(self.capture_freq - self.target_freq)
self.inputFile = './'
self.outputFile = output_file
self.tempFile = '/tmp/whitening_out'
self.tb = None<|fim▁hole|> self.inputFile = inputFile
if os.path.isfile(self.inputFile):
self.tb = gr.top_block()
self.file_source = blocks.file_source(gr.sizeof_gr_complex*1, self.inputFile, False) # Repeat input: True/False
self.lora_lora_receiver_0 = lora.lora_receiver(self.samp_rate, self.capture_freq, self.offset, self.sf, self.samp_rate)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, self.samp_rate, True)
self.tb.connect( (self.file_source, 0), (self.blocks_throttle_0, 0))
self.tb.connect( (self.blocks_throttle_0, 0), (self.lora_lora_receiver_0, 0))
self.tb.run()
self.tb = None
if os.path.isfile(self.tempFile):
if os.path.isfile(self.outputFile):
inf = open(self.tempFile, 'r')
seq = inf.read()
# print(seq)
out = open(self.outputFile, 'a')
out.write(seq)
out.close()
inf.close()
else:
raise Exception("[ReceiveWhitening] Outputfile '" + self.outputFile + "' does not exist!")
else:
raise Exception("[ReceiveWhitening] Tempfile '" + self.tempFile + "' does not exist!")
else:
raise Exception("[ReceiveWhitening] Inputfile '" + self.inputFile + "' does not exist!")
if __name__ == '__main__':
ofile = '/tmp/tmp_whitening.cfile'
testset = [ (7, "4/6"), (7, "4/7"), (8, "4/5"), (12, "4/6"), (9, "4/5"), (10, "4/5"), (11, "4/5"), (6, "4/5")]
for settings in testset:
dataf = './test_out_SF{0:d}_CR{1:s}.csv'.format(settings[0], '-'.join(settings[1].split('/')))
out = open(dataf, 'a')
out.close()
examplifr = Examplify(settings[0], settings[1], gains = [32, 38, 38])
whitening = ReceiveWhitening(settings[0], dataf)
for i in range(8):
print("Sample {0:d} of 16".format(i))
examplifr.transmitToFile(['0' * 256] * 4, ofile)
whitening.captureSequence(ofile)
for i in range(8):
print("Sample {0:d} of 16".format(i + 8))
examplifr.transmitToFile(['0' * 256] * 8, ofile)
whitening.captureSequence(ofile)
examplifr = None
whitening = None<|fim▁end|> |
def captureSequence(self, inputFile): |
<|file_name|>CommitSequenceTest.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and<|fim▁hole|>
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import gobblin.commit.CommitSequence;
import gobblin.commit.FsRenameCommitStep;
import gobblin.configuration.ConfigurationKeys;
import gobblin.configuration.State;
import gobblin.runtime.JobState.DatasetState;
/**
* Tests for {@link CommitSequence}.
*
* @author Ziyang Liu
*/
@Test(groups = { "gobblin.runtime.commit" })
public class CommitSequenceTest {
private static final String ROOT_DIR = "commit-sequence-test";
private FileSystem fs;
private CommitSequence sequence;
@BeforeClass
public void setUp() throws IOException {
this.fs = FileSystem.getLocal(new Configuration());
this.fs.delete(new Path(ROOT_DIR), true);
Path storeRootDir = new Path(ROOT_DIR, "store");
Path dir1 = new Path(ROOT_DIR, "dir1");
Path dir2 = new Path(ROOT_DIR, "dir2");
this.fs.mkdirs(dir1);
this.fs.mkdirs(dir2);
Path src1 = new Path(dir1, "file1");
Path src2 = new Path(dir2, "file2");
Path dst1 = new Path(dir2, "file1");
Path dst2 = new Path(dir1, "file2");
this.fs.createNewFile(src1);
this.fs.createNewFile(src2);
DatasetState ds = new DatasetState("job-name", "job-id");
ds.setDatasetUrn("urn");
ds.setNoJobFailure();
State state = new State();
state.setProp(ConfigurationKeys.STATE_STORE_ROOT_DIR_KEY, storeRootDir.toString());
this.sequence = new CommitSequence.Builder().withJobName("testjob").withDatasetUrn("testurn")
.beginStep(FsRenameCommitStep.Builder.class).from(src1).to(dst1).withProps(state).endStep()
.beginStep(FsRenameCommitStep.Builder.class).from(src2).to(dst2).withProps(state).endStep()
.beginStep(DatasetStateCommitStep.Builder.class).withDatasetUrn("urn").withDatasetState(ds).withProps(state)
.endStep().build();
}
@AfterClass
public void tearDown() throws IOException {
this.fs.delete(new Path(ROOT_DIR), true);
}
@Test
public void testExecute() throws IOException {
this.sequence.execute();
Assert.assertTrue(this.fs.exists(new Path(ROOT_DIR, "dir1/file2")));
Assert.assertTrue(this.fs.exists(new Path(ROOT_DIR, "dir2/file1")));
Assert.assertTrue(this.fs.exists(new Path(ROOT_DIR, "store/job-name/urn-job-id.jst")));
Assert.assertTrue(this.fs.exists(new Path(ROOT_DIR, "store/job-name/urn-current.jst")));
}
}<|fim▁end|> | * limitations under the License.
*/
package gobblin.runtime.commit; |
<|file_name|>DataManager.java<|end_file_name|><|fim▁begin|>package dk.itu.pervasive.mobile.data;
import android.app.Activity;
import android.content.Context;
import android.database.Cursor;
import android.net.Uri;
import android.preference.PreferenceManager;
import android.provider.MediaStore;
import android.util.Log;
import android.widget.Toast;
import dk.itu.pervasive.mobile.R;
import java.io.FileOutputStream;
/**
* @author Tony Beltramelli www.tonybeltramelli.com
*/
public class DataManager
{
public static final String PREF_KEY_SAVE = "save";
public static final String PREF_KEY_USERNAME = "username";
public static final String PREF_KEY_SURFACE_ADDRESS = "surfaceAddress";
public static final String PREF_KEY_STICKER_ID = "stickerID";
private static DataManager _instance = null;<|fim▁hole|> private String _username = "";
private String _surfaceAddress = "";
private String _stickerID = "";
private DataManager()
{
}
public static DataManager getInstance()
{
if (_instance == null)
{
_instance = new DataManager();
}
return _instance;
}
public void saveData()
{
_username = PreferenceManager.getDefaultSharedPreferences(_context).getString(PREF_KEY_USERNAME, _context.getResources().getString(R.string.preference_user_name_default));
_surfaceAddress = PreferenceManager.getDefaultSharedPreferences(_context).getString(PREF_KEY_SURFACE_ADDRESS, _context.getResources().getString(R.string.preference_surface_address_default));
_stickerID = PreferenceManager.getDefaultSharedPreferences(_context).getString(PREF_KEY_STICKER_ID, _context.getResources().getString(R.string.preference_sticker_id_default));
Log.wtf("save data", _username + ", " + _surfaceAddress + ", " + _stickerID);
}
public String getPathFromUri(Uri uri)
{
String[] projection = { MediaStore.Images.Media.DATA };
Cursor cursor = _context.getContentResolver().query(uri, projection, null, null, null);
int column_index = cursor.getColumnIndexOrThrow(MediaStore.Images.Media.DATA);
cursor.moveToFirst();
return cursor.getString(column_index);
}
public void saveImage(String imageName, byte[] bytes)
{
FileOutputStream fos;
try
{
fos = _context.openFileOutput(imageName, Context.MODE_PRIVATE);
fos.write(bytes);
fos.close();
} catch (Exception e)
{
e.printStackTrace();
}
}
public void displayMessage(final String message)
{
_context.runOnUiThread(new Runnable() {
public void run() {
Toast.makeText(_context, message, Toast.LENGTH_SHORT).show();
}
});
}
public String getUsername()
{
return _username;
}
public String getSurfaceAddress()
{
return _surfaceAddress;
}
public String getStickerID()
{
return _stickerID;
}
public void setContext(Activity context)
{
_context = context;
saveData();
}
public Context getContext(){
return _context;
}
}<|fim▁end|> |
private Activity _context; |
<|file_name|>test_classify_documents.py<|end_file_name|><|fim▁begin|>from flask import json
from unittest.mock import patch, Mock
from urbansearch.gathering.indices_selector import IndicesSelector
from urbansearch.server.main import Server
from urbansearch.server import classify_documents
from urbansearch.server.classify_documents import _join_workers
from urbansearch.workers import Workers
s = Server(run=False)
@patch('urbansearch.server.classify_documents._join_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(IndicesSelector, 'run_workers')
def test_download_indices_for_url(mock_rcw, mock_rw, mock_jw):
with s.app.test_client() as c:
resp = c.get('/api/v1/classify_documents/log_only?directory=test')
assert mock_rcw.called
assert mock_rw.called
assert mock_jw.called
@patch('urbansearch.server.classify_documents._join_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(IndicesSelector, 'run_workers')
def test_classify_indices_to_db(mock_rcw, mock_rw, mock_jw):
with s.app.test_client() as c:
resp = c.get('/api/v1/classify_documents/to_database?directory=test')
assert mock_rcw.called
assert mock_rw.called
assert mock_jw.called
@patch('urbansearch.server.classify_documents._join_workers')
@patch('urbansearch.server.classify_documents.db_utils')
def test_classify_indices_to_db_no_connection(mock_db, mock_jw):
mock_db.connected_to_db.return_value = False
with s.app.test_client() as c:
resp = c.get('/api/v1/classify_documents/to_database?directory=test')
assert not mock_jw.called
@patch('urbansearch.server.classify_documents._join_file_workers')
@patch.object(Workers, 'run_classifying_workers')
@patch.object(Workers, 'run_read_files_worker')
def test_classify_textfiles_to_db(mock_rfw, mock_rw, mock_jw):
classify_documents.classify_textfiles_to_db(0, 'test')
assert mock_rfw.called
assert mock_rw.called
assert mock_jw.called
@patch('urbansearch.server.classify_documents._join_workers')
@patch('urbansearch.server.classify_documents.db_utils')
def test_classify_textfiles_to_db_no_connection(mock_db, mock_jw):
mock_db.connected_to_db.return_value = False
classify_documents.classify_textfiles_to_db(0, None)
assert not mock_jw.called
def test_join_workers():
producers = [Mock()]
cworker = Mock()
consumers = [Mock()]
classify_documents._join_workers(cworker, producers, consumers)
for p in producers:
assert p.join.called
assert cworker.set_producers_done.called
for c in consumers:
assert c.join.called
assert cworker.clear_producers_done.called<|fim▁hole|>
def test_join_file_workers():
producers = [Mock()]
cworker = Mock()
consumers = [Mock()]
classify_documents._join_file_workers(cworker, producers, consumers)
for p in producers:
assert p.join.called
assert cworker.set_file_producers_done.called
for c in consumers:
assert c.join.called
assert cworker.clear_file_producers_done.called<|fim▁end|> | |
<|file_name|>SimpleForm.js<|end_file_name|><|fim▁begin|>import React from 'react';
import PropTypes from 'prop-types';
import { reduxForm } from 'redux-form';
import { connect } from 'react-redux';
import compose from 'recompose/compose';
import getDefaultValues from './getDefaultValues';
import FormField from './FormField';
import Toolbar from './Toolbar';
const noop = () => {};
export const SimpleForm = ({ children, handleSubmit, invalid, record, resource, basePath, submitOnEnter }) => {
return (
<form onSubmit={ submitOnEnter ? handleSubmit : noop } className="simple-form">
<div style={{ padding: '0 1em 1em 1em' }}>
{React.Children.map(children, input => input && (
<div key={input.props.source} className={`aor-input-${input.props.source}`} style={input.props.style}>
<FormField input={input} resource={resource} record={record} basePath={basePath} />
</div>
))}
</div>
<Toolbar invalid={invalid} submitOnEnter={submitOnEnter} />
</form>
);
};
SimpleForm.propTypes = {
children: PropTypes.node,
defaultValue: PropTypes.oneOfType([
PropTypes.object,
PropTypes.func,
]),
handleSubmit: PropTypes.func,
invalid: PropTypes.bool,
record: PropTypes.object,
resource: PropTypes.string,
basePath: PropTypes.string,
validate: PropTypes.func,
submitOnEnter: PropTypes.bool,
};
SimpleForm.defaultProps = {
submitOnEnter: true,
};<|fim▁hole|> })),
reduxForm({
form: 'record-form',
enableReinitialize: true,
}),
);
export default enhance(SimpleForm);<|fim▁end|> |
const enhance = compose(
connect((state, props) => ({
initialValues: getDefaultValues(state, props), |
<|file_name|>lc0100_same_tree.py<|end_file_name|><|fim▁begin|>"""Leetcode 100. Same Tree
Easy
URL: https://leetcode.com/problems/same-tree/
Given two binary trees, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical and
the nodes have the same value.
Example 1:
Input: 1 1
/ \ / \
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Example 2:
Input: 1 1
/ \
2 2
[1,2], [1,null,2]
Output: false
Example 3:
Input: 1 1
/ \ / \
2 1 1 2
[1,2,1], [1,1,2]
Output: false
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class SolutionPreorderRecur(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool<|fim▁hole|>
Apply recursive preorder traversal to check same tree.
Time complexity: O(n).
Space complexity: O(n).
"""
        # Check if both roots don't exist.
        if not p and not q:
            return True
        # Check if just one of the roots exists.
if not p or not q:
return False
# If both exist, check their values are the same.
if p.val != q.val:
return False
# Recursively check left/right subtrees.
return (self.isSameTree(p.left, q.left) and
self.isSameTree(p.right, q.right))
class SolutionPreorderIter(object):
def isSameTree(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
Apply iterative preorder traversal to check same tree.
Time complexity: O(n).
Space complexity: O(n).
"""
stack = [(p, q)]
while stack:
cur_p, cur_q = stack.pop()
            # Check if both roots don't exist; if so, continue,
            # since there may be other node pairs to check.
            if not cur_p and not cur_q:
                continue
            # Check if just one of the roots exists.
if not cur_p or not cur_q:
return False
# If both exist, check their values are the same.
if cur_p.val != cur_q.val:
return False
# Add root's right and then left to stack, since stack is FILO.
stack.append((cur_p.right, cur_q.right))
stack.append((cur_p.left, cur_q.left))
return True
def main():
# Input: 1 1
# / \ / \
# 2 3 2 3
# [1,2,3], [1,2,3]
# Output: true
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(3)
q = TreeNode(1)
q.left = TreeNode(2)
q.right = TreeNode(3)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: 1 1
# / \
# 2 2
# [1,2], [1,null,2]
# Output: false
p = TreeNode(1)
p.left = TreeNode(2)
q = TreeNode(1)
q.right = TreeNode(2)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: 1 1
# / \ / \
# 2 1 1 2
# [1,2,1], [1,1,2]
# Output: false
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(1)
q = TreeNode(1)
q.left = TreeNode(1)
q.right = TreeNode(2)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
# Input: [10,5,15], [10,5,null,null,15]
p = TreeNode(10)
p.left = TreeNode(5)
p.right = TreeNode(15)
q = TreeNode(10)
q.left = TreeNode(5)
q.left.right = TreeNode(15)
print SolutionPreorderRecur().isSameTree(p, q)
print SolutionPreorderIter().isSameTree(p, q)
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>parser.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE configuration file parser: converts a parsed configuration file to
beans
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import collections
import logging
import uuid
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, Instantiate, \
Requires
# COHORTE constants
import cohorte
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Component to be instantiated
Component = collections.namedtuple(
'Component', ('factory', 'name', 'properties'))
# Bundle to be installed
Bundle = collections.namedtuple(
'Bundle', ('name', 'filename', 'properties', 'version', 'optional'))
# Simplest configuration possible
BootConfiguration = collections.namedtuple(
'BootConfiguration', ('bundles', 'composition', 'properties',
'environment', 'boot_args'))
# Boot configuration + Isolate basic description
Isolate = collections.namedtuple(
'Isolate', BootConfiguration._fields + ('name', 'kind', 'node',
'level', 'sublevel'))
def _recursive_namedtuple_convert(data):
"""
Recursively converts the named tuples in the given object to dictionaries
:param data: An object in a named tuple or its children
:return: The converted object
"""
if isinstance(data, list):
# List
return [_recursive_namedtuple_convert(item) for item in data]
elif hasattr(data, '_asdict'):
# Named tuple
dict_value = dict(data._asdict())
for key, value in dict_value.items():
dict_value[key] = _recursive_namedtuple_convert(value)
return dict_value
else:
# Standard object
return data
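# Illustrative example (added by the editor, not part of the original module):
#   _recursive_namedtuple_convert(Component(factory='f', name='n', properties={}))
#   -> {'factory': 'f', 'name': 'n', 'properties': {}}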
# ------------------------------------------------------------------------------
@ComponentFactory('cohorte-config-parser-factory')
@Provides(cohorte.SERVICE_CONFIGURATION_READER)
@Requires('_reader', cohorte.SERVICE_FILE_READER)
@Instantiate('cohorte-config-parser')
class BootConfigParser(object):
"""
Boot configuration parser
"""
def __init__(self):
"""
Sets up the members
"""
# File reader
self._reader = None
# Loaded isolates configurations
self._isolates = None
@staticmethod
def _parse_bundle(json_object):
"""
Reads the given JSON object and returns its Bundle representation
:param json_object: A parsed JSON object
:return: A Bundle object
:raise KeyError: A mandatory parameter is missing
"""
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Bundle(name=json_object['name'],
filename=json_object.get('file'),
properties=properties,
version=json_object.get('version'),
optional=json_object.get('optional', False))
def _parse_bundles(self, bundles):
"""
Parses the bundles in the given list. Returns an empty list if the
given one is None or empty.
:param bundles: A list of bundles representations
:return: A list of Bundle objects
:raise KeyError: A mandatory parameter is missing
"""
if not bundles:
return []
return [self._parse_bundle(bundle) for bundle in bundles]
@staticmethod
def _parse_component(json_object):
"""
Reads the given JSON object and returns its Component representation
:param json_object: A parsed JSON object
:return: A Component object
:raise KeyError: A mandatory parameter is missing
"""
# Mandatory values
factory = json_object['factory']
# Computed name (if needed)
name = json_object.get('name', factory + '-instance')
# Use a copy of the properties
properties = {}
json_properties = json_object.get('properties')
if json_properties:
properties.update(json_properties)
return Component(factory=factory, name=name, properties=properties)
def _parse_components(self, components):
"""
Parses the components in the given list. Returns an empty list if the
given one is None or empty.
:param components: A list of components representations
:return: A list of Component objects
:raise KeyError: A mandatory parameter is missing
"""
if not components:
return []
return [self._parse_component(component) for component in components]
def _parse_isolate(self, json_object):
"""
Reads the given JSON object and returns its Isolate representation
:param json_object: A parsed JSON object
:return: An Isolate object
:raise KeyError: A mandatory parameter is missing
"""
# Reuse the boot parser
boot_config = self.load_boot_dict(json_object)
return Isolate(name=json_object['name'],
kind=json_object['kind'],
level=json_object['level'],
sublevel=json_object['sublevel'],
# Reuse boot configuration values
**boot_config._asdict())
def _prepare_configuration(self, uid, name, kind,
bundles=None, composition=None,
base_configuration=None):
"""
Prepares and returns a configuration dictionary to be stored in the
configuration broker, to start an isolate of the given kind.
:param uid: The isolate UID
:param name: The isolate name
:param kind: The kind of isolate to boot
:param bundles: Extra bundles to install
:param composition: Extra components to instantiate
:param base_configuration: Base configuration (to override)
:return: A configuration dictionary
(updated base_configuration if given)
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
if isinstance(base_configuration, dict):
configuration = base_configuration
else:
configuration = {}
# Set up isolate properties
configuration['uid'] = uid \
or configuration.get('custom_uid') or str(uuid.uuid4())
configuration['name'] = name
configuration['kind'] = kind
# Boot configuration for this kind
new_boot = configuration.setdefault('boot', {})
new_boot.update(_recursive_namedtuple_convert(self.load_boot(kind)))
# Add bundles (or an empty list)
if bundles:
new_bundles = configuration.setdefault('bundles', [])
new_bundles.extend(_recursive_namedtuple_convert(
[self.normalize_bundle(bundle) for bundle in bundles]))
# Add components (or an empty list)
if composition:
new_compo = configuration.setdefault('composition', [])
new_compo.extend(_recursive_namedtuple_convert(composition))
# Return the configuration dictionary
return configuration
@staticmethod
def normalize_bundle(bundle):
"""
Make a Bundle object from the given Bundle-like object attributes,
using default values when necessary.
:param bundle: A Bundle-like object
:return: A Bundle object
:raise AttributeError: A mandatory attribute is missing
:raise ValueError: Invalid attribute value
"""
if isinstance(bundle, Bundle):
# Already a bundle
return bundle
# Bundle name is mandatory
name = bundle.name
if not name:
raise ValueError("A bundle must have a name: {0}".format(bundle))
# Get the filename
for fileattr in ('filename', 'file'):
filename = getattr(bundle, fileattr, None)
if filename:
break
# Normalize bundle properties
properties = getattr(bundle, 'properties', {})
if not isinstance(properties, dict):
properties = {}
# Normalize bundle version
version = getattr(bundle, 'version', None)
if version is not None:
version = str(version)
<|fim▁hole|>
def load_boot(self, kind):
"""
Loads the boot configuration for the given kind of isolate, or returns
the one in the cache.
:param kind: The kind of isolate to boot
:return: The loaded BootConfiguration object
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Prepare & store the bean representation
return self.load_boot_dict(self.load_conf_raw('boot', kind))
def load_conf_raw(self, level, kind):
"""
        Loads the raw configuration for the given level and kind of isolate.
        :param level: The level of configuration (boot, java, python)
        :param kind: The kind of isolate to boot
        :return: The parsed configuration dictionary
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Load the boot file
return self.read('{0}-{1}.js'.format(level, kind))
def load_boot_dict(self, dict_config):
"""
Parses a boot configuration from the given dictionary
:param dict_config: A configuration dictionary
:return: The parsed BootConfiguration object
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Use a copy of environment
environment = {}
json_env = dict_config.get('environment')
if json_env:
environment.update(json_env)
# Parse the properties
properties = {}
dict_properties = dict_config.get('properties')
if dict_properties:
properties.update(dict_properties)
# Prepare the bean representation
bundles = self._parse_bundles(dict_config.get('bundles'))
composition = self._parse_components(dict_config.get('composition'))
return BootConfiguration(bundles=bundles,
composition=composition,
boot_args=dict_config.get('boot_args'),
environment=environment,
properties=properties)
def prepare_isolate(self, uid, name, kind, level, sublevel,
bundles=None, composition=None):
"""
Prepares and returns a configuration dictionary to be stored in the
configuration broker, to start an isolate of the given kind.
:param uid: The isolate UID
:param name: The isolate name
:param kind: The kind of isolate to boot (pelix, osgi, ...)
:param level: The level of configuration (boot, java, python, ...)
:param sublevel: Category of configuration (monitor, isolate, ...)
:param bundles: Extra bundles to install
:param composition: Extra components to instantiate
:return: A configuration dictionary
        :raise IOError: Unknown/inaccessible kind of isolate
:raise KeyError: A parameter is missing in the configuration files
:raise ValueError: Error reading the configuration
"""
# Load the isolate model file
configuration = self.load_conf_raw(level, sublevel)
try:
# Try to load the isolate-specific configuration
# without logging "file not found" errors
isolate_conf = self.read(name + ".js", False)
except IOError:
# Ignore I/O errors (file not found)
# Propagate ValueError (parsing errors)
pass
else:
# Merge the configurations: this method considers that the first
# parameter has priority on the second
configuration = self._reader.merge_object(isolate_conf,
configuration)
# Extend with the boot configuration
return self._prepare_configuration(uid, name, kind,
bundles, composition, configuration)
def read(self, filename, reader_log_error=True):
"""
Reads the content of the given file, without parsing it.
:param filename: A configuration file name
:param reader_log_error: If True, the reader will log I/O errors
:return: The dictionary read from the file
"""
return self._reader.load_file(filename, 'conf',
log_error=reader_log_error)<|fim▁end|> | return Bundle(name, filename, properties, version,
getattr(bundle, 'optional', False)) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># portage.py -- core Portage functionality
# Copyright 1998-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
VERSION="HEAD"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
# ===========================================================================
try:
import sys
import errno
if not hasattr(errno, 'ESTALE'):
# ESTALE may not be defined on some systems, such as interix.
errno.ESTALE = -1
import re
import types
import platform
# Temporarily delete these imports, to ensure that only the
# wrapped versions are imported by portage internals.
import os
del os
import shutil
del shutil
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
sys.stderr.write(" "+str(e)+"\n\n");
raise
try:
import portage.proxy.lazyimport
import portage.proxy as proxy
proxy.lazyimport.lazyimport(globals(),
'portage.cache.cache_errors:CacheError',
'portage.checksum',
'portage.checksum:perform_checksum,perform_md5,prelink_capable',
'portage.cvstree',
'portage.data',
'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
'uid,userland,userpriv_groups,wheelgid',
'portage.dbapi',
'portage.dbapi.bintree:bindbapi,binarytree',
'portage.dbapi.cpv_expand:cpv_expand',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
'portagetree,portdbapi',
'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
'portage.dbapi.virtual:fakedbapi',
'portage.dep',
'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
'match_from_list,match_to_list',
'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
'portage.eclass_cache',
'portage.exception',
'portage.getbinpkg',
'portage.locks',
'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
'portage.mail',
'portage.manifest:Manifest',
'portage.output',
'portage.output:bold,colorize',
'portage.package.ebuild.doebuild:doebuild,' + \
'doebuild_environment,spawn,spawnebuild',
'portage.package.ebuild.config:autouse,best_from_dict,' + \
'check_config_instance,config',
'portage.package.ebuild.deprecated_profile_check:' + \
'deprecated_profile_check',
'portage.package.ebuild.digestcheck:digestcheck',
'portage.package.ebuild.digestgen:digestgen',
'portage.package.ebuild.fetch:fetch',
'portage.package.ebuild.getmaskingreason:getmaskingreason',
'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.process',
'portage.process:atexit_register,run_exitfuncs',
'portage.update:dep_transform,fixdbentries,grab_updates,' + \
'parse_updates,update_config_files,update_dbentries,' + \
'update_dbentry',
'portage.util',
'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
'apply_recursive_permissions,dump_traceback,getconfig,' + \
'grabdict,grabdict_package,grabfile,grabfile_package,' + \
'map_dictlist_vals,new_protect_filename,normalize_path,' + \
'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
'writemsg_stdout,write_atomic',
'portage.util.digraph:digraph',
'portage.util.env_update:env_update',
'portage.util.ExtractKernelVersion:ExtractKernelVersion',
'portage.util.listdir:cacheddir,listdir',
'portage.util.movefile:movefile',
'portage.util.mtimedb:MtimeDB',
'portage.versions',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
'cpv_getkey@getCPFromCPV,endversion_keys,' + \
'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
'portage.xpak',
'subprocess',
'time',
)
try:
from collections import OrderedDict
except ImportError:
proxy.lazyimport.lazyimport(globals(),
'portage.cache.mappings:OrderedDict')
import portage.const
from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
except ImportError as e:
sys.stderr.write("\n\n")
sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
sys.stderr.write("!!! a recovery of portage.\n")
sys.stderr.write(" "+str(e)+"\n\n")
raise
if sys.hexversion >= 0x3000000:
basestring = str
long = int
# We use utf_8 encoding everywhere. Previously, we used
# sys.getfilesystemencoding() for the 'merge' encoding, but that had
# various problems:
#
# 1) If the locale is ever changed then it can cause orphan files due
# to changed character set translation.
#
# 2) Ebuilds typically install files with utf_8 encoded file names,
# and then portage would be forced to rename those files to match
# sys.getfilesystemencoding(), possibly breaking things.
#
# 3) Automatic translation between encodings can lead to nonsensical
# file names when the source encoding is unknown by portage.
#
# 4) It's inconvenient for ebuilds to convert the encodings of file
# names to match the current locale, and upstreams typically encode
# file names with utf_8 encoding.
#
# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
# problems by using a constant utf_8 'merge' encoding for all locales, as
# discussed in bug #382199 and bug #381509.
_encodings = {
'content' : 'utf_8',
'fs' : 'utf_8',
'merge' : 'utf_8',
'repo.content' : 'utf_8',
'stdio' : 'utf_8',
}<|fim▁hole|>
if sys.hexversion >= 0x3000000:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, str):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = str(s, encoding=encoding, errors=errors)
return s
else:
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, unicode):
s = s.encode(encoding, errors)
return s
def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
if isinstance(s, bytes):
s = unicode(s, encoding=encoding, errors=errors)
return s
class _unicode_func_wrapper(object):
"""
Wraps a function, converts arguments from unicode to bytes,
and return values to unicode from bytes. Function calls
will raise UnicodeEncodeError if an argument fails to be
encoded with the required encoding. Return values that
are single strings are decoded with errors='replace'. Return
values that are lists of strings are decoded with errors='strict'
and elements that fail to be decoded are omitted from the returned
list.
"""
__slots__ = ('_func', '_encoding')
def __init__(self, func, encoding=_encodings['fs']):
self._func = func
self._encoding = encoding
def __call__(self, *args, **kwargs):
encoding = self._encoding
wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
for x in args]
if kwargs:
wrapped_kwargs = dict(
(k, _unicode_encode(v, encoding=encoding, errors='strict'))
for k, v in kwargs.items())
else:
wrapped_kwargs = {}
rval = self._func(*wrapped_args, **wrapped_kwargs)
# Don't use isinstance() since we don't want to convert subclasses
# of tuple such as posix.stat_result in Python >=3.2.
if rval.__class__ in (list, tuple):
decoded_rval = []
for x in rval:
try:
x = _unicode_decode(x, encoding=encoding, errors='strict')
except UnicodeDecodeError:
pass
else:
decoded_rval.append(x)
if isinstance(rval, tuple):
rval = tuple(decoded_rval)
else:
rval = decoded_rval
else:
rval = _unicode_decode(rval, encoding=encoding, errors='replace')
return rval
class _unicode_module_wrapper(object):
"""
Wraps a module and wraps all functions with _unicode_func_wrapper.
"""
__slots__ = ('_mod', '_encoding', '_overrides', '_cache')
def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
object.__setattr__(self, '_mod', mod)
object.__setattr__(self, '_encoding', encoding)
object.__setattr__(self, '_overrides', overrides)
if cache:
cache = {}
else:
cache = None
object.__setattr__(self, '_cache', cache)
def __getattribute__(self, attr):
cache = object.__getattribute__(self, '_cache')
if cache is not None:
result = cache.get(attr)
if result is not None:
return result
result = getattr(object.__getattribute__(self, '_mod'), attr)
encoding = object.__getattribute__(self, '_encoding')
overrides = object.__getattribute__(self, '_overrides')
override = None
if overrides is not None:
override = overrides.get(id(result))
if override is not None:
result = override
elif isinstance(result, type):
pass
elif type(result) is types.ModuleType:
result = _unicode_module_wrapper(result,
encoding=encoding, overrides=overrides)
elif hasattr(result, '__call__'):
result = _unicode_func_wrapper(result, encoding=encoding)
if cache is not None:
cache[attr] = result
return result
import os as _os
_os_overrides = {
id(_os.fdopen) : _os.fdopen,
id(_os.mkfifo) : _os.mkfifo,
id(_os.popen) : _os.popen,
id(_os.read) : _os.read,
id(_os.system) : _os.system,
}
if hasattr(_os, 'statvfs'):
_os_overrides[id(_os.statvfs)] = _os.statvfs
os = _unicode_module_wrapper(_os, overrides=_os_overrides,
encoding=_encodings['fs'])
_os_merge = _unicode_module_wrapper(_os,
encoding=_encodings['merge'], overrides=_os_overrides)
import shutil as _shutil
shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
# Imports below this point rely on the above unicode wrapper definitions.
try:
__import__('selinux')
import portage._selinux
selinux = _unicode_module_wrapper(_selinux,
encoding=_encodings['fs'])
_selinux_merge = _unicode_module_wrapper(_selinux,
encoding=_encodings['merge'])
except (ImportError, OSError) as e:
if isinstance(e, OSError):
sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
del e
_selinux = None
selinux = None
_selinux_merge = None
# ===========================================================================
# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
# ===========================================================================
_python_interpreter = os.path.realpath(sys.executable)
_bin_path = PORTAGE_BIN_PATH
_pym_path = PORTAGE_PYM_PATH
def _shell_quote(s):
"""
Quote a string in double-quotes and use backslashes to
escape any backslashes, double-quotes, dollar signs, or
backquotes in the string.
"""
for letter in "\\\"$`":
if letter in s:
s = s.replace(letter, "\\" + letter)
return "\"%s\"" % s
bsd_chflags = None
if platform.system() in ('FreeBSD',):
class bsd_chflags(object):
@classmethod
def chflags(cls, path, flags, opts=""):
cmd = ['chflags']
if opts:
cmd.append(opts)
cmd.append('%o' % (flags,))
cmd.append(path)
encoding = _encodings['fs']
if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
# Python 3.1 does not support bytes in Popen args.
cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
for x in cmd]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = proc.communicate()[0]
status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
return
# Try to generate an ENOENT error if appropriate.
if 'h' in opts:
_os_merge.lstat(path)
else:
_os_merge.stat(path)
# Make sure the binary exists.
if not portage.process.find_binary('chflags'):
raise portage.exception.CommandNotFound('chflags')
# Now we're not sure exactly why it failed or what
# the real errno was, so just report EPERM.
output = _unicode_decode(output, encoding=encoding)
e = OSError(errno.EPERM, output)
e.errno = errno.EPERM
e.filename = path
e.message = output
raise e
@classmethod
def lchflags(cls, path, flags):
return cls.chflags(path, flags, opts='-h')
def load_mod(name):
modname = ".".join(name.split(".")[:-1])
mod = __import__(modname)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def getcwd():
"this fixes situations where the current directory doesn't exist"
try:
return os.getcwd()
except OSError: #dir doesn't exist
os.chdir("/")
return "/"
getcwd()
def abssymlink(symlink, target=None):
"This reads symlinks, resolving the relative symlinks, and returning the absolute."
if target is not None:
mylink = target
else:
mylink = os.readlink(symlink)
if mylink[0] != '/':
mydir=os.path.dirname(symlink)
mylink=mydir+"/"+mylink
return os.path.normpath(mylink)
_doebuild_manifest_exempt_depend = 0
_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress"])
_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2"])
def _eapi_is_deprecated(eapi):
return eapi in _deprecated_eapis
def eapi_is_supported(eapi):
if not isinstance(eapi, basestring):
# Only call str() when necessary since with python2 it
# can trigger UnicodeEncodeError if EAPI is corrupt.
eapi = str(eapi)
eapi = eapi.strip()
if _eapi_is_deprecated(eapi):
return True
if eapi in _testing_eapis:
return True
try:
eapi = int(eapi)
except ValueError:
eapi = -1
if eapi < 0:
return False
return eapi <= portage.const.EAPI
# This pattern is specified by PMS section 7.3.1.
_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
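# Illustrative ebuild lines accepted by _pms_eapi_re (added for clarity):
#   EAPI=5
#   EAPI="6"
#   EAPI='4-python'  # trailing comment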
def _parse_eapi_ebuild_head(f):
eapi = None
eapi_lineno = None
lineno = 0
for line in f:
lineno += 1
m = _comment_or_blank_line.match(line)
if m is None:
eapi_lineno = lineno
m = _pms_eapi_re.match(line)
if m is not None:
eapi = m.group(2)
break
return (eapi, eapi_lineno)
def _movefile(src, dest, **kwargs):
"""Calls movefile and raises a PortageException if an error occurs."""
if movefile(src, dest, **kwargs) is None:
raise portage.exception.PortageException(
"mv '%s' '%s'" % (src, dest))
auxdbkeys = (
'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
'PDEPEND', 'PROVIDE', 'EAPI',
'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
)
auxdbkeylen=len(auxdbkeys)
def portageexit():
close_portdbapi_caches()
class _trees_dict(dict):
__slots__ = ('_running_eroot', '_target_eroot',)
def __init__(self, *pargs, **kargs):
dict.__init__(self, *pargs, **kargs)
self._running_eroot = None
self._target_eroot = None
def create_trees(config_root=None, target_root=None, trees=None, env=None,
eprefix=None):
if trees is not None:
# clean up any existing portdbapi instances
for myroot in trees:
portdb = trees[myroot]["porttree"].dbapi
portdb.close_caches()
portdbapi.portdbapi_instances.remove(portdb)
del trees[myroot]["porttree"], myroot, portdb
if trees is None:
trees = _trees_dict()
elif not isinstance(trees, _trees_dict):
# caller passed a normal dict or something,
# but we need a _trees_dict instance
trees = _trees_dict(trees)
if env is None:
env = os.environ
settings = config(config_root=config_root, target_root=target_root,
env=env, eprefix=eprefix)
settings.lock()
trees._target_eroot = settings['EROOT']
myroots = [(settings['EROOT'], settings)]
if settings["ROOT"] == "/":
trees._running_eroot = trees._target_eroot
else:
# When ROOT != "/" we only want overrides from the calling
# environment to apply to the config that's associated
# with ROOT != "/", so pass a nearly empty dict for the env parameter.
clean_env = {}
for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
'ftp_proxy', 'http_proxy', 'no_proxy',
'__PORTAGE_TEST_HARDLINK_LOCKS'):
v = settings.get(k)
if v is not None:
clean_env[k] = v
settings = config(config_root=None, target_root="/",
env=clean_env, eprefix=eprefix)
settings.lock()
trees._running_eroot = settings['EROOT']
myroots.append((settings['EROOT'], settings))
for myroot, mysettings in myroots:
trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
trees[myroot].addLazySingleton(
"vartree", vartree, categories=mysettings.categories,
settings=mysettings)
trees[myroot].addLazySingleton("porttree",
portagetree, settings=mysettings)
trees[myroot].addLazySingleton("bintree",
binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
return trees
if VERSION == 'HEAD':
class _LazyVersion(proxy.objectproxy.ObjectProxy):
def _get_target(self):
global VERSION
if VERSION is not self:
return VERSION
if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
encoding = _encodings['fs']
cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --tags || exit $? ; " + \
"if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
"then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
"exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
# Python 3.1 does not support bytes in Popen args.
cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
for x in cmd]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = _unicode_decode(proc.communicate()[0], encoding=encoding)
status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
output_lines = output.splitlines()
if output_lines:
version_split = output_lines[0].split('-')
if version_split:
VERSION = version_split[0].lstrip('v')
patchlevel = False
if len(version_split) > 1:
patchlevel = True
VERSION = "%s_p%s" %(VERSION, version_split[1])
if len(output_lines) > 1 and output_lines[1] == 'modified':
head_timestamp = None
if len(output_lines) > 3:
try:
head_timestamp = long(output_lines[3])
except ValueError:
pass
timestamp = long(time.time())
if head_timestamp is not None and timestamp > head_timestamp:
timestamp = timestamp - head_timestamp
if not patchlevel:
VERSION = "%s_p0" % (VERSION,)
VERSION = "%s_p%d" % (VERSION, timestamp)
return VERSION
VERSION = 'HEAD'
return VERSION
VERSION = _LazyVersion()
if "_legacy_globals_constructed" in globals():
# The module has been reloaded, so perform any relevant cleanup
# and prevent memory leaks.
if "db" in _legacy_globals_constructed:
try:
db
except NameError:
pass
else:
if isinstance(db, dict) and db:
for _x in db.values():
try:
if "porttree" in _x.lazy_items:
continue
except (AttributeError, TypeError):
continue
try:
_x = _x["porttree"].dbapi
except (AttributeError, KeyError):
continue
if not isinstance(_x, portdbapi):
continue
_x.close_caches()
try:
portdbapi.portdbapi_instances.remove(_x)
except ValueError:
pass
del _x
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
__slots__ = ('_name',)
def __init__(self, name):
proxy.objectproxy.ObjectProxy.__init__(self)
object.__setattr__(self, '_name', name)
def _get_target(self):
name = object.__getattribute__(self, '_name')
from portage._legacy_globals import _get_legacy_global
return _get_legacy_global(name)
_legacy_global_var_names = ("archlist", "db", "features",
"groups", "mtimedb", "mtimedbfile", "pkglines",
"portdb", "profiledir", "root", "selinux_enabled",
"settings", "thirdpartymirrors")
for k in _legacy_global_var_names:
globals()[k] = _LegacyGlobalProxy(k)
del k
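# Illustrative note (not part of the original module): attribute access such
# as the hypothetical snippet below resolves lazily through
# _get_legacy_global() on first use, so importing portage stays cheap until
# one of these legacy globals is actually touched.
#
#     import portage
#     settings = portage.settings  # triggers _get_legacy_global('settings')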
_legacy_globals_constructed = set()
def _disable_legacy_globals():
"""
This deletes the ObjectProxy instances that are used
for lazy initialization of legacy global variables.
The purpose of deleting them is to prevent new code
from referencing these deprecated variables.
"""
global _legacy_global_var_names
for k in _legacy_global_var_names:
globals().pop(k, None)<|fim▁end|> | |
<|file_name|>envelope-square.d.ts<|end_file_name|><|fim▁begin|>// TypeScript Version: 2.1
import * as React from 'react';<|fim▁hole|><|fim▁end|> | import { IconBaseProps } from 'react-icon-base';
export default class FaEnvelopeSquare extends React.Component<IconBaseProps, any> { } |
<|file_name|>manifestation.rs<|end_file_name|><|fim▁begin|>//! Maintains a Rust installation by installing individual Rust
//! platform components from a distribution server.
use config::Config;
use manifest::{Component, Manifest, TargetedPackage};
use dist::{download_and_check, DownloadCfg, TargetTriple, DEFAULT_DIST_SERVER, File};
use component::{Components, Transaction, TarGzPackage, Package};
use temp;
use errors::*;
use notifications::*;
use rustup_utils::utils;
use prefix::InstallPrefix;
use std::path::Path;
pub const DIST_MANIFEST: &'static str = "multirust-channel-manifest.toml";
pub const CONFIG_FILE: &'static str = "multirust-config.toml";
#[derive(Debug)]
pub struct Manifestation {
installation: Components,
target_triple: TargetTriple
}
#[derive(Debug)]
pub struct Changes {
pub add_extensions: Vec<Component>,
pub remove_extensions: Vec<Component>,
}
impl Changes {
pub fn none() -> Self {
Changes {
add_extensions: Vec::new(),
remove_extensions: Vec::new(),
}
}
}
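// Illustrative sketch (not from the original source): a `Changes` value that
// requests one extension be added and nothing removed. `component_for` is a
// hypothetical helper that builds a `Component` for the current target; the
// field names are assumed from their use elsewhere in this module.
//
//     let changes = Changes {
//         add_extensions: vec![component_for("rust-docs")],
//         remove_extensions: Vec::new(),
//     };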
#[derive(PartialEq, Debug)]
pub enum UpdateStatus { Changed, Unchanged }
impl Manifestation {
/// Open the install prefix for updates from a distribution
/// channel. The install prefix directory does not need to exist;
/// it will be created as needed. If there's an existing install
/// then the rust-install installation format will be verified. A
/// bad installer version is the only reason this will fail.
pub fn open(prefix: InstallPrefix, triple: TargetTriple) -> Result<Self> {
// TODO: validate the triple with the existing install as well
// as the metadata format of the existing install
Ok(Manifestation {
installation: try!(Components::open(prefix)),
target_triple: triple,
})
}
/// Install or update from a given channel manifest, while
/// selecting extension components to add or remove.
///
/// `update` takes a manifest describing a release of Rust (which
/// may be either a freshly-downloaded one, or the same one used
/// for the previous install), as well as lists of extension
/// components to add and remove.
/// From that it schedules a list of components to uninstall and
/// to install to bring the installation up to date. It
/// downloads the components' packages. Then in a Transaction
/// uninstalls old packages and installs new packages, writes the
/// distribution manifest to "rustlib/rustup-dist.toml" and a
/// configuration containing the component name-target pairs to
/// "rustlib/rustup-config.toml".
pub fn update(&self,
new_manifest: &Manifest,
changes: Changes,
download_cfg: &DownloadCfg,
notify_handler: &Fn(Notification)) -> Result<UpdateStatus> {
// Some vars we're going to need a few times
let temp_cfg = download_cfg.temp_cfg;
let prefix = self.installation.prefix();
let ref rel_installed_manifest_path = prefix.rel_manifest_file(DIST_MANIFEST);
let ref installed_manifest_path = prefix.path().join(rel_installed_manifest_path);
let rust_package = try!(new_manifest.get_package("rust"));
let rust_target_package = try!(rust_package.get_target(Some(&self.target_triple)));
// Load the previous dist manifest
let ref old_manifest = try!(self.load_manifest());
// Load the configuration and list of installed components.
let ref config = try!(self.read_config());
// Create the lists of components needed for installation
let component_lists = try!(build_update_component_lists(new_manifest, old_manifest, config,
changes, &rust_target_package,
notify_handler));
let (components_to_uninstall,
components_to_install,
final_component_list) = component_lists;
if components_to_uninstall.is_empty() && components_to_install.is_empty() {
return Ok(UpdateStatus::Unchanged);
}
// Validate that the requested components are available
let unavailable_components: Vec<Component> = components_to_install.iter().filter(|c| {
use manifest::*;
let pkg: Option<&Package> = new_manifest.get_package(&c.pkg).ok();
let target_pkg: Option<&TargetedPackage> = pkg.and_then(|p| p.get_target(c.target.as_ref()).ok());
target_pkg.map(|tp| tp.available) != Some(true)
}).cloned().collect();
if !unavailable_components.is_empty() {
return Err(ErrorKind::RequestedComponentsUnavailable(unavailable_components).into());
}
// Map components to urls and hashes
let mut components_urls_and_hashes: Vec<(Component, String, String)> = Vec::new();
for component in components_to_install {
let package = try!(new_manifest.get_package(&component.pkg));
let target_package = try!(package.get_target(component.target.as_ref()));
let c_u_h = (component, target_package.url.clone(), target_package.hash.clone());
components_urls_and_hashes.push(c_u_h);
}
let altered = temp_cfg.dist_server != DEFAULT_DIST_SERVER;
// Download component packages and validate hashes
let mut things_to_install: Vec<(Component, File)> = Vec::new();
let mut things_downloaded: Vec<String> = Vec::new();
for (component, url, hash) in components_urls_and_hashes {
notify_handler(Notification::DownloadingComponent(&component.pkg,
&self.target_triple,
component.target.as_ref()));
let url = if altered {<|fim▁hole|> url.replace(DEFAULT_DIST_SERVER, temp_cfg.dist_server.as_str())
} else {
url
};
let url_url = try!(utils::parse_url(&url));
let downloaded_file = try!(download_cfg.download(&url_url, &hash, &notify_handler).chain_err(|| {
ErrorKind::ComponentDownloadFailed(component.clone())
}));
things_downloaded.push(hash);
things_to_install.push((component, downloaded_file));
}
// Begin transaction
let mut tx = Transaction::new(prefix.clone(), temp_cfg, notify_handler);
// If the previous installation was from a v1 manifest we need
// to uninstall it first.
tx = try!(self.maybe_handle_v2_upgrade(config, tx));
// Uninstall components
for component in components_to_uninstall {
tx = try!(self.uninstall_component(&component, tx, notify_handler.clone()));
}
// Install components
for (component, installer_file) in things_to_install {
notify_handler(Notification::InstallingComponent(&component.pkg,
&self.target_triple,
component.target.as_ref()));
let package = try!(TarGzPackage::new_file(&installer_file, temp_cfg));
// For historical reasons, the rust-installer component
// names are not the same as the dist manifest component
// names. Some are just the component name some are the
// component name plus the target triple.
let ref name = component.name();
let ref short_name = format!("{}", component.pkg);
// If the package doesn't contain the component that the
// manifest says it does, then somebody must be playing a joke on us.
if !package.contains(name, Some(short_name)) {
return Err(ErrorKind::CorruptComponent(component.pkg.clone()).into());
}
tx = try!(package.install(&self.installation,
name, Some(short_name),
tx));
}
// Install new distribution manifest
let ref new_manifest_str = new_manifest.clone().stringify();
try!(tx.modify_file(rel_installed_manifest_path.to_owned()));
try!(utils::write_file("manifest", installed_manifest_path, new_manifest_str));
// Write configuration.
//
// NB: This configuration is mostly for keeping track of the name/target pairs
// that identify installed components. The rust-installer metadata maintained by
// `Components` *also* tracks what is installed, but it only tracks names, not
// name/target. Needs to be fixed in rust-installer.
let mut config = Config::new();
config.components = final_component_list;
let ref config_str = config.stringify();
let ref rel_config_path = prefix.rel_manifest_file(CONFIG_FILE);
let ref config_path = prefix.path().join(rel_config_path);
try!(tx.modify_file(rel_config_path.to_owned()));
try!(utils::write_file("dist config", config_path, config_str));
// End transaction
tx.commit();
try!(download_cfg.clean(&things_downloaded));
Ok(UpdateStatus::Changed)
}
pub fn uninstall(&self, temp_cfg: &temp::Cfg, notify_handler: &Fn(Notification)) -> Result<()> {
let prefix = self.installation.prefix();
let mut tx = Transaction::new(prefix.clone(), temp_cfg, notify_handler);
// Read configuration and delete it
let rel_config_path = prefix.rel_manifest_file(CONFIG_FILE);
let ref config_str = try!(utils::read_file("dist config", &prefix.path().join(&rel_config_path)));
let config = try!(Config::parse(config_str));
try!(tx.remove_file("dist config", rel_config_path));
for component in config.components {
tx = try!(self.uninstall_component(&component, tx, notify_handler));
}
tx.commit();
Ok(())
}
fn uninstall_component<'a>(&self, component: &Component, mut tx: Transaction<'a>,
notify_handler: &Fn(Notification)) -> Result<Transaction<'a>> {
// For historical reasons, the rust-installer component
// names are not the same as the dist manifest component
// names. Some are just the component name some are the
// component name plus the target triple.
let ref name = component.name();
let ref short_name = format!("{}", component.pkg);
if let Some(c) = try!(self.installation.find(&name)) {
tx = try!(c.uninstall(tx));
} else if let Some(c) = try!(self.installation.find(&short_name)) {
tx = try!(c.uninstall(tx));
} else {
notify_handler(Notification::MissingInstalledComponent(&name));
}
Ok(tx)
}
// Read the config file. Config files are presently only created
// for v2 installations.
pub fn read_config(&self) -> Result<Option<Config>> {
let prefix = self.installation.prefix();
let ref rel_config_path = prefix.rel_manifest_file(CONFIG_FILE);
let ref config_path = prefix.path().join(rel_config_path);
if utils::path_exists(config_path) {
let ref config_str = try!(utils::read_file("dist config", config_path));
Ok(Some(try!(Config::parse(config_str))))
} else {
Ok(None)
}
}
pub fn load_manifest(&self) -> Result<Option<Manifest>> {
let prefix = self.installation.prefix();
let ref old_manifest_path = prefix.manifest_file(DIST_MANIFEST);
if utils::path_exists(old_manifest_path) {
let ref manifest_str = try!(utils::read_file("installed manifest", old_manifest_path));
Ok(Some(try!(Manifest::parse(manifest_str))))
} else {
Ok(None)
}
}
/// Installation using the legacy v1 manifest format
pub fn update_v1(&self,
new_manifest: &[String],
update_hash: Option<&Path>,
temp_cfg: &temp::Cfg,
notify_handler: &Fn(Notification)) -> Result<Option<String>> {
// If there's already a v2 installation then something has gone wrong
if try!(self.read_config()).is_some() {
return Err("the server unexpectedly provided an obsolete version of the distribution manifest".into());
}
let url = new_manifest.iter().find(|u| u.contains(&format!("{}{}", self.target_triple, ".tar.gz")));
if url.is_none() {
return Err(format!("binary package was not provided for '{}'",
self.target_triple.to_string()).into());
}
// Only replace once; the cost of the replacement is negligible.
let url = url.unwrap().replace(DEFAULT_DIST_SERVER, temp_cfg.dist_server.as_str());
notify_handler(Notification::DownloadingComponent("rust",
&self.target_triple,
Some(&self.target_triple)));
use std::path::PathBuf;
let dld_dir = PathBuf::from("bogus");
let dlcfg = DownloadCfg {
dist_root: "bogus",
download_dir: &dld_dir,
temp_cfg: temp_cfg,
notify_handler: notify_handler
};
let dl = try!(download_and_check(&url, update_hash, ".tar.gz", dlcfg));
if dl.is_none() {
return Ok(None);
};
let (installer_file, installer_hash) = dl.unwrap();
let prefix = self.installation.prefix();
notify_handler(Notification::InstallingComponent("rust",
&self.target_triple,
Some(&self.target_triple)));
// Begin transaction
let mut tx = Transaction::new(prefix.clone(), temp_cfg, notify_handler);
// Uninstall components
for component in try!(self.installation.list()) {
tx = try!(component.uninstall(tx));
}
// Install all the components in the installer
let package = try!(TarGzPackage::new_file(&installer_file, temp_cfg));
for component in package.components() {
tx = try!(package.install(&self.installation,
&component, None,
tx));
}
// End transaction
tx.commit();
Ok(Some(installer_hash))
}
// If the previous installation was from a v1 manifest, then it
// doesn't have a configuration or manifest-derived list of
// component/target pairs. Uninstall it using the installer's
// component list before upgrading.
fn maybe_handle_v2_upgrade<'a>(&self,
config: &Option<Config>,
mut tx: Transaction<'a>) -> Result<Transaction<'a>> {
let installed_components = try!(self.installation.list());
let looks_like_v1 = config.is_none() && !installed_components.is_empty();
if !looks_like_v1 { return Ok(tx) }
for component in installed_components {
tx = try!(component.uninstall(tx));
}
Ok(tx)
}
}
/// Returns components to uninstall, install, and the list of all
/// components that will be up to date after the update.
fn build_update_component_lists(
new_manifest: &Manifest,
old_manifest: &Option<Manifest>,
config: &Option<Config>,
changes: Changes,
rust_target_package: &TargetedPackage,
notify_handler: &Fn(Notification),
) -> Result<(Vec<Component>, Vec<Component>, Vec<Component>)> {
// Check some invariants
for component_to_add in &changes.add_extensions {
assert!(rust_target_package.extensions.contains(component_to_add),
"package must contain extension to add");
assert!(!changes.remove_extensions.contains(component_to_add),
"can't both add and remove extensions");
}
for component_to_remove in &changes.remove_extensions {
assert!(rust_target_package.extensions.contains(component_to_remove),
"package must contain extension to remove");
let config = config.as_ref().expect("removing extension on fresh install?");
assert!(config.components.contains(component_to_remove),
"removing package that isn't installed");
}
// The list of components already installed, empty if a new install
let starting_list = config.as_ref().map(|c| c.components.clone()).unwrap_or(Vec::new());
// The list of components we'll have installed at the end
let mut final_component_list = Vec::new();
// The lists of components to uninstall and to install
let mut components_to_uninstall = Vec::new();
let mut components_to_install = Vec::new();
// Find the final list of components we want to be left with when
// we're done: required components, added extensions, and existing
// installed extensions.
// Add components required by the package, according to the
// manifest
for required_component in &rust_target_package.components {
final_component_list.push(required_component.clone());
}
// Add requested extension components
for extension in &changes.add_extensions {
final_component_list.push(extension.clone());
}
// Add extensions that are already installed
for existing_component in &starting_list {
let is_extension = rust_target_package.extensions.contains(existing_component);
let is_removed = changes.remove_extensions.contains(existing_component);
let is_already_included = final_component_list.contains(existing_component);
if is_extension && !is_removed && !is_already_included{
final_component_list.push(existing_component.clone());
}
}
// If this is a full upgrade then the list of components to
// uninstall is all that are currently installed, and those
// to install the final list. It's a complete reinstall.
//
// If it's a modification then the components to uninstall are
// those that are currently installed but not in the final list.
// To install are those on the final list but not already
// installed.
let just_modifying_existing_install = old_manifest.as_ref() == Some(new_manifest);
if !just_modifying_existing_install {
components_to_uninstall = starting_list.clone();
components_to_install = final_component_list.clone();
} else {
for existing_component in &starting_list {
if !final_component_list.contains(existing_component) {
components_to_uninstall.push(existing_component.clone())
}
}
for component in &final_component_list {
if !starting_list.contains(component) {
components_to_install.push(component.clone());
} else {
if changes.add_extensions.contains(&component) {
notify_handler(Notification::ComponentAlreadyInstalled(&component));
}
}
}
}
Ok((components_to_uninstall, components_to_install, final_component_list))
}<|fim▁end|> | |
<|file_name|>phonegap.min.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
/*!
* Module dependencies.
*/
var CLI = require('../lib/cli'),
argv = require('optimist').boolean('d')
.boolean('device')
.boolean('e')<|fim▁hole|> .boolean('V')
.boolean('verbose')
.boolean('v')
.boolean('version')
.boolean('h')
.boolean('help')
.argv;
/*!
* Run the command-line client.
*/
var cli = new CLI().argv(argv);<|fim▁end|> | .boolean('emulator') |
<|file_name|>wire.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
from __future__ import absolute_import, division
import time
import struct
from zope.interface import implementer
from twisted.internet import protocol, interfaces
from twisted.python.compat import _PY3
class Echo(protocol.Protocol):
"""As soon as any data is received, write it back (RFC 862)"""
def dataReceived(self, data):
self.transport.write(data)
class Discard(protocol.Protocol):
"""Discard any received data (RFC 863)"""
def dataReceived(self, data):
# I'm ignoring you, nyah-nyah
pass
@implementer(interfaces.IProducer)
class Chargen(protocol.Protocol):
"""Generate repeating noise (RFC 864)"""
noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
def connectionMade(self):
self.transport.registerProducer(self, 0)
def resumeProducing(self):
self.transport.write(self.noise)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class QOTD(protocol.Protocol):
"""Return a quote of the day (RFC 865)"""
def connectionMade(self):
self.transport.write(self.getQuote())
self.transport.loseConnection()
def getQuote(self):
"""Return a quote. May be overrriden in subclasses."""
return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
"""Return list of active users (RFC 866)"""
def connectionMade(self):
self.transport.write(self.getUsers())
self.transport.loseConnection()
def getUsers(self):
"""Return active users. Override in subclasses."""
return "root\r\n"
class Daytime(protocol.Protocol):
"""Send back the daytime in ASCII form (RFC 867)"""
def connectionMade(self):
self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
self.transport.loseConnection()
class Time(protocol.Protocol):
"""Send back the time in machine readable form (RFC 868)"""
def connectionMade(self):
# is this correct only for 32-bit machines?
result = struct.pack("!i", int(time.time()))
self.transport.write(result)
self.transport.loseConnection()
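# Illustrative sketch (not part of this module): serving the Echo protocol on
# an arbitrary high port. The canonical echo port (7) needs privileges, so a
# hypothetical port 8007 is used here.
#
#     from twisted.internet import reactor
#     factory = protocol.ServerFactory()
#     factory.protocol = Echo
#     reactor.listenTCP(8007, factory)
#     reactor.run()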
__all__ = ["Echo", "Discard", "Chargen", "QOTD", "Who", "Daytime", "Time"]<|fim▁hole|> for name in __all__[:]:
if name not in __all3__:
__all__.remove(name)
del globals()[name]
del name, __all3__<|fim▁end|> |
if _PY3:
__all3__ = ["Echo"] |
<|file_name|>observers.ts<|end_file_name|><|fim▁begin|>import { ModelData, ModelReference } from './dataTypes';
import { Plump } from './plump';
import { Observable } from 'rxjs';
import * as deepEqual from 'deep-equal';<|fim▁hole|> o: Observable<ModelData>,
attr: string,
): Observable<T> {
return o
.filter(v => !!v)
.map(v => v.attributes[attr])
.distinctUntilChanged<T>(deepEqual);
}
export function observeChild(
o: Observable<ModelData>,
rel: string,
plump: Plump,
): Observable<ModelData[]> {
return observeList(o.filter(v => !!v).map(v => v.relationships[rel]), plump);
}
export function observeList(
list: Observable<ModelReference[]>,
plump: Plump,
): Observable<(ModelData)[]> {
const cache = {};
return list
.distinctUntilChanged(deepEqual)
.map(children => {
return children.map(item => {
if (!cache[item.id]) {
cache[item.id] = plump.find(item);
}
cache[item.id].meta = item.meta;
return cache[item.id];
});
})
.map(refs => {
return refs.map(ref => {
return {
model: ref,
meta: ref.meta,
};
});
})
.switchMap(coms => {
if (!coms || coms.length === 0) {
return Observable.of([]);
} else {
return Observable.combineLatest(
coms.map(ed =>
ed.model
.asObservable(['attributes'])
.catch(() => Observable.of(ed.model.empty()))
.map(v => {
return Object.assign(v, { meta: ed.meta });
}),
),
).map(children => children.filter(child => !child.empty));
}
})
.startWith([])
.shareReplay(1);
}<|fim▁end|> |
export function observeAttribute<T>( |
<|file_name|>validate.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
from math import isinf, isnan
from warnings import warn
NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE
"SBO:0000628", # DEMAND
"SBO:0000629", # BIOMASS
"SBO:0000631", # PSEUDOREACTION
"SBO:0000632", # SINK
}
def check_mass_balance(model):
unbalanced = {}
for reaction in model.reactions:
if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS:
balance = reaction.check_mass_balance()
if balance:
unbalanced[reaction] = balance
return unbalanced
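# Illustrative usage sketch (assumes `model` is an already-loaded cobra model):
#
#     unbalanced = check_mass_balance(model)
#     for reaction, imbalance in unbalanced.items():
#         print(reaction.id, imbalance)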
# no longer strictly necessary, done by optlang solver interfaces
def check_reaction_bounds(model):
warn("no longer necessary, done by optlang solver interfaces",
DeprecationWarning)
errors = []
for reaction in model.reactions:
if reaction.lower_bound > reaction.upper_bound:
errors.append("Reaction '%s' has lower bound > upper bound" %
reaction.id)
if isinf(reaction.lower_bound):
errors.append("Reaction '%s' has infinite lower_bound" %
reaction.id)
elif isnan(reaction.lower_bound):
errors.append("Reaction '%s' has NaN for lower_bound" %
reaction.id)
if isinf(reaction.upper_bound):
errors.append("Reaction '%s' has infinite upper_bound" %
reaction.id)
elif isnan(reaction.upper_bound):
errors.append("Reaction '%s' has NaN for upper_bound" %
reaction.id)
return errors
def check_metabolite_compartment_formula(model):
errors = []
for met in model.metabolites:
if met.formula is not None and len(met.formula) > 0:
if not met.formula.isalnum():
errors.append("Metabolite '%s' formula '%s' not alphanumeric" %<|fim▁hole|><|fim▁end|> | (met.id, met.formula))
return errors |
<|file_name|>TwitchFollowedStreamsResponse.java<|end_file_name|><|fim▁begin|><|fim▁hole|>import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public class TwitchFollowedStreamsResponse {
private TwitchStream[] streams;
public TwitchStream[] getStreams() {
return streams.clone();
}
public void setStreams(TwitchStream[] streams) {
this.streams = streams.clone();
}
@Override
public String toString() {
return "TwitchFollowedStreamsResponse [streams=" + Arrays.toString(streams)
+ "]";
}
}<|fim▁end|> | package com.derpgroup.livefinder.manager;
import java.util.Arrays;
|
<|file_name|>core.py<|end_file_name|><|fim▁begin|>from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
from functools import wraps
import gzip
from inspect import getargspec
from itertools import (
combinations,
count,
product,
)
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
from sys import _getframe
import tempfile
from logbook import TestHandler
from mock import patch
from nose.tools import nottest
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import itervalues, iteritems, with_metaclass
from six.moves import filter, map
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from catalyst.assets import AssetFinder, AssetDBWriter
from catalyst.assets.synthetic import make_simple_equity_info
from catalyst.data.data_portal import DataPortal
from catalyst.data.loader import get_benchmark_filename, INDEX_MAPPING
from catalyst.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY
)
from catalyst.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentWriter,
)
from catalyst.finance.blotter import Blotter
from catalyst.finance.trading import TradingEnvironment
from catalyst.finance.order import ORDER_STATUS
from catalyst.lib.labelarray import LabelArray
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.engine import SimplePipelineEngine
from catalyst.pipeline.factors import CustomFactor
from catalyst.pipeline.loaders.testing import make_seeded_random_loader
from catalyst.utils import security_list
from catalyst.utils.calendars import get_calendar
from catalyst.utils.input_validation import expect_dimensions
from catalyst.utils.numpy_utils import as_column, isnat
from catalyst.utils.pandas_utils import timedelta_to_integral_seconds
from catalyst.utils.paths import ensure_directory
from catalyst.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH)
def drain_catalyst(test, catalyst):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in catalyst:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def check_algo_results(test,
results,
expected_transactions_count=None,
expected_order_count=None,
expected_positions_count=None,
sid=None):
if expected_transactions_count is not None:
txns = flatten_list(results["transactions"])
test.assertEqual(expected_transactions_count, len(txns))
if expected_positions_count is not None:
raise NotImplementedError
if expected_order_count is not None:
# de-dup orders on id, because orders are put back into perf packets
# whenever a txn is filled
orders = set([order['id'] for order in
flatten_list(results["orders"])])
test.assertEqual(expected_order_count, len(orders))
def flatten_list(list):
return [item for sublist in list for item in sublist]
def assert_single_position(test, catalyst):
output, transaction_count = drain_catalyst(test, catalyst)
if 'expected_transactions' in test.catalyst_test_config:
test.assertEqual(
test.catalyst_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.catalyst_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.catalyst_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from catalyst.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def chrange(start, stop):
"""
Construct an iterable of length-1 strings beginning with `start` and ending
with `stop`.
Parameters
----------
start : str
The first character.
stop : str
The last character.
Returns
-------
chars: iterable[str]
Iterable of strings beginning with start and ending with stop.
Examples
--------
>>> chrange('A', 'C')
['A', 'B', 'C']
"""
return list(map(chr, range(ord(start), ord(stop) + 1)))
def make_trade_data_for_asset_info(dates,
asset_info,
price_start,
price_step_by_date,
price_step_by_sid,
volume_start,
volume_step_by_date,
volume_step_by_sid,
frequency,
writer=None):
"""
Convert the asset info dataframe into a dataframe of trade data for each
sid, and write to the writer if provided. Write zeros for locations where
assets did not exist. Return a dict of the dataframes, keyed by sid.
"""
trade_data = {}
sids = asset_info.index
price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
price_date_deltas = (np.arange(len(dates), dtype=float64) *
price_step_by_date)
prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
for j, sid in enumerate(sids):
start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']]
# Normalize here so that we still generate non-NaN values on the minutes
# for an asset's last trading day.
for i, date in enumerate(dates.normalize()):
if not (start_date <= date <= end_date):
prices[i, j] = 0
volumes[i, j] = 0
df = pd.DataFrame(
{
"open": prices[:, j],
"high": prices[:, j],
"low": prices[:, j],
"close": prices[:, j],
"volume": volumes[:, j],
},
index=dates,
)
if writer:
writer.write_sid(sid, df)
trade_data[sid] = df
return trade_data
def check_allclose(actual,
desired,
rtol=1e-07,
atol=0,
err_msg='',
verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
return assert_allclose(
actual,
desired,
atol=atol,
rtol=rtol,
err_msg=err_msg,
verbose=verbose,
)
def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
np.assert_array_equal
"""
assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
if isinstance(x, LabelArray):
# Check that both arrays have missing values in the same locations...
assert_array_equal(
x.is_missing(),
y.is_missing(),
err_msg=err_msg,
verbose=verbose,
)
# ...then check the actual values as well.
x = x.as_string_array()
y = y.as_string_array()
elif x.dtype.kind in 'mM':
x_isnat = isnat(x)
y_isnat = isnat(y)
assert_array_equal(
x_isnat,
y_isnat,
err_msg="NaTs not equal",
verbose=verbose,
)
# Fill NaTs with zero for comparison.
x = np.where(x_isnat, np.zeros_like(x), x)
y = np.where(y_isnat, np.zeros_like(y), y)
return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
first_session = trading_calendar.minute_to_session_label(
minutes[0], direction="none"
)
last_session = trading_calendar.minute_to_session_label(
minutes[-1], direction="none"<|fim▁hole|> write_bcolz_minute_data(
trading_calendar,
sessions,
tempdir.path,
create_minute_bar_data(minutes, sids),
)
return tempdir.path
def create_minute_bar_data(minutes, sids):
length = len(minutes)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
'open': np.arange(length) + 10 + sid_idx,
'high': np.arange(length) + 15 + sid_idx,
'low': np.arange(length) + 8 + sid_idx,
'close': np.arange(length) + 10 + sid_idx,
'volume': 100 + sid_idx,
},
index=minutes,
)
def create_daily_bar_data(sessions, sids):
length = len(sessions)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
"open": (np.array(range(10, 10 + length)) + sid_idx),
"high": (np.array(range(15, 15 + length)) + sid_idx),
"low": (np.array(range(8, 8 + length)) + sid_idx),
"close": (np.array(range(10, 10 + length)) + sid_idx),
"volume": np.array(range(100, 100 + length)) + sid_idx,
"day": [session.value for session in sessions]
},
index=sessions,
)
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
path = os.path.join(tempdir.path, "testdaily.bcolz")
BcolzDailyBarWriter(path, trading_calendar,
sim_params.start_session,
sim_params.end_session).write(
create_daily_bar_data(sim_params.sessions, sids),
)
return path
def create_data_portal(asset_finder, tempdir, sim_params, sids,
trading_calendar, adjustment_reader=None):
if sim_params.data_frequency == "daily":
daily_path = write_daily_data(tempdir, sim_params, sids,
trading_calendar)
equity_daily_reader = BcolzDailyBarReader(daily_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
adjustment_reader=adjustment_reader
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
minute_path = write_minute_data(trading_calendar, tempdir, minutes,
sids)
equity_minute_reader = BcolzMinuteBarReader(minute_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
adjustment_reader=adjustment_reader
)
def write_bcolz_minute_data(trading_calendar, days, path, data):
BcolzMinuteBarWriter(
path,
trading_calendar,
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY
).write(data)
def create_minute_df_for_asset(trading_calendar,
start_dt,
end_dt,
interval=1,
start_val=1,
minute_blacklist=None):
asset_minutes = trading_calendar.minutes_for_sessions_in_range(
start_dt, end_dt
)
minutes_count = len(asset_minutes)
minutes_arr = np.array(range(start_val, start_val + minutes_count))
df = pd.DataFrame(
{
"open": minutes_arr + 1,
"high": minutes_arr + 2,
"low": minutes_arr - 1,
"close": minutes_arr,
"volume": 100 * minutes_arr,
},
index=asset_minutes,
)
if interval > 1:
counter = 0
while counter < len(minutes_arr):
df[counter:(counter + interval - 1)] = 0
counter += interval
if minute_blacklist is not None:
for minute in minute_blacklist:
df.loc[minute] = 0
return df
def create_daily_df_for_asset(trading_calendar, start_day, end_day,
interval=1):
days = trading_calendar.sessions_in_range(start_day, end_day)
days_count = len(days)
days_arr = np.arange(days_count) + 2
df = pd.DataFrame(
{
"open": days_arr + 1,
"high": days_arr + 2,
"low": days_arr - 1,
"close": days_arr,
"volume": days_arr * 100,
},
index=days,
)
if interval > 1:
# only keep every 'interval' rows
for idx, _ in enumerate(days_arr):
if (idx + 1) % interval != 0:
df["open"].iloc[idx] = 0
df["high"].iloc[idx] = 0
df["low"].iloc[idx] = 0
df["close"].iloc[idx] = 0
df["volume"].iloc[idx] = 0
return df
def trades_by_sid_to_dfs(trades_by_sid, index):
for sidint, trades in iteritems(trades_by_sid):
opens = []
highs = []
lows = []
closes = []
volumes = []
for trade in trades:
opens.append(trade.open_price)
highs.append(trade.high)
lows.append(trade.low)
closes.append(trade.close_price)
volumes.append(trade.volume)
yield sidint, pd.DataFrame(
{
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
},
index=index,
)
def create_data_portal_from_trade_history(asset_finder, trading_calendar,
tempdir, sim_params, trades_by_sid):
if sim_params.data_frequency == "daily":
path = os.path.join(tempdir.path, "testdaily.bcolz")
writer = BcolzDailyBarWriter(
path, trading_calendar,
sim_params.start_session,
sim_params.end_session
)
writer.write(
trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
)
equity_daily_reader = BcolzDailyBarReader(path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
daily_reader=equity_daily_reader,
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
length = len(minutes)
assets = {}
for sidint, trades in iteritems(trades_by_sid):
opens = np.zeros(length)
highs = np.zeros(length)
lows = np.zeros(length)
closes = np.zeros(length)
volumes = np.zeros(length)
for trade in trades:
# put them in the right place
idx = minutes.searchsorted(trade.dt)
opens[idx] = trade.open_price * 1000
highs[idx] = trade.high * 1000
lows[idx] = trade.low * 1000
closes[idx] = trade.close_price * 1000
volumes[idx] = trade.volume
assets[sidint] = pd.DataFrame({
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
"dt": minutes
}).set_index("dt")
write_bcolz_minute_data(
trading_calendar,
sim_params.sessions,
tempdir.path,
assets
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
class FakeDataPortal(DataPortal):
def __init__(self, env, trading_calendar=None,
first_trading_day=None):
if trading_calendar is None:
trading_calendar = get_calendar("NYSE")
super(FakeDataPortal, self).__init__(env.asset_finder,
trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
if field == "volume":
return 100
else:
return 1.0
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
data_frequency, ffill=True):
if frequency == "1d":
end_idx = \
self.trading_calendar.all_sessions.searchsorted(end_dt)
days = self.trading_calendar.all_sessions[
(end_idx - bar_count + 1):(end_idx + 1)
]
df = pd.DataFrame(
np.full((bar_count, len(assets)), 100.0),
index=days,
columns=assets
)
return df
class FetcherDataPortal(DataPortal):
"""
Mock dataportal that returns fake data for history and non-fetcher
spot value.
"""
def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
# if this is a fetcher field, exercise the regular code path
if self._is_extra_source(asset, field, self._augmented_sources_map):
return super(FetcherDataPortal, self).get_spot_value(
asset, field, dt, data_frequency)
# otherwise just return a fixed value
return int(asset)
# XXX: These aren't actually the methods that are used by the superclasses,
# so these don't do anything, and this class will likely produce unexpected
# results for history().
def _get_daily_window_for_sid(self, asset, field, days_in_window,
extra_slot=True):
return np.arange(days_in_window, dtype=np.float64)
def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
"""Create a temporary assets sqlite database.
This is meant to be used as a context manager.
Parameters
----------
url : string
The URL for the database connection.
**frames
The frames to pass to the AssetDBWriter.
By default this maps equities:
('A', 'B', 'C') -> map(ord, 'ABC')
See Also
--------
empty_assets_db
tmp_asset_finder
"""
_default_equities = sentinel('_default_equities')
def __init__(self,
url='sqlite:///:memory:',
equities=_default_equities,
**frames):
self._url = url
self._eng = None
if equities is self._default_equities:
equities = make_simple_equity_info(
list(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
)
frames['equities'] = equities
self._frames = frames
self._eng = None # set in enter and exit
def __enter__(self):
self._eng = eng = create_engine(self._url)
AssetDBWriter(eng).write(**self._frames)
return eng
def __exit__(self, *excinfo):
assert self._eng is not None, '_eng was not set in __enter__'
self._eng.dispose()
self._eng = None
def empty_assets_db():
"""Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db
"""
return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
"""Create a temporary asset finder using an in memory sqlite db.
Parameters
----------
url : string
The URL for the database connection.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
tmp_assets_db
"""
def __init__(self,
url='sqlite:///:memory:',
finder_cls=AssetFinder,
**frames):
self._finder_cls = finder_cls
super(tmp_asset_finder, self).__init__(url=url, **frames)
def __enter__(self):
return self._finder_cls(super(tmp_asset_finder, self).__enter__())
def empty_asset_finder():
"""Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder
"""
return tmp_asset_finder(equities=None)
class tmp_trading_env(tmp_asset_finder):
"""Create a temporary trading environment.
Parameters
----------
load : callable, optional
Function that returns benchmark returns and treasury curves.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
empty_trading_env
tmp_asset_finder
"""
def __init__(self, load=None, *args, **kwargs):
super(tmp_trading_env, self).__init__(*args, **kwargs)
self._load = load
def __enter__(self):
return TradingEnvironment(
load=self._load,
asset_db_path=super(tmp_trading_env, self).__enter__().engine,
)
def empty_trading_env():
return tmp_trading_env(equities=None)
class SubTestFailures(AssertionError):
def __init__(self, *failures):
self.failures = failures
def __str__(self):
return 'failures:\n %s' % '\n '.join(
'\n '.join((
', '.join('%s=%r' % item for item in scope.items()),
'%s: %s' % (type(exc).__name__, exc),
)) for scope, exc in self.failures
)
@nottest
def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``catalyst.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
function will be run by iterating over the ``iterator`` and *unpacking the
values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
*name : iterator[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
catalyst.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception as e:
if not names:
names = count()
failures.append((dict(zip(names, scope)), e))
if failures:
raise SubTestFailures(*failures)
return wrapped
return dec
class MockDailyBarReader(object):
def get_value(self, col, sid, dt):
return 100
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
mergers=None):
path = tempdir.getpath("test_adjustments.db")
SQLiteAdjustmentWriter(path, MockDailyBarReader(), days).write(
*create_mock_adjustment_data(splits, dividends, mergers)
)
return path
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`.
"""
if compare_nat_equal and left is pd.NaT and right is pd.NaT:
return
return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
return concat(combinations(values, i) for i in range(len(values) + 1))
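# For example (illustrative):
#     list(powerset([1, 2]))  ->  [(), (1,), (2,), (1, 2)]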
def to_series(knowledge_dates, earning_dates):
"""
Helper for converting two lists of date strings into a Series of datetimes
(knowledge dates as the index, earnings dates as the values).
This is just for making the test cases more readable.
"""
return pd.Series(
index=pd.to_datetime(knowledge_dates),
data=pd.to_datetime(earning_dates),
)
def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz='utc')
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
yield (trading_days[trading_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
A contextManager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
yield SimplePipelineEngine(get_loader, calendar, finder)
def parameter_space(__fail_fast=False, **params):
"""
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
The decorated test function will be called with the cross-product of all
possible inputs
Examples
--------
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
catalyst.testing.subtest
"""
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == 'self':
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
unspecified = set(argnames) - set(params)
if unspecified:
raise AssertionError(
"Function arguments %s were not "
"supplied to parameter_space()." % extra
)
def make_param_sets():
return product(*(params[name] for name in argnames))
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(f)(*args, **kwargs)
return wrapped
return decorator
def create_empty_dividends_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('ex_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
('record_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('amount', 'float64'),
('sid', 'int32'),
],
),
index=pd.DatetimeIndex([], tz='UTC'),
)
def create_empty_splits_mergers_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('effective_date', 'int64'),
('ratio', 'float64'),
('sid', 'int64'),
],
),
index=pd.DatetimeIndex([]),
)
def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
alternating = np.empty(shape, dtype=np.bool)
for row in alternating:
row[::2] = first_value
row[1::2] = not(first_value)
first_value = not(first_value)
return alternating
def make_cascading_boolean_array(shape, first_value=True):
"""
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
cascading = np.full(shape, not(first_value), dtype=np.bool)
ending_col = shape[1] - 1
for row in cascading:
if ending_col > 0:
row[:ending_col] = first_value
ending_col -= 1
else:
break
return cascading
@expect_dimensions(array=2)
def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
return np.apply_along_axis(rand.permutation, 1, array)
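# Illustrative example (sketch): each row is shuffled independently but
# deterministically for a given seed.
#
#     permute_rows(0, np.arange(6).reshape(2, 3))
#     # might yield, e.g., array([[2, 0, 1],
#     #                           [4, 3, 5]])  # exact order depends on the seed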
@nottest
def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
return handler
def write_compressed(path, content):
"""
Write a compressed (gzipped) file to `path`.
"""
with gzip.open(path, 'wb') as f:
f.write(content)
def read_compressed(path):
"""
Read a compressed (gzipped) file from `path`.
"""
with gzip.open(path, 'rb') as f:
return f.read()
catalyst_git_root = abspath(
join(realpath(dirname(__file__)), '..', '..'),
)
@nottest
def test_resource_path(*path_parts):
return os.path.join(catalyst_git_root, 'tests', 'resources', *path_parts)
@contextmanager
def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
for key, value in values.iteritems():
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
for old_key, old_value in old_values.iteritems():
if old_value is None:
# Value was not present when we entered, so del it out if it's
# still present.
try:
del os.environ[old_key]
except KeyError:
pass
else:
# Restore the old value.
os.environ[old_key] = old_value
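# Illustrative usage of patch_os_environment (editorial example, not part of the
# original module); the variable names below are hypothetical:
#
#     with patch_os_environment(remove=['TZ'], CATALYST_TEST_VAR='1'):
#         ...  # code under test sees CATALYST_TEST_VAR='1' and no TZ
#     # on exit the previous values are restored (or removed if absent before)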
class tmp_dir(TempDirectory, object):
"""New style class that wrapper for TempDirectory in python 2.
"""
pass
class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)):
"""A helper for tmp_bcolz_equity_minute_bar_reader and
tmp_bcolz_equity_daily_bar_reader.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
"""
@abstractproperty
def _reader_cls(self):
raise NotImplementedError('_reader_cls')
@abstractmethod
def _write(self, env, days, path, data):
raise NotImplementedError('_write')
def __init__(self, env, days, data, path=None):
super(_TmpBarReader, self).__init__(path=path)
self._env = env
self._days = days
self._data = data
def __enter__(self):
tmpdir = super(_TmpBarReader, self).__enter__()
env = self._env
try:
self._write(
env,
self._days,
tmpdir.path,
self._data,
)
return self._reader_cls(tmpdir.path)
except:
self.__exit__(None, None, None)
raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
"""A temporary BcolzMinuteBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : iterable[(int, pd.DataFrame)]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
tmp_bcolz_equity_daily_bar_reader
"""
_reader_cls = BcolzMinuteBarReader
_write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
"""A temporary BcolzDailyBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
tmp_bcolz_equity_minute_bar_reader
"""
_reader_cls = BcolzDailyBarReader
@staticmethod
def _write(env, days, path, data):
BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
'attempted to call read_csv on %r which is not in the url map' %
filepath_or_buffer,
)
with patch.object(module, 'read_csv', patched_read_csv):
yield
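# Illustrative usage of patch_read_csv (editorial example, not part of the
# original module); the URL and local path are hypothetical:
#
#     with patch_read_csv({'http://example.com/prices.csv': '/tmp/prices.csv'}):
#         df = pd.read_csv('http://example.com/prices.csv')  # reads the local copy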
def copy_market_data(src_market_data_dir, dest_root_dir):
symbol = 'SPY'
filenames = (get_benchmark_filename(symbol), INDEX_MAPPING[symbol][1])
ensure_directory(os.path.join(dest_root_dir, 'data'))
for filename in filenames:
shutil.copyfile(
os.path.join(src_market_data_dir, filename),
os.path.join(dest_root_dir, 'data', filename)
)
@curry
def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
_getframe(2).f_globals.setdefault('__test__', {})[
f.__name__ if name is None else name
] = f
return f
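# Illustrative usage of ensure_doctest (editorial example, not part of the
# original module): curried helpers are not discovered by doctest by default,
# so register them explicitly under a readable name:
#
#     ensure_doctest(my_curried_helper, name='my_curried_helper')  # hypothetical object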
class RecordBatchBlotter(Blotter):
"""Blotter that tracks how its batch_order method was called.
"""
def __init__(self, data_frequency):
super(RecordBatchBlotter, self).__init__(data_frequency)
self.order_batch_called = []
def batch_order(self, *args, **kwargs):
self.order_batch_called.append((args, kwargs))
return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
####################################
# Shared factors for pipeline tests.
####################################
class AssetID(CustomFactor):
"""
CustomFactor that returns the AssetID of each asset.
Useful for providing a Factor that produces a different value for each
asset.
"""
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets
class AssetIDPlusDay(CustomFactor):
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets + today.day
class OpenPrice(CustomFactor):
window_length = 1
inputs = [USEquityPricing.open]
def compute(self, today, assets, out, open):
out[:] = open<|fim▁end|> | )
sessions = trading_calendar.sessions_in_range(first_session, last_session)
|
<|file_name|>PartitionAlloc.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2013 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "flutter/sky/engine/wtf/PartitionAlloc.h"
#include <string.h>
#ifndef NDEBUG
#include <stdio.h>
#endif
// Two partition pages are used as guard / metadata page so make sure the super
// page size is bigger.
COMPILE_ASSERT(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, ok_super_page_size);
COMPILE_ASSERT(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), ok_super_page_multiple);
// Four system pages gives us room to hack out a still-guard-paged piece
// of metadata in the middle of a guard partition page.
COMPILE_ASSERT(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, ok_partition_page_size);
COMPILE_ASSERT(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), ok_partition_page_multiple);
COMPILE_ASSERT(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, PartitionPage_not_too_big);
COMPILE_ASSERT(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, PartitionBucket_not_too_big);
COMPILE_ASSERT(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, PartitionSuperPageExtentEntry_not_too_big);
COMPILE_ASSERT(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, page_metadata_fits_in_hole);
// Check that some of our zanier calculations worked out as expected.
COMPILE_ASSERT(WTF::kGenericSmallestBucket == 8, generic_smallest_bucket);
COMPILE_ASSERT(WTF::kGenericMaxBucketed == 983040, generic_max_bucketed);
namespace WTF {
int PartitionRootBase::gInitializedLock = 0;
bool PartitionRootBase::gInitialized = false;
PartitionPage PartitionRootBase::gSeedPage;
PartitionBucket PartitionRootBase::gPagedBucket;
static size_t partitionBucketNumSystemPages(size_t size)
{
// This works out reasonably for the current bucket sizes of the generic
// allocator, and the current values of partition page size and constants.
// Specifically, we have enough room to always pack the slots perfectly into
// some number of system pages. The only waste is the waste associated with
// unfaulted pages (i.e. wasted address space).
// TODO: we end up using a lot of system pages for very small sizes. For
// example, we'll use 12 system pages for slot size 24. The slot size is
// so small that the waste would be tiny with just 4, or 1, system pages.
// Later, we can investigate whether there are anti-fragmentation benefits
// to using fewer system pages.
double bestWasteRatio = 1.0f;
size_t bestPages = 0;
if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
ASSERT(!(size % kSystemPageSize));
return size / kSystemPageSize;
}
ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
for (size_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t pageSize = kSystemPageSize * i;
size_t numSlots = pageSize / size;
size_t waste = pageSize - (numSlots * size);
// Leaving a page unfaulted is not free; the page will occupy an empty page table entry.
// Make a simple attempt to account for that.
size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1);
size_t numUnfaultedPages = numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages) : 0;
waste += sizeof(void*) * numUnfaultedPages;
double wasteRatio = (double) waste / (double) pageSize;
if (wasteRatio < bestWasteRatio) {
bestWasteRatio = wasteRatio;
bestPages = i;
}
}
ASSERT(bestPages > 0);
return bestPages;
}
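// Worked example (editorial illustration, not from the original source), assuming
// 4096-byte system pages and a 320-byte slot: three system pages hold
// 12288 / 320 = 38 slots and waste 128 bytes (~1.0%), while four pages hold
// 16384 / 320 = 51 slots and waste only 64 bytes (~0.4%). Of those two candidates
// the loop above prefers four pages, and it keeps scanning up to
// kMaxSystemPagesPerSlotSpan for the smallest waste ratio.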
static void parititonAllocBaseInit(PartitionRootBase* root)
{
ASSERT(!root->initialized);
spinLockLock(&PartitionRootBase::gInitializedLock);
if (!PartitionRootBase::gInitialized) {
PartitionRootBase::gInitialized = true;
// We mark the seed page as free to make sure it is skipped by our
// logic to find a new active page.
PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
}
spinLockUnlock(&PartitionRootBase::gInitializedLock);
root->initialized = true;
root->totalSizeOfCommittedPages = 0;
root->totalSizeOfSuperPages = 0;
root->nextSuperPage = 0;
root->nextPartitionPage = 0;
root->nextPartitionPageEnd = 0;
root->firstExtent = 0;
root->currentExtent = 0;
memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing));
root->globalEmptyPageRingIndex = 0;
// This is a "magic" value so we can test if a root pointer is valid.
root->invertedSelf = ~reinterpret_cast<uintptr_t>(root);
}
static void partitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root)
{
bucket->activePagesHead = &PartitionRootGeneric::gSeedPage;
bucket->freePagesHead = 0;
bucket->numFullPages = 0;
bucket->numSystemPagesPerSlotSpan = partitionBucketNumSystemPages(bucket->slotSize);
}
void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation)
{
parititonAllocBaseInit(root);
root->numBuckets = numBuckets;
root->maxAllocation = maxAllocation;
size_t i;
for (i = 0; i < root->numBuckets; ++i) {
PartitionBucket* bucket = &root->buckets()[i];
if (!i)
bucket->slotSize = kAllocationGranularity;
else
bucket->slotSize = i << kBucketShift;
partitionBucketInitBase(bucket, root);
}
}
void partitionAllocGenericInit(PartitionRootGeneric* root)
{
parititonAllocBaseInit(root);
root->lock = 0;
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
// Order is 6 (1 << 6-1)==32 is highest bit set.
// orderIndex is the next three MSB == 010 == 2.
// subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex).
size_t order;
for (order = 0; order <= kBitsPerSizet; ++order) {
size_t orderIndexShift;
if (order < kGenericNumBucketsPerOrderBits + 1)
orderIndexShift = 0;
else
orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1);
root->orderIndexShifts[order] = orderIndexShift;
size_t subOrderIndexMask;
if (order == kBitsPerSizet) {
// This avoids invoking undefined behavior for an excessive shift.
subOrderIndexMask = static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
} else {
subOrderIndexMask = ((1 << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1);
}
root->orderSubIndexMasks[order] = subOrderIndexMask;
}
// Set up the actual usable buckets first.
// Note that typical values (i.e. min allocation size of 8) will result in
// invalid buckets (size==9 etc. or more generally, size is not a multiple
// of the smallest allocation granularity).
// We avoid them in the bucket lookup map, but we tolerate them to keep the
// code simpler and the structures more generic.
size_t i, j;
size_t currentSize = kGenericSmallestBucket;
size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
PartitionBucket* bucket = &root->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
bucket->slotSize = currentSize;
partitionBucketInitBase(bucket, root);
// Disable invalid buckets so that touching them faults.
if (currentSize % kGenericSmallestBucket)
bucket->activePagesHead = 0;
currentSize += currentIncrement;
++bucket;
}
currentIncrement <<= 1;
}
ASSERT(currentSize == 1 << kGenericMaxBucketedOrder);
ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder));
// Then set up the fast size -> bucket lookup table.
bucket = &root->buckets[0];
PartitionBucket** bucketPtr = &root->bucketLookups[0];
for (order = 0; order <= kBitsPerSizet; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) {
// Use the bucket of finest granularity for malloc(0) etc.
*bucketPtr++ = &root->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
*bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
} else {
PartitionBucket* validBucket = bucket;
// Skip over invalid buckets.
while (validBucket->slotSize % kGenericSmallestBucket)
validBucket++;
*bucketPtr++ = validBucket;
bucket++;
}
}
}
ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder));
ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucketPtr = &PartitionRootGeneric::gPagedBucket;
}
static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
{
// Failure here indicates a memory leak.
bool noLeaks = !bucket->numFullPages;
PartitionPage* page = bucket->activePagesHead;
while (page) {
if (page->numAllocatedSlots)
noLeaks = false;
page = page->nextPage;
}
return noLeaks;
}
static void partitionAllocBaseShutdown(PartitionRootBase* root)
{
ASSERT(root->initialized);
root->initialized = false;
// Now that we've examined all partition pages in all buckets, it's safe
// to free all our super pages. We first collect the super page pointers
// on the stack because some of them are themselves stored in super pages.
char* superPages[kMaxPartitionSize / kSuperPageSize];
size_t numSuperPages = 0;
PartitionSuperPageExtentEntry* entry = root->firstExtent;
while (entry) {
char* superPage = entry->superPageBase;
while (superPage != entry->superPagesEnd) {
superPages[numSuperPages] = superPage;
numSuperPages++;
superPage += kSuperPageSize;
}
entry = entry->next;
}
ASSERT(numSuperPages == root->totalSizeOfSuperPages / kSuperPageSize);
for (size_t i = 0; i < numSuperPages; ++i)
freePages(superPages[i], kSuperPageSize);
}
bool partitionAllocShutdown(PartitionRoot* root)
{
bool noLeaks = true;
size_t i;
for (i = 0; i < root->numBuckets; ++i) {
PartitionBucket* bucket = &root->buckets()[i];
if (!partitionAllocShutdownBucket(bucket))
noLeaks = false;
}
partitionAllocBaseShutdown(root);
return noLeaks;
}
bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
{
bool noLeaks = true;
size_t i;
for (i = 0; i < kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; ++i) {
PartitionBucket* bucket = &root->buckets[i];
if (!partitionAllocShutdownBucket(bucket))
noLeaks = false;
}
partitionAllocBaseShutdown(root);
return noLeaks;
}
static NEVER_INLINE void partitionOutOfMemory()
{
IMMEDIATE_CRASH();
}
static NEVER_INLINE void partitionFull()
{
IMMEDIATE_CRASH();
}
static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
{
decommitSystemPages(addr, len);
ASSERT(root->totalSizeOfCommittedPages > len);
root->totalSizeOfCommittedPages -= len;
}
static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
{
recommitSystemPages(addr, len);
root->totalSizeOfCommittedPages += len;
}
static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, size_t numPartitionPages)
{
ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize));
ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize));
RELEASE_ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage);
size_t totalSize = kPartitionPageSize * numPartitionPages;
root->totalSizeOfCommittedPages += totalSize;
size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift;
if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) {
// In this case, we can still hand out pages from the current super page
// allocation.
char* ret = root->nextPartitionPage;
root->nextPartitionPage += totalSize;
return ret;
}
// Need a new super page.
root->totalSizeOfSuperPages += kSuperPageSize;
if (root->totalSizeOfSuperPages > kMaxPartitionSize)
partitionFull();
char* requestedAddress = root->nextSuperPage;
char* superPage = reinterpret_cast<char*>(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize));
if (UNLIKELY(!superPage)) {
if (flags & PartitionAllocReturnNull)
return 0;
partitionOutOfMemory();
}
root->nextSuperPage = superPage + kSuperPageSize;
char* ret = superPage + kPartitionPageSize;
root->nextPartitionPage = ret + totalSize;
root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize;
// Make the first partition page in the super page a guard page, but leave a
// hole in the middle.
// This is where we put page metadata and also a tiny amount of extent
// metadata.
setSystemPagesInaccessible(superPage, kSystemPageSize);
setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
// Also make the last partition page a guard page.
setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize);
// If we were after a specific address, but didn't get it, assume that
// the system chose a lousy address and re-randomize the next
// allocation.
if (requestedAddress && requestedAddress != superPage)
root->nextSuperPage = 0;
// We allocated a new super page so update super page metadata.
// First check if this is a new extent or not.
PartitionSuperPageExtentEntry* latestExtent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(superPage));
PartitionSuperPageExtentEntry* currentExtent = root->currentExtent;
bool isNewExtent = (superPage != requestedAddress);
if (UNLIKELY(isNewExtent)) {
latestExtent->next = 0;
if (UNLIKELY(!currentExtent)) {
root->firstExtent = latestExtent;
} else {
ASSERT(currentExtent->superPageBase);
currentExtent->next = latestExtent;
}
root->currentExtent = latestExtent;
currentExtent = latestExtent;
currentExtent->superPageBase = superPage;
currentExtent->superPagesEnd = superPage + kSuperPageSize;
} else {
// We allocated next to an existing extent so just nudge the size up a little.
currentExtent->superPagesEnd += kSuperPageSize;
ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd);
}
// By storing the root in every extent metadata object, we have a fast way
// to go from a pointer within the partition to the root object.
latestExtent->root = root;
return ret;
}
static ALWAYS_INLINE void partitionUnusePage(PartitionRootBase* root, PartitionPage* page)
{
ASSERT(page->bucket->numSystemPagesPerSlotSpan);
void* addr = partitionPageToPointer(page);
partitionDecommitSystemPages(root, addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
}
static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket)
{
return (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / bucket->slotSize;
}
static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket* bucket)
{
return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage;
}
static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket)
{
ASSERT(page != &PartitionRootGeneric::gSeedPage);
page->numAllocatedSlots = 0;
page->numUnprovisionedSlots = partitionBucketSlots(bucket);
ASSERT(page->numUnprovisionedSlots);
page->bucket = bucket;
page->nextPage = 0;
// NULLing the freelist is not strictly necessary but it makes an ASSERT in partitionPageFillFreelist simpler.
page->freelistHead = 0;
page->pageOffset = 0;
page->freeCacheIndex = -1;
size_t numPartitionPages = partitionBucketPartitionPages(bucket);
size_t i;
char* pageCharPtr = reinterpret_cast<char*>(page);
for (i = 1; i < numPartitionPages; ++i) {
pageCharPtr += kPageMetadataSize;
PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr);
secondaryPage->pageOffset = i;
}
}
static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
{
ASSERT(page != &PartitionRootGeneric::gSeedPage);
size_t numSlots = page->numUnprovisionedSlots;
ASSERT(numSlots);
PartitionBucket* bucket = page->bucket;
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.)
ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
// Similarly, make explicitly sure that the freelist is empty.
ASSERT(!page->freelistHead);
ASSERT(page->numAllocatedSlots >= 0);
size_t size = bucket->slotSize;
char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
char* returnObject = base + (size * page->numAllocatedSlots);
char* firstFreelistPointer = returnObject + size;
char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*);
// Our goal is to fault as few system pages as possible. We calculate the
// page containing the "end" of the returned slot, and then allow freelist
// pointers to be written up to the end of that page.
char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask);
char* slotsLimit = returnObject + (size * page->numUnprovisionedSlots);
char* freelistLimit = subPageLimit;
if (UNLIKELY(slotsLimit < freelistLimit))
freelistLimit = slotsLimit;
size_t numNewFreelistEntries = 0;
if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) {
// Only consider used space in the slot span. If we consider wasted
// space, we may get an off-by-one when a freelist pointer fits in the
// wasted space, but a slot does not.
// We know we can fit at least one freelist pointer.
numNewFreelistEntries = 1;
// Any further entries require space for the whole slot span.
numNewFreelistEntries += (freelistLimit - firstFreelistPointerExtent) / size;
}
// We always return an object slot -- that's the +1 below.
// We do not necessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes.
ASSERT(numNewFreelistEntries + 1 <= numSlots);
numSlots -= (numNewFreelistEntries + 1);
page->numUnprovisionedSlots = numSlots;
page->numAllocatedSlots++;
if (LIKELY(numNewFreelistEntries)) {
char* freelistPointer = firstFreelistPointer;
PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
page->freelistHead = entry;
while (--numNewFreelistEntries) {
freelistPointer += size;
PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
entry->next = partitionFreelistMask(nextEntry);
entry = nextEntry;
}
entry->next = partitionFreelistMask(0);
} else {
page->freelistHead = 0;
}
return returnObject;
}
// This helper function scans the active page list for a suitable new active
// page, starting at the passed in page.
// When it finds a suitable new active page (one that has free slots), it is
// set as the new active page and true is returned. If there is no suitable new
// active page, false is returned and the current active page is set to null.
// As potential pages are scanned, they are tidied up according to their state.
// Freed pages are swept on to the free page list and full pages are unlinked
// from any list.
static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionPage* page)
{
if (page == &PartitionRootBase::gSeedPage) {
ASSERT(!page->nextPage);
return false;
}
PartitionPage* nextPage = 0;
PartitionBucket* bucket = page->bucket;
for (; page; page = nextPage) {
nextPage = page->nextPage;
ASSERT(page->bucket == bucket);
ASSERT(page != bucket->freePagesHead);
ASSERT(!bucket->freePagesHead || page != bucket->freePagesHead->nextPage);
// Page is usable if it has something on the freelist, or unprovisioned
// slots that can be turned into a freelist.
if (LIKELY(page->freelistHead != 0) || LIKELY(page->numUnprovisionedSlots)) {
bucket->activePagesHead = page;
return true;
}
ASSERT(page->numAllocatedSlots >= 0);
if (LIKELY(page->numAllocatedSlots == 0)) {
ASSERT(page->freeCacheIndex == -1);
// We hit a free page, so shepherd it on to the free page list.
page->nextPage = bucket->freePagesHead;
bucket->freePagesHead = page;
} else {
// If we get here, we found a full page. Skip over it too, and also
// tag it as full (via a negative value). We need it tagged so that
// free'ing can tell, and move it back into the active page list.
ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
page->numAllocatedSlots = -page->numAllocatedSlots;
++bucket->numFullPages;
// numFullPages is a uint16_t for efficient packing so guard against
// overflow to be safe.
RELEASE_ASSERT(bucket->numFullPages);
// Not necessary but might help stop accidents.
page->nextPage = 0;
}
}
bucket->activePagesHead = 0;
return false;
}
struct PartitionDirectMapExtent {
size_t mapSize; // Mapped size, not including guard pages and meta-data.
};
static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page)
{
ASSERT(partitionBucketIsDirectMapped(page->bucket));
return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 2 * kPageMetadataSize);
}
static ALWAYS_INLINE void* partitionDirectMap(PartitionRootBase* root, int flags, size_t size)
{
size = partitionDirectMapSize(size);
// Because we need to fake looking like a super page, we need to allocate
// a bunch of system pages more than "size":
// - The first few system pages are the partition page in which the super
// page metadata is stored. We fault just one system page out of a partition
// page sized clump.
// - We add a trailing guard page.
size_t mapSize = size + kPartitionPageSize + kSystemPageSize;
// Round up to the allocation granularity.
mapSize += kPageAllocationGranularityOffsetMask;
mapSize &= kPageAllocationGranularityBaseMask;
// TODO: we may want to let the operating system place these allocations
// where it pleases. On 32-bit, this might limit address space
// fragmentation and on 64-bit, this might have useful savings for TLB
// and page table overhead.
// TODO: if upsizing realloc()s are common on large sizes, we could
// consider over-allocating address space on 64-bit, "just in case".
// TODO: consider pre-populating page tables (e.g. MAP_POPULATE on Linux,
// MADV_WILLNEED on POSIX).
// TODO: these pages will be zero-filled. Consider internalizing an
// allocZeroed() API so we can avoid a memset() entirely in this case.
char* ptr = reinterpret_cast<char*>(allocPages(0, mapSize, kSuperPageSize));
if (!ptr) {
if (flags & PartitionAllocReturnNull)
return 0;
partitionOutOfMemory();
}
char* ret = ptr + kPartitionPageSize;
// TODO: due to all the guard paging, this arrangement creates 4 mappings.
// We could get it down to three by using read-only for the metadata page,
// or perhaps two by leaving out the trailing guard page on 64-bit.
setSystemPagesInaccessible(ptr, kSystemPageSize);
setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
setSystemPagesInaccessible(ret + size, kSystemPageSize);
PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr));
extent->root = root;
PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ret);
PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + kPageMetadataSize);
page->freelistHead = 0;
page->nextPage = 0;
page->bucket = bucket;
page->numAllocatedSlots = 1;
page->numUnprovisionedSlots = 0;
page->pageOffset = 0;
page->freeCacheIndex = 0;
bucket->activePagesHead = 0;
bucket->freePagesHead = 0;
bucket->slotSize = size;
bucket->numSystemPagesPerSlotSpan = 0;
bucket->numFullPages = 0;
PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize;
return ret;
}
static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page)
{
size_t unmapSize = partitionPageToDirectMapExtent(page)->mapSize;
// Add on the size of the trailing guard page and preceding partition
// page.
unmapSize += kPartitionPageSize + kSystemPageSize;
ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask));
char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= kPartitionPageSize;
freePages(ptr, unmapSize);
}
void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
{
// The slow path is called when the freelist is empty.
ASSERT(!bucket->activePagesHead->freelistHead);
// For the partitionAllocGeneric API, we have a bunch of buckets marked
// as special cases. We bounce them through to the slow path so that we
// can still have a blazing fast hot path due to lack of corner-case
// branches.<|fim▁hole|> ASSERT(size > kGenericMaxBucketed);
ASSERT(bucket == &PartitionRootBase::gPagedBucket);
if (size > kGenericMaxDirectMapped) {
if (returnNull)
return 0;
RELEASE_ASSERT(false);
}
return partitionDirectMap(root, flags, size);
}
// First, look for a usable page in the existing active pages list.
// Change active page, accepting the current page as a candidate.
if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) {
PartitionPage* newPage = bucket->activePagesHead;
if (LIKELY(newPage->freelistHead != 0)) {
PartitionFreelistEntry* ret = newPage->freelistHead;
newPage->freelistHead = partitionFreelistMask(ret->next);
newPage->numAllocatedSlots++;
return ret;
}
ASSERT(newPage->numUnprovisionedSlots);
return partitionPageAllocAndFillFreelist(newPage);
}
// Second, look in our list of freed but reserved pages.
PartitionPage* newPage = bucket->freePagesHead;
if (LIKELY(newPage != 0)) {
ASSERT(newPage != &PartitionRootGeneric::gSeedPage);
ASSERT(!newPage->freelistHead);
ASSERT(!newPage->numAllocatedSlots);
ASSERT(!newPage->numUnprovisionedSlots);
ASSERT(newPage->freeCacheIndex == -1);
bucket->freePagesHead = newPage->nextPage;
void* addr = partitionPageToPointer(newPage);
partitionRecommitSystemPages(root, addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
} else {
// Third. If we get here, we need a brand new page.
size_t numPartitionPages = partitionBucketPartitionPages(bucket);
void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages);
if (UNLIKELY(!rawNewPage)) {
ASSERT(returnNull);
return 0;
}
// Skip the alignment check because it depends on page->bucket, which is not yet set.
newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage);
}
partitionPageReset(newPage, bucket);
bucket->activePagesHead = newPage;
return partitionPageAllocAndFillFreelist(newPage);
}
static ALWAYS_INLINE void partitionFreePage(PartitionRootBase* root, PartitionPage* page)
{
ASSERT(page->freelistHead);
ASSERT(!page->numAllocatedSlots);
partitionUnusePage(root, page);
// We actually leave the freed page in the active list. We'll sweep it on
// to the free page list when we next walk the active page list. Pulling
// this trick enables us to use a singly-linked page list for all cases,
// which is critical in keeping the page metadata structure down to 32
// bytes in size.
page->freelistHead = 0;
page->numUnprovisionedSlots = 0;
}
static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
{
PartitionRootBase* root = partitionPageToRoot(page);
// If the page is already registered as empty, give it another life.
if (page->freeCacheIndex != -1) {
ASSERT(page->freeCacheIndex >= 0);
ASSERT(static_cast<unsigned>(page->freeCacheIndex) < kMaxFreeableSpans);
ASSERT(root->globalEmptyPageRing[page->freeCacheIndex] == page);
root->globalEmptyPageRing[page->freeCacheIndex] = 0;
}
size_t currentIndex = root->globalEmptyPageRingIndex;
PartitionPage* pageToFree = root->globalEmptyPageRing[currentIndex];
// The page might well have been re-activated, filled up, etc. before we get
// around to looking at it here.
if (pageToFree) {
ASSERT(pageToFree != &PartitionRootBase::gSeedPage);
ASSERT(pageToFree->freeCacheIndex >= 0);
ASSERT(static_cast<unsigned>(pageToFree->freeCacheIndex) < kMaxFreeableSpans);
ASSERT(pageToFree == root->globalEmptyPageRing[pageToFree->freeCacheIndex]);
if (!pageToFree->numAllocatedSlots && pageToFree->freelistHead) {
// The page is still empty, and not freed, so _really_ free it.
partitionFreePage(root, pageToFree);
}
pageToFree->freeCacheIndex = -1;
}
// We put the empty slot span on our global list of "pages that were once
// empty". thus providing it a bit of breathing room to get re-used before
// we really free it. This improves performance, particularly on Mac OS X
// which has subpar memory management performance.
root->globalEmptyPageRing[currentIndex] = page;
page->freeCacheIndex = currentIndex;
++currentIndex;
if (currentIndex == kMaxFreeableSpans)
currentIndex = 0;
root->globalEmptyPageRingIndex = currentIndex;
}
void partitionFreeSlowPath(PartitionPage* page)
{
PartitionBucket* bucket = page->bucket;
ASSERT(page != &PartitionRootGeneric::gSeedPage);
ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
if (LIKELY(page->numAllocatedSlots == 0)) {
// Page became fully unused.
if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
partitionDirectUnmap(page);
return;
}
// If it's the current active page, attempt to change it. We'd prefer to leave
// the page empty as a gentle force towards defragmentation.
if (LIKELY(page == bucket->activePagesHead) && page->nextPage) {
if (partitionSetNewActivePage(page->nextPage)) {
ASSERT(bucket->activePagesHead != page);
// Link the empty page back in after the new current page, to
// avoid losing a reference to it.
// TODO: consider walking the list to link the empty page after
// all non-empty pages?
PartitionPage* currentPage = bucket->activePagesHead;
page->nextPage = currentPage->nextPage;
currentPage->nextPage = page;
} else {
bucket->activePagesHead = page;
page->nextPage = 0;
}
}
partitionRegisterEmptyPage(page);
} else {
// Ensure that the page is full. That's the only valid case if we
// arrive here.
ASSERT(page->numAllocatedSlots < 0);
// A transition of numAllocatedSlots from 0 to -1 is not legal, and
// likely indicates a double-free.
RELEASE_ASSERT(page->numAllocatedSlots != -1);
page->numAllocatedSlots = -page->numAllocatedSlots - 2;
ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
page->nextPage = bucket->activePagesHead;
bucket->activePagesHead = page;
--bucket->numFullPages;
// Special case: for a partition page with just a single slot, it may
// now be empty and we want to run it through the empty logic.
if (UNLIKELY(page->numAllocatedSlots == 0))
partitionFreeSlowPath(page);
}
}
bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t newSize)
{
ASSERT(partitionBucketIsDirectMapped(page->bucket));
newSize = partitionCookieSizeAdjustAdd(newSize);
// Note that the new size might be a bucketed size; this function is called
// whenever we're reallocating a direct mapped allocation.
newSize = partitionDirectMapSize(newSize);
if (newSize < kGenericMinDirectMappedDownsize)
return false;
// bucket->slotSize is the current size of the allocation.
size_t currentSize = page->bucket->slotSize;
if (newSize == currentSize)
return true;
char* charPtr = static_cast<char*>(partitionPageToPointer(page));
if (newSize < currentSize) {
size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4)
return false;
// Shrink by decommitting unneeded pages and making them inaccessible.
size_t decommitSize = currentSize - newSize;
partitionDecommitSystemPages(root, charPtr + newSize, decommitSize);
setSystemPagesInaccessible(charPtr + newSize, decommitSize);
} else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommitSize = newSize - currentSize;
setSystemPagesAccessible(charPtr + currentSize, recommitSize);
partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize);
#if ENABLE(ASSERT)
memset(charPtr + currentSize, kUninitializedByte, recommitSize);
#endif
} else {
// We can't perform the realloc in-place.
// TODO: support this too when possible.
return false;
}
#if ENABLE(ASSERT)
// Write a new trailing cookie.
partitionCookieWriteValue(charPtr + newSize - kCookieSize);
#endif
page->bucket->slotSize = newSize;
return true;
}
void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize)
{
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return realloc(ptr, newSize);
#else
if (UNLIKELY(!ptr))
return partitionAllocGeneric(root, newSize);
if (UNLIKELY(!newSize)) {
partitionFreeGeneric(root, ptr);
return 0;
}
RELEASE_ASSERT(newSize <= kGenericMaxDirectMapped);
ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr)));
PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) {
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
if (partitionReallocDirectMappedInPlace(root, page, newSize))
return ptr;
}
size_t actualNewSize = partitionAllocActualSize(root, newSize);
size_t actualOldSize = partitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
if (actualNewSize == actualOldSize) {
// Trying to allocate a block of size newSize would give us a block of
// the same size as the one we've already got, so no point in doing
// anything here.
return ptr;
}
// This realloc cannot be resized in-place. Sadness.
void* ret = partitionAllocGeneric(root, newSize);
size_t copySize = actualOldSize;
if (newSize < copySize)
copySize = newSize;
memcpy(ret, ptr, copySize);
partitionFreeGeneric(root, ptr);
return ret;
#endif
}
#ifndef NDEBUG
void partitionDumpStats(const PartitionRoot& root)
{
size_t i;
size_t totalLive = 0;
size_t totalResident = 0;
size_t totalFreeable = 0;
for (i = 0; i < root.numBuckets; ++i) {
const PartitionBucket& bucket = root.buckets()[i];
if (bucket.activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket.freePagesHead && !bucket.numFullPages) {
// Empty bucket with no freelist or full pages. Skip reporting it.
continue;
}
size_t numFreePages = 0;
PartitionPage* freePages = bucket.freePagesHead;
while (freePages) {
++numFreePages;
freePages = freePages->nextPage;
}
size_t bucketSlotSize = bucket.slotSize;
size_t bucketNumSlots = partitionBucketSlots(&bucket);
size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots;
size_t bucketPageSize = bucket.numSystemPagesPerSlotSpan * kSystemPageSize;
size_t bucketWaste = bucketPageSize - bucketUsefulStorage;
size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage;
size_t numResidentBytes = bucket.numFullPages * bucketPageSize;
size_t numFreeableBytes = 0;
size_t numActivePages = 0;
const PartitionPage* page = bucket.activePagesHead;
while (page) {
ASSERT(page != &PartitionRootGeneric::gSeedPage);
// A page may be on the active list but freed and not yet swept.
if (!page->freelistHead && !page->numUnprovisionedSlots && !page->numAllocatedSlots) {
++numFreePages;
} else {
++numActivePages;
numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * bucketSlotSize;
// Round up to system page size.
pageBytesResident = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask;
numResidentBytes += pageBytesResident;
if (!page->numAllocatedSlots)
numFreeableBytes += pageBytesResident;
}
page = page->nextPage;
}
totalLive += numActiveBytes;
totalResident += numResidentBytes;
totalFreeable += numFreeableBytes;
printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, bucketPageSize, bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, static_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages);
}
printf("total live: %zu bytes\n", totalLive);
printf("total resident: %zu bytes\n", totalResident);
printf("total freeable: %zu bytes\n", totalFreeable);
fflush(stdout);
}
#endif // !NDEBUG
} // namespace WTF<|fim▁end|> | bool returnNull = flags & PartitionAllocReturnNull;
if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
<|file_name|>md5s3stash.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
""" md5s3stash
content addressable storage in AWS S3
"""
from __future__ import unicode_literals
import sys
import os
import argparse
import tempfile
import urllib2
import urllib
import urlparse
import base64
import logging
import hashlib
import basin
import boto
import magic
from PIL import Image
from collections import namedtuple
import re
regex_s3 = re.compile(r's3.*amazonaws.com')
def main(argv=None):
parser = argparse.ArgumentParser(
description='content addressable storage in AWS S3')
parser.add_argument('url', nargs='+',
help='URL or path of source file to stash')
parser.add_argument('-b', '--bucket_base', nargs="?",
help='this must be a unique name in all of AWS S3')
parser.add_argument('-s', '--bucket_scheme', nargs="?",
default="simple", choices=['simple', 'multivalue'],
help='this must be a unique name in all of AWS S3')
parser.add_argument(
'-t', '--tempdir',
required=False,
help="if your files might be large, make sure this is on a big disk"
)
parser.add_argument(
'-w', '--warnings',
default=False,
help='show python `DeprecationWarning`s suppressed by default',
required=False,
action='store_true',
)
parser.add_argument('--loglevel', default='ERROR', required=False)
parser.add_argument('-u', '--username', required=False,
help='username for downloads requiring BasicAuth')
parser.add_argument('-p', '--password', required=False,
help='password for downloads requiring BasicAuth')
if argv is None:
argv = parser.parse_args()
if argv.bucket_base:
bucket_base = argv.bucket_base
else:
assert 'BUCKET_BASE' in os.environ, "`-b` or `BUCKET_BASE` must be set"
bucket_base = os.environ['BUCKET_BASE']
if not argv.warnings:
# suppress warnings
# http://stackoverflow.com/a/2047600/1763984
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
if argv.tempdir:
tempfile.tempdir = argv.tempdir
auth = None
if argv.username:
auth = (argv.username, argv.password)
# set debugging level
numeric_level = getattr(logging, argv.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % argv.loglevel)
logging.basicConfig(level=numeric_level, )
# if being used in a library, probably want to be able to recycle
# connection?
conn = boto.connect_s3()
for url in argv.url:
print("{0}\t{1}\t{2}\t{3}".format(
*md5s3stash(url, bucket_base, conn, url_auth=auth, bucket_scheme=argv.bucket_scheme)
))
def md5s3stash(
url,
bucket_base,
conn=None,
url_auth=None,
url_cache={},
hash_cache={},
bucket_scheme='simple'
):
""" stash a file at `url` in the named `bucket_base` ,
`conn` is an optional boto.connect_s3()
`url_auth` is optional Basic auth ('<username>', '<password'>) tuple
to use if the url to download requires authentication.
`url_cache` is an object with a dict interface, keyed on url
url_cache[url] = { md5: ..., If-None-Match: etag, If-Modified-Since: date }
`hash_cache` is an object with dict interface, keyed on md5
hash_cache[md5] = ( s3_url, mime_type, dimensions )
`bucket_scheme` is text string 'simple' or 'multibucket'
"""
StashReport = namedtuple('StashReport', 'url, md5, s3_url, mime_type, dimensions')
(file_path, md5, mime_type) = checkChunks(url, url_auth, url_cache)
try:
return StashReport(url, md5, *hash_cache[md5])
except KeyError:
pass
s3_url = md5_to_s3_url(md5, bucket_base, bucket_scheme=bucket_scheme)
if conn is None:
conn = boto.connect_s3()
s3move(file_path, s3_url, mime_type, conn)
(mime, dimensions) = image_info(file_path)
os.remove(file_path) # safer than rmtree
hash_cache[md5] = (s3_url, mime, dimensions)
report = StashReport(url, md5, *hash_cache[md5])
logging.getLogger('MD5S3:stash').info(report)
return report
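# Illustrative call (editorial example, not part of the original module); the
# URL and bucket name are hypothetical and boto is assumed to have AWS
# credentials configured:
#
#     report = md5s3stash('http://example.com/image.jpg', 'my-bucket-base')
#     # report -> StashReport(url, md5, s3_url, mime_type, dimensions)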
# think about refactoring the next two functions
def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):
""" calculate the s3 URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "s3://{0}/{1}".format(
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "s3://{0}.{1}/{2}".format(
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):
""" calculate the http URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "http://{0}/{1}/{2}".format(
s3_endpoint,
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "http://{1}.{2}.{0}/{3}".format(
s3_endpoint,
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
def md5_to_bucket_shard(md5):
""" calculate the shard label of the bucket name from md5 """
# "Consider utilizing multiple buckets that start with different
# alphanumeric characters. This will ensure a degree of partitioning
# from the start. The higher your volume of concurrent PUT and
# GET requests, the more impact this will likely have."
# -- http://aws.amazon.com/articles/1904
# "Bucket names must be a series of one or more labels. Adjacent
# labels are separated by a single period (.). [...] Each label must
# start and end with a lowercase letter or a number. "
# -- http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
# see also: http://en.wikipedia.org/wiki/Base_36
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
# http://stats.stackexchange.com/a/70884/14900
# take the first two digits of the hash and turn that into an integer
# this should be evenly distributed
int_value = int(md5[0], 16)+10*int(md5[1], 16)
# divide by the length of the alphabet and take the remainder
bucket = int_value % len(ALPHABET)
return basin.encode(ALPHABET, bucket)
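# Worked example (editorial illustration, not part of the original module): for
# an md5 starting 'd4...', int_value = 13 + 10*4 = 53 and 53 % 36 = 17, which
# basin should encode as the single character 'h' from ALPHABET.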
def is_s3_url(url):
'''For s3 urls, if you send http authentication headers, S3 will
send a "400 Bad Request" in response.
Now look for s3*.amazonaws.com
'''
# moving to OR this will be s3-us-west-2.amazonaws.com
match = regex_s3.search(url)
return True if match else False
def urlopen_with_auth(url, auth=None, cache={}):
'''Open ``url``, using urllib2 with HTTP Basic auth headers when ``auth`` is specified.
auth is a tuple of (username, password)
'''
opener = urllib2.build_opener(DefaultErrorHandler())
req = urllib2.Request(url)
p = urlparse.urlparse(url)
# try to set headers for conditional get request
try:
here = cache[url]
if 'If-None-Match' in here:
req.add_header('If-None-Match', cache[url]['If-None-Match'],)
if 'If-Modified-Since' in here:
req.add_header('If-Modified-Since', cache[url]['If-Modified-Since'],)
except KeyError:
pass
if not auth or is_s3_url(url):
if p.scheme not in ['http', 'https']:
return urllib.urlopen(url) # urllib works with normal file paths
else:
# make sure https
if p.scheme != 'https':
raise urllib2.URLError('Basic auth not over https is bad idea! \
scheme:{0}'.format(p.scheme))
# Need to add header so it gets sent with first request,
# else redirected to shib
b64authstr = base64.b64encode('{0}:{1}'.format(*auth))
req.add_header('Authorization', 'Basic {0}'.format(b64authstr))<|fim▁hole|>
def checkChunks(url, auth=None, cache={}):
"""
Helper to download large files. The only required arg is a url; the file
is written to a temp directory, downloaded in chunks, and its md5
checksum is returned.
based on downloadChunks@https://gist.github.com/gourneau/1430932
and http://www.pythoncentral.io/hashing-files-with-python/
"""
temp_file = tempfile.NamedTemporaryFile(delete=False, prefix='md5s3_')
logging.getLogger('MD5S3').info("temp file path %s" % temp_file.name)
hasher = hashlib.new('md5')
BLOCKSIZE = 1024 * hasher.block_size
try:
req = urlopen_with_auth(url, auth=auth, cache=cache)
thisurl = cache.get(url, dict())
if req.getcode() == 304:
return None, thisurl['md5'], None
mime_type = req.info()['Content-type']
# record these headers, they will let us pretend like we are a cacheing
# proxy server, and send conditional GETs next time we see this file
etag = req.info().get('ETag', None)
if etag:
thisurl['If-None-Match'] = etag
lmod = req.info().get('Last-Modified', None)
if lmod:
thisurl['If-Modified-Since'] = lmod
downloaded = 0
with temp_file:
while True:
chunk = req.read(BLOCKSIZE)
hasher.update(chunk)
downloaded += len(chunk)
if not chunk:
break
temp_file.write(chunk)
except urllib2.HTTPError, e:
print "HTTP Error:", e.code, url
return False
except urllib2.URLError, e:
print "URL Error:", e.reason, url
return False
md5 = hasher.hexdigest()
thisurl['md5'] = md5
cache[url] = thisurl
return temp_file.name, md5, mime_type
def s3move(place1, place2, mime, s3):
l = logging.getLogger('MD5S3:s3move')
l.debug({
'place1': place1,
'place2': place2,
'mime': mime,
's3': s3,
})
parts = urlparse.urlsplit(place2)
# SplitResult(scheme='s3', netloc='test.pdf', path='/dkd', query=''
# , fragment='')
try:
bucket = s3.get_bucket(parts.netloc, validate=False)
l.debug('bucket exists')
except boto.exception.S3ResponseError:
bucket = s3.create_bucket(parts.netloc)
l.debug('bucket created')
if not(bucket.get_key(parts.path, validate=False)):
key = bucket.new_key(parts.path)
# metadata has to be set before setting contents/creating object.
# See https://gist.github.com/garnaat/1791086
key.set_metadata("Content-Type", mime)
key.set_contents_from_filename(place1)
# key.set_acl('public-read')
l.debug('file sent to s3')
else:
l.info('key existed already')
def image_info(filepath):
''' get image info
`filepath` path to a file
returns
a tuple of two values
1. mime/type if an image; otherwise None
2. a tuple of (height, width) if an image; otherwise (0,0)
'''
try:
return (
magic.Magic(mime=True).from_file(filepath),
Image.open(filepath).size
)
except IOError as e:
if not e.message.startswith('cannot identify image file'):
raise e
else:
return (None, (0,0))
# example 11.7 Defining URL handlers
# http://www.diveintopython.net/http_web_services/etags.html
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_304(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
# main() idiom for importing into REPL for debugging
if __name__ == "__main__":
sys.exit(main())
"""
Copyright (c) 2015, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""<|fim▁end|> |
# return urllib2.urlopen(req)
return opener.open(req) |
<|file_name|>views.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from pyramid.view import view_config<|fim▁hole|>import logging
import pysite.resmgr
L = logging.getLogger('PySite')
@view_config(
name='',
context=pysite.plugins.models.Node,
renderer='pysite:plugins/templates/index.mako',
permission='admin'
)
def index(context, request):
return dict()<|fim▁end|> | |
<|file_name|>tool_bar.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2015, The Rust-GNOME Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
//! Create bars of buttons and other widgets
use libc::c_int;
use ffi;
use glib::{to_bool, to_gboolean};
use cast::{GTK_TOOLBAR, GTK_TOOLITEM};
use {IconSize, ReliefStyle, ToolbarStyle};
/// Toolbar — Create bars of buttons and other widgets
/*
* # Availables signals :
* * `focus-home-or-end` : Action
* * `orientation-changed` : Run First
* * `popup-context-menu` : Run Last
* * `style-changed` : Run First
*/
struct_Widget!(Toolbar);
impl Toolbar {
pub fn new() -> Option<Toolbar> {
let tmp_pointer = unsafe { ffi::gtk_toolbar_new() };
check_pointer!(tmp_pointer, Toolbar)
}
pub fn insert<T: ::ToolItemTrait>(&self,
item: &T,
pos: i32) -> () {
unsafe {
ffi::gtk_toolbar_insert(GTK_TOOLBAR(self.pointer), GTK_TOOLITEM(item.unwrap_widget()), pos as c_int)
}
}
pub fn item_index<T: ::ToolItemTrait>(&self, item: &T) -> i32 {
unsafe {
ffi::gtk_toolbar_get_item_index(GTK_TOOLBAR(self.pointer), GTK_TOOLITEM(item.unwrap_widget())) as i32
}
}
pub fn get_n_items(&self) -> i32 {
unsafe {
ffi::gtk_toolbar_get_n_items(GTK_TOOLBAR(self.pointer)) as i32
}
}
pub fn get_nth_item(&self, n: i32) -> Option<::ToolItem> {
unsafe {
let tmp_pointer = ffi::gtk_toolbar_get_nth_item(GTK_TOOLBAR(self.pointer), n as c_int) as *mut ffi::C_GtkWidget;
if tmp_pointer.is_null() {
None
} else {
Some(::FFIWidget::wrap_widget(tmp_pointer))
}
}
}
pub fn get_drop_index(&self, x: i32, y: i32) -> i32 {
unsafe {
ffi::gtk_toolbar_get_drop_index(GTK_TOOLBAR(self.pointer), x as c_int, y as c_int) as i32
}
}
pub fn set_drop_highlight_item<T: ::ToolItemTrait>(&self, item: &T, index: i32) -> () {
unsafe {
ffi::gtk_toolbar_set_drop_highlight_item(GTK_TOOLBAR(self.pointer), GTK_TOOLITEM(item.unwrap_widget()), index as c_int);
}
}
pub fn set_show_arrow(&self, show_arrow: bool) -> () {<|fim▁hole|> pub fn unset_icon_size(&self) -> () {
unsafe {
ffi::gtk_toolbar_unset_icon_size(GTK_TOOLBAR(self.pointer))
}
}
pub fn get_show_arrow(&self) -> bool {
unsafe { to_bool(ffi::gtk_toolbar_get_show_arrow(GTK_TOOLBAR(self.pointer))) }
}
pub fn get_style(&self) -> ToolbarStyle {
unsafe {
ffi::gtk_toolbar_get_style(GTK_TOOLBAR(self.pointer))
}
}
pub fn get_icon_size(&self) -> IconSize {
unsafe {
ffi::gtk_toolbar_get_icon_size(GTK_TOOLBAR(self.pointer))
}
}
pub fn get_relief_style(&self) -> ReliefStyle {
unsafe {
ffi::gtk_toolbar_get_relief_style(GTK_TOOLBAR(self.pointer))
}
}
pub fn set_style(&self, style: ToolbarStyle) -> () {
unsafe {
ffi::gtk_toolbar_set_style(GTK_TOOLBAR(self.pointer), style);
}
}
pub fn set_icon_size(&self, icon_size: IconSize) -> () {
unsafe {
ffi::gtk_toolbar_set_icon_size(GTK_TOOLBAR(self.pointer), icon_size);
}
}
pub fn unset_style(&self) -> () {
unsafe {
ffi::gtk_toolbar_unset_style(GTK_TOOLBAR(self.pointer));
}
}
}
impl_drop!(Toolbar);
impl_TraitWidget!(Toolbar);
impl ::ContainerTrait for Toolbar {}
impl ::ToolShellTrait for Toolbar {}
impl ::OrientableTrait for Toolbar {}<|fim▁end|> | unsafe { ffi::gtk_toolbar_set_show_arrow(GTK_TOOLBAR(self.pointer), to_gboolean(show_arrow)); }
}
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|># encoding: utf-8
from __future__ import absolute_import, unicode_literals
from apiview.model import AbstractUserMixin, BaseModel
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models
class User(AbstractUserMixin, BaseModel, AbstractBaseUser):
is_staff = False
def get_short_name(self):
return self.name
def get_full_name(self):
return self.nickname
USERNAME_FIELD = 'username'<|fim▁hole|> class Meta:
db_table = 'example_user'
app_label = 'example_app'
verbose_name = verbose_name_plural = "用户"<|fim▁end|> | username = models.CharField('用户名', unique=True, max_length=64, editable=False, null=False, blank=False)
password = models.CharField('密码', max_length=128, unique=True, editable=False, null=False, blank=True)
nickname = models.CharField('昵称', unique=True, max_length=64, editable=False, null=False, blank=False)
|
<|file_name|>SimpleBinaryTree.py<|end_file_name|><|fim▁begin|># simple binary tree
# in this implementation, a node is inserted between an existing node and the root
class BinaryTree():
def __init__(self,rootid):
self.left = None
self.right = None
self.rootid = rootid
def getLeftChild(self):
return self.left
def getRightChild(self):
return self.right
def setNodeValue(self,value):
self.rootid = value
def getNodeValue(self):
return self.rootid
def insertRight(self,newNode):
if self.right == None:
self.right = BinaryTree(newNode)
else:
tree = BinaryTree(newNode)
tree.right = self.right
self.right = tree
def insertLeft(self,newNode):
if self.left == None:
self.left = BinaryTree(newNode)
else:
tree = BinaryTree(newNode)
            tree.left = self.left
            self.left = tree
# try to insert to left, if not insert to right
def insert(self, newNode, max_depth, current_depth=0):
if self.left == None:
self.left = BinaryTree(newNode)
else:
if(current_depth < max_depth):
current_depth+=1
self.left.insert(newNode, max_depth, current_depth)
else:
if(self.right == None):
self.right = BinaryTree(newNode)
else:<|fim▁hole|>
'''
def insert(item, tree):
if (item < tree.entry):
if (tree.left != None):
insert(item, tree.left)
else:
tree.left = Tree(item)
else:
if (tree.right != None):
insert(item, tree.right)
else:
tree.right = Tree(item)
'''
def printTree(tree):
if tree != None:
printTree(tree.getLeftChild())
print(tree.getNodeValue())
printTree(tree.getRightChild())
# test tree
def testTree():
myTree = BinaryTree("Maud")
myTree.insertLeft("Bob")
myTree.insertRight("Tony")
myTree.insertRight("Steven")
printTree(myTree)
testTree()<|fim▁end|> | |
<|file_name|>smc2psmc.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import numpy as np
import argparse
from smcpp.estimation_tools import load_data
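# Example invocation (a sketch, not from the original source; file names are
# illustrative). The PSMCfa output is written to stdout, so redirect it:
#   python smc2psmc.py --contig chr1 data/example.chr1.smc.gz > example.chr1.psmcfa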
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Convert SMC++-formatted data set into PSMCfa-style data.')
parser.add_argument("--contig", help="name of outputted contig")
parser.add_argument("input", metavar="file.smc[.gz]")
args = parser.parse_args()
args.contig = args.contig or args.input
contig = load_data([args.input])[0]
L = contig.data[:, 0].sum()
L += 100 - (L % 100)
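    # fa holds one genotype code per base (padded to a multiple of 100 with -1);
    # any 100-bp window still containing -1 is emitted as an 'N' bin below.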
fa = np.full(L, -1)
last = 0
for span, a, b, nb in contig.data:
fa[last:last + span] = a
last += span
fa.shape = (L // 100, -1)
code = fa.max(axis=1).astype('|S1')
code[code == b'0'] = b'T'
code[code == b'1'] = b'K'
code[code == b'2'] = b'T' # recode monomorphic sites
code[fa.min(axis=1) == -1] = b'N'
print(">" + args.contig)<|fim▁hole|> out = np.full([Lp, 80], b"\n", dtype='string_')
out[:, :-1] = code[:(79 * Lp)].reshape(Lp, 79)
print(out.tostring().decode('ascii')[:-1]) # omit trailing newline
print(code[(79 * Lp):].tostring().decode('ascii'))<|fim▁end|> | Lp = len(code) // 79
if Lp > 0: |
<|file_name|>introductions_spec.js<|end_file_name|><|fim▁begin|><|fim▁hole|> cy.login();
cy.createContact('John', 'Doe', 'Man');
cy.createContact('Jane', 'Doe', 'Woman');
cy.createContact('Joe', 'Shmoe', 'Man');
});
it('lets you fill first met without an introducer', function () {
cy.url().should('include', '/people/h:');
cy.get('.introductions a[href$="introductions/edit"]').click();
cy.url().should('include', '/introductions/edit');
cy.get('textarea[name=first_met_additional_info]').type('Lorem ipsum');
cy.get('button.btn-primary[type=submit]').click();
cy.url().should('include', '/people/h:');
cy.get('.alert-success');
cy.get('.introductions').contains('Lorem ipsum');
});
it('lets you save first met', function () {
cy.url().should('include', '/people/h:');
cy.get('.introductions a[href$="introductions/edit"]').click();
cy.url().should('include', '/introductions/edit');
cy.get('textarea[name=first_met_additional_info]').type('Lorem ipsum');
cy.get('#metThrough > .v-select input').click();
cy.get('#metThrough ul[role="listbox"]').contains('John Doe');
cy.get('#metThrough ul[role="listbox"]').contains('Jane Doe');
cy.get('#metThrough ul[role="listbox"]').contains('Joe Shmoe');
cy.get('#metThrough ul[role="listbox"]').contains('John Doe').click();
cy.get('button.btn-primary[type=submit]').click();
cy.url().should('include', '/people/h:');
cy.get('.alert-success');
cy.get('.introductions').contains('Lorem ipsum');
cy.get('.introductions').contains('John Doe');
});
it('lets you search first met', function () {
cy.url().should('include', '/people/h:');
cy.get('.introductions a[href$="introductions/edit"]').click();
cy.url().should('include', '/introductions/edit');
cy.get('textarea[name=first_met_additional_info]').type('Lorem ipsum');
cy.get('#metThrough input[type=search]').type('John');
cy.get('#metThrough ul[role="listbox"]').contains('John Doe');
cy.get('#metThrough ul[role="listbox"]').should('not.contain', 'Joe Shmoe');
cy.get('#metThrough ul[role="listbox"]').should('not.contain', 'Jane Doe');
cy.get('#metThrough ul[role="listbox"]').contains('John Doe').click();
cy.get('button.btn-primary[type=submit]').click();
cy.url().should('include', '/people/h:');
cy.get('.introductions').contains('Lorem ipsum');
cy.get('.introductions').contains('John Doe');
});
});<|fim▁end|> | describe('Introduction', function () {
beforeEach(function () { |
<|file_name|>effect-transfer.min.js<|end_file_name|><|fim▁begin|>/*!
* jQuery UI Effects Transfer 1.11.1
* http://jqueryui.com<|fim▁hole|> * http://jquery.org/license
*
* http://api.jqueryui.com/transfer-effect/
*/
!function(a){"function"==typeof define&&define.amd?define(["jquery","./effect"],a):a(jQuery)}(function(a){return a.effects.effect.transfer=function(b,c){var d=a(this),e=a(b.to),f="fixed"===e.css("position"),g=a("body"),h=f?g.scrollTop():0,i=f?g.scrollLeft():0,j=e.offset(),k={top:j.top-h,left:j.left-i,height:e.innerHeight(),width:e.innerWidth()},l=d.offset(),m=a("<div class='ui-effects-transfer'></div>").appendTo(document.body).addClass(b.className).css({top:l.top-h,left:l.left-i,height:d.innerHeight(),width:d.innerWidth(),position:f?"fixed":"absolute"}).animate(k,b.duration,b.easing,function(){m.remove(),c()})}});<|fim▁end|> | *
* Copyright 2014 jQuery Foundation and other contributors
* Released under the MIT license. |
<|file_name|>FeijianCode.java<|end_file_name|><|fim▁begin|>package com.gulj.common.util;
import java.io.UnsupportedEncodingException;
public enum FeijianCode {
SAVE_SUCCESS("0001","保存成功"),
SAVE_ERROR("0002","保存失败"),
UPDATE_SUCCESS("0003","修改成功"),
UPDATE_ERROR("0004","修改失败"),
DELETE_SUCCESS("0005","删除成功"),
DELETE_ERROR("0006","删除失败"),
USERORPWD_ERROR("0007","用户名或者密码不正确"),
USEROR_ERROR("0008","账号不存在"),
USER_FIBINDDEN_ERROR("0009","账号被禁止"),
CODE_ERROR("0010","验证码不正确"),
USER_EXIST_ERROR("0011","帐号已存在"),
USEROR_LOGIN_SUCCESS("0012","登录成功"),
USEROR_NONE_TOP_MENU("0013","无顶级菜单,请联系系统管理员进行权限分配"),
USEROR_NONE_CHILD_MENU("0014","无子级菜单,请联系系统管理员进行权限分配"),
SYS_EXCEPTION("1100","系统异常"),;
    /** Error code */
private final String code;
    /** Description message corresponding to the error code */
private final String info;
FeijianCode(String code, String info) {
this.code = code;
this.info = info;
}
public String getCode() {
return code;
}
public String getInfo() {
return info;
}
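    // Example (a sketch, not from the original source): toString() renders a
    // constant as a small JSON object, e.g.
    // FeijianCode.SAVE_SUCCESS.toString() -> {"code":"0001","message":"保存成功"}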
@SuppressWarnings("finally")
@Override
public String toString() {
String result = "{\"code\":"+"\""+this.code+"\""+",\"message\":"+"\""+this.info+"\""+"}";
try {
result = new String(result.getBytes("utf-8"), "utf-8");
} catch (UnsupportedEncodingException e) {
result = e.getMessage();
e.printStackTrace();
}
<|fim▁hole|> finally{
return result;
}
}
}<|fim▁end|> | |
<|file_name|>quicksort.js<|end_file_name|><|fim▁begin|>const Comparator = require('../../util/comparator');
/**
* Swaps two elements in the array
*/
const swap = (array, x, y) => {
const tmp = array[y];
array[y] = array[x];
array[x] = tmp;
};
/**
 * Chooses a pivot and rearranges the array so that every element
 * lower than the pivot ends up to its left and every greater
 * element to its right
 *
 * @return Number the position of the pivot
*/
const partition = (a, comparator, lo, hi) => {
// pick a random element, swap with the rightmost and
// use it as pivot
swap(a, Math.floor(Math.random() * (hi - lo)) + lo, hi);
const pivot = hi;
// dividerPosition keeps track of the position
// where the pivot should be inserted
let dividerPosition = lo;
for (let i = lo; i < hi; i++) {
if (comparator.lessThan(a[i], a[pivot])) {
swap(a, i, dividerPosition);
dividerPosition++;
}
}
swap(a, dividerPosition, pivot);
return dividerPosition;
};
/**
* Quicksort recursively sorts parts of the array in<|fim▁hole|> * O(n.lg n)
*/
const quicksortInit = (array, comparatorFn) => {
const comparator = new Comparator(comparatorFn);
return (function quicksort(array, lo, hi) {
while (lo < hi) {
const p = partition(array, comparator, lo, hi);
// Chooses only the smallest partition to use recursion on and
// tail-optimize the other. This guarantees O(log n) space in worst case.
if (p - lo < hi - p) {
quicksort(array, lo, p - 1);
lo = p + 1;
} else {
quicksort(array, p + 1, hi);
hi = p - 1;
}
}
return array;
})(array, 0, array.length - 1);
};
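// Example usage (a sketch, not from the original source; assumes the module
// is required as './quicksort'):
//   const quicksort = require('./quicksort');
//   quicksort([3, 1, 2]); // -> [1, 2, 3] using the default comparator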
module.exports = quicksortInit;<|fim▁end|> | |
<|file_name|>BayesInference.py<|end_file_name|><|fim▁begin|>__author__ = 'Varun Nayyar'
from Utils.MFCCArrayGen import emotions, speakers, getCorpus
from MCMC import MCMCRun
from emailAlerter import alertMe
def main2(numRuns = 100000, numMixtures = 8, speakerIndex = 6):
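    # Runs one MCMC chain per emotion corpus for the chosen speaker and
    # e-mails a summary (via alertMe) after each run completes.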
import time
for emotion in emotions:
start = time.ctime()
Xpoints = getCorpus(emotion, speakers[speakerIndex])
message = MCMCRun(Xpoints, emotion+"-"+speakers[speakerIndex], numRuns, numMixtures)
message += "Start time: {}\nEnd Time: {}\n".format(start, time.ctime())
message += "\nNumRuns: {}, numMixtures:{}\n ".format(numRuns, numMixtures)<|fim▁hole|> alertMe(message)
if __name__ == "__main__":
for i in xrange(len(speakers)):
main2(numMixtures=8, speakerIndex=i)<|fim▁end|> |
message += "\nEmotion: {}, speaker:{}\n".format(emotion, speakers[speakerIndex])
|
<|file_name|>stock.ts<|end_file_name|><|fim▁begin|>import { Component } from '@angular/core';
import {ModalController} from 'ionic-angular';
import {RawMaterial, RawMaterialProvider} from "../../providers/raw-material/raw-material";
import {Observable} from "rxjs/Observable";
import {NewStockPage} from "../new-stock/new-stock";
import 'rxjs/add/operator/do'
/**
* Generated class for the StockPage page.
*
* See http://ionicframework.com/docs/components/#navigation for more info
* on Ionic pages and navigation.
*/<|fim▁hole|>})
export class StockPage {
rawMaterials: Observable<RawMaterial[]>;
loading: boolean;
constructor(private rawMaterialProvider: RawMaterialProvider,
public modalCtrl: ModalController) {
this.loading = true;
this.rawMaterials = this.rawMaterialProvider.getRawMaterials().do(
() => { this.loading = false;},
() => { this.loading = false;}
);
}
searchRawMaterials(event: any) {
this.rawMaterials = this.rawMaterialProvider.getRawMaterials();
let searchName: string = event.target.value;
if (searchName && searchName.trim() !== '') {
this.rawMaterials = this.rawMaterialProvider.getRawMaterials(searchName);
}
}
openAddNewStockModal(rawMaterial?: any) {
let modal = this.modalCtrl.create(NewStockPage, {
rawMaterial: rawMaterial
});
modal.present();
}
}<|fim▁end|> | @Component({
selector: 'page-stock',
templateUrl: 'stock.html', |